nvidia-nat 1.3.0a20250904__py3-none-any.whl → 1.3.0a20250906__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/nat/cli/commands/workflow/templates/pyproject.toml.j2
+++ b/nat/cli/commands/workflow/templates/pyproject.toml.j2
@@ -14,7 +14,7 @@ name = "{{ package_name }}"
  dependencies = [
      "{{ nat_dependency }}",
  ]
- requires-python = ">=3.11,<3.13"
+ requires-python = ">=3.11,<3.14"
  description = "Custom NeMo Agent Toolkit Workflow"
  classifiers = ["Programming Language :: Python"]

--- a/nat/eval/utils/weave_eval.py
+++ b/nat/eval/utils/weave_eval.py
@@ -44,11 +44,9 @@ class WeaveEvaluationIntegration:
          self.eval_trace_context = eval_trace_context

          try:
-             from weave.flow.eval_imperative import EvaluationLogger
-             from weave.flow.eval_imperative import ScoreLogger
+             from weave import EvaluationLogger
              from weave.trace.context import weave_client_context
              self.evaluation_logger_cls = EvaluationLogger
-             self.score_logger_cls = ScoreLogger
              self.weave_client_context = weave_client_context
              self.available = True
          except ImportError:
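
Note: newer Weave releases export `EvaluationLogger` from the package root rather than `weave.flow.eval_imperative`, and `ScoreLogger` is no longer imported here. A minimal sketch of the same guarded-import pattern; `WEAVE_AVAILABLE` and `make_logger` are hypothetical names, not part of the package:

```python
# Guarded-import sketch mirroring the hunk above; EvaluationLogger moved
# from weave.flow.eval_imperative to the top-level weave namespace.
try:
    from weave import EvaluationLogger  # newer weave releases
    WEAVE_AVAILABLE = True
except ImportError:
    EvaluationLogger = None  # type: ignore[assignment,misc]
    WEAVE_AVAILABLE = False


def make_logger(model: dict, dataset: list[dict]):
    """Return an EvaluationLogger when weave is installed, else None."""
    if not WEAVE_AVAILABLE:
        return None
    return EvaluationLogger(model=model, dataset=dataset)
```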
@@ -94,7 +92,10 @@ class WeaveEvaluationIntegration:
          weave_dataset = self._get_weave_dataset(eval_input)
          config_dict = config.model_dump(mode="json")
          config_dict["name"] = workflow_alias
-         self.eval_logger = self.evaluation_logger_cls(model=config_dict, dataset=weave_dataset)
+         self.eval_logger = self.evaluation_logger_cls(model=config_dict,
+                                                       dataset=weave_dataset,
+                                                       name=workflow_alias,
+                                                       eval_attributes={})
          self.pred_loggers = {}

          # Capture the current evaluation call for context propagation
@@ -189,3 +190,4 @@ class WeaveEvaluationIntegration:
          # Log the summary to finish the evaluation, disable auto-summarize
          # as we will be adding profiler metrics to the summary
          self.eval_logger.log_summary(summary, auto_summarize=False)
+         logger.info("Logged Evaluation Summary to Weave")
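
For orientation, a sketch of how this logger is typically driven end to end, assuming Weave's imperative evaluation API (`log_prediction`, `log_score`, `log_summary`); the `EvaluationLogger` keyword arguments mirror the call sites in the hunks above, the data values are hypothetical:

```python
# Sketch of the imperative evaluation flow (assumes weave is installed
# and initialized).
from weave import EvaluationLogger

eval_logger = EvaluationLogger(model={"name": "my_workflow"},
                               dataset=[{"id": "0", "question": "2+2?"}],
                               name="my_workflow",
                               eval_attributes={})
pred = eval_logger.log_prediction(inputs={"question": "2+2?"}, output="4")
pred.log_score(scorer="exact_match", score=1.0)
pred.finish()
# auto_summarize=False so profiler metrics can be appended to the summary
eval_logger.log_summary({"exact_match": 1.0}, auto_summarize=False)
```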
--- a/nat/front_ends/fastapi/fastapi_front_end_config.py
+++ b/nat/front_ends/fastapi/fastapi_front_end_config.py
@@ -14,6 +14,8 @@
  # limitations under the License.

  import logging
+ import os
+ import sys
  import typing
  from datetime import datetime
  from pathlib import Path
@@ -31,6 +33,20 @@ logger = logging.getLogger(__name__)
  YAML_EXTENSIONS = (".yaml", ".yml")


+ def _is_reserved(path: Path) -> bool:
+     """
+     Check if a path is reserved in the current Python version and platform.
+
+     On Windows, this function checks if the path is reserved in the current Python version.
+     On other platforms, returns False.
+     """
+     if sys.platform != "win32":
+         return False
+     if sys.version_info >= (3, 13):
+         return os.path.isreserved(path)
+     return path.is_reserved()
+
+
  class EvaluateRequest(BaseModel):
      """Request model for the evaluate endpoint."""
      config_file: str = Field(description="Path to the configuration file for evaluation")
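
Background: `pathlib.PurePath.is_reserved()` is deprecated as of Python 3.13, and Windows builds of 3.13+ provide `os.path.isreserved()` as the replacement, hence the version dispatch in the helper. A quick illustration of its behavior:

```python
# Illustration only: reserved DOS device names such as "CON" or "NUL"
# are rejected on Windows; POSIX platforms always get False.
from pathlib import Path

print(_is_reserved(Path("CON")))      # True on Windows, False elsewhere
print(_is_reserved(Path("job_123")))  # False everywhere
```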
@@ -51,7 +67,7 @@ class EvaluateRequest(BaseModel):
                  f"Job ID '{job_id}' contains invalid characters. Only alphanumeric characters and underscores are"
                  " allowed.")

-         if job_id_path.is_reserved():
+         if _is_reserved(job_id_path):
              # reserved names is Windows specific
              raise ValueError(f"Job ID '{job_id}' is a reserved name. Please choose a different name.")

@@ -68,7 +84,7 @@ class EvaluateRequest(BaseModel):
              raise ValueError(f"Config file '{config_file}' must be a YAML file with one of the following extensions: "
                               f"{', '.join(YAML_EXTENSIONS)}")

-         if config_file_path.is_reserved():
+         if _is_reserved(config_file_path):
              # reserved names is Windows specific
              raise ValueError(f"Config file '{config_file}' is a reserved name. Please choose a different name.")

--- a/nat/observability/exporter/processing_exporter.py
+++ b/nat/observability/exporter/processing_exporter.py
@@ -50,46 +50,74 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
      - Processor pipeline management (add, remove, clear)
      - Type compatibility validation between processors
      - Pipeline processing with error handling
+     - Configurable None filtering: processors returning None can drop items from pipeline
      - Automatic type validation before export
      """

-     def __init__(self, context_state: ContextState | None = None):
+     def __init__(self, context_state: ContextState | None = None, drop_nones: bool = True):
          """Initialize the processing exporter.

          Args:
-             context_state: The context state to use for the exporter.
+             context_state (ContextState | None): The context state to use for the exporter.
+             drop_nones (bool): Whether to drop items when processors return None (default: True).
          """
          super().__init__(context_state)
          self._processors: list[Processor] = []  # List of processors that implement process(item) -> item
-
-     def add_processor(self, processor: Processor) -> None:
+         self._processor_names: dict[str, int] = {}  # Maps processor names to their positions
+         self._pipeline_locked: bool = False  # Prevents modifications after startup
+         self._drop_nones: bool = drop_nones  # Whether to drop None values between processors
+
+     def add_processor(self,
+                       processor: Processor,
+                       name: str | None = None,
+                       position: int | None = None,
+                       before: str | None = None,
+                       after: str | None = None) -> None:
          """Add a processor to the processing pipeline.

-         Processors are executed in the order they are added.
-         Processors can transform between any types (T -> U).
+         Processors are executed in the order they are added. Processors can transform between any types (T -> U).
+         Supports flexible positioning using names, positions, or relative placement.

          Args:
-             processor: The processor to add to the pipeline
+             processor (Processor): The processor to add to the pipeline
+             name (str | None): Name for the processor (for later reference). Must be unique.
+             position (int | None): Specific position to insert at (0-based index, -1 for append)
+             before (str | None): Insert before the named processor
+             after (str | None): Insert after the named processor
+
+         Raises:
+             RuntimeError: If pipeline is locked (after startup)
+             ValueError: If positioning arguments conflict or named processor not found
          """
+         self._check_pipeline_locked()

-         # Check if the processor is compatible with the last processor in the pipeline
-         if len(self._processors) > 0:
-             try:
-                 if not issubclass(processor.input_class, self._processors[-1].output_class):
-                     raise ValueError(f"Processor {processor.__class__.__name__} input type {processor.input_type} "
-                                      f"is not compatible with the {self._processors[-1].__class__.__name__} "
-                                      f"output type {self._processors[-1].output_type}")
-             except TypeError:
-                 # Handle cases where input_class or output_class are generic types that can't be used with issubclass
-                 # Fall back to type comparison for generic types
-                 logger.warning(
-                     "Cannot use issubclass() for type compatibility check between "
-                     "%s (%s) and %s (%s). Skipping compatibility check.",
-                     processor.__class__.__name__,
-                     processor.input_type,
-                     self._processors[-1].__class__.__name__,
-                     self._processors[-1].output_type)
-         self._processors.append(processor)
+         # Determine insertion position
+         insert_position = self._calculate_insertion_position(position, before, after)
+
+         # Validate type compatibility at insertion point
+         self._validate_insertion_compatibility(processor, insert_position)
+
+         # Pre-validate name (no side effects yet)
+         if name is not None:
+             if not isinstance(name, str):
+                 raise TypeError(f"Processor name must be a string, got {type(name).__name__}")
+             if name in self._processor_names:
+                 raise ValueError(f"Processor name '{name}' already exists")
+
+         # Shift existing name positions (do this before list mutation)
+         for proc_name, pos in list(self._processor_names.items()):
+             if pos >= insert_position:
+                 self._processor_names[proc_name] = pos + 1
+
+         # Insert the processor
+         if insert_position == len(self._processors):
+             self._processors.append(processor)
+         else:
+             self._processors.insert(insert_position, processor)
+
+         # Record the new processor name, if provided
+         if name is not None:
+             self._processor_names[name] = insert_position

          # Set up pipeline continuation callback for processors that support it
          if isinstance(processor, CallbackProcessor):
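
A minimal sketch of the new positioning API; `PassThroughProcessor` and `exporter` are hypothetical stand-ins, and only `add_processor`'s signature comes from the diff above:

```python
from nat.data_models.span import Span
from nat.observability.processor.processor import Processor


class PassThroughProcessor(Processor[Span, Span]):
    """Identity processor used only to illustrate pipeline positioning."""

    async def process(self, item: Span) -> Span:
        return item


exporter.add_processor(PassThroughProcessor(), name="serialize")   # appended
exporter.add_processor(PassThroughProcessor(), name="batch")       # after "serialize"
exporter.add_processor(PassThroughProcessor(), before="batch")     # between the two
exporter.add_processor(PassThroughProcessor(), position=0)         # runs first
exporter.add_processor(PassThroughProcessor(), after="serialize")  # right after "serialize"
```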
@@ -99,27 +127,231 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,

              processor.set_done_callback(pipeline_callback)

-     def remove_processor(self, processor: Processor) -> None:
+     def remove_processor(self, processor: Processor | str | int) -> None:
          """Remove a processor from the processing pipeline.

          Args:
-             processor: The processor to remove from the pipeline
+             processor (Processor | str | int): The processor to remove (by name, position, or object).
+
+         Raises:
+             RuntimeError: If pipeline is locked (after startup)
+             ValueError: If named processor or position not found
+             TypeError: If processor argument has invalid type
          """
-         if processor in self._processors:
-             self._processors.remove(processor)
+         self._check_pipeline_locked()
+
+         # Determine processor and position to remove
+         if isinstance(processor, str):
+             # Remove by name
+             if processor not in self._processor_names:
+                 raise ValueError(f"Processor '{processor}' not found in pipeline")
+             position = self._processor_names[processor]
+             processor_obj = self._processors[position]
+         elif isinstance(processor, int):
+             # Remove by position
+             if not (0 <= processor < len(self._processors)):
+                 raise ValueError(f"Position {processor} is out of range [0, {len(self._processors) - 1}]")
+             position = processor
+             processor_obj = self._processors[position]
+         elif isinstance(processor, Processor):
+             # Remove by object (existing behavior)
+             if processor not in self._processors:
+                 return  # Silently ignore if not found (existing behavior)
+             position = self._processors.index(processor)
+             processor_obj = processor
+         else:
+             raise TypeError(f"Processor must be a Processor object, string name, or int position, "
+                             f"got {type(processor).__name__}")
+
+         # Remove the processor
+         self._processors.remove(processor_obj)
+
+         # Remove from name mapping and update positions
+         name_to_remove = None
+         for name, pos in self._processor_names.items():
+             if pos == position:
+                 name_to_remove = name
+                 break
+
+         if name_to_remove:
+             del self._processor_names[name_to_remove]
+
+         # Update positions for processors that shifted
+         for name, pos in self._processor_names.items():
+             if pos > position:
+                 self._processor_names[name] = pos - 1

      def clear_processors(self) -> None:
          """Clear all processors from the pipeline."""
+         self._check_pipeline_locked()
          self._processors.clear()
+         self._processor_names.clear()
+
+     def reset_pipeline(self) -> None:
+         """Reset the pipeline to allow modifications.
+
+         This unlocks the pipeline and clears all processors, allowing
+         the pipeline to be reconfigured. Can only be called when the
+         exporter is stopped.
+
+         Raises:
+             RuntimeError: If exporter is currently running
+         """
+         if self._running:
+             raise RuntimeError("Cannot reset pipeline while exporter is running. "
+                                "Call stop() first, then reset_pipeline().")
+
+         self._pipeline_locked = False
+         self._processors.clear()
+         self._processor_names.clear()
+         logger.debug("Pipeline reset - unlocked and cleared all processors")
+
+     def get_processor_by_name(self, name: str) -> Processor | None:
+         """Get a processor by its name.
+
+         Args:
+             name (str): The name of the processor to retrieve
+
+         Returns:
+             Processor | None: The processor with the given name, or None if not found
+         """
+         if not isinstance(name, str):
+             raise TypeError(f"Processor name must be a string, got {type(name).__name__}")
+         if name in self._processor_names:
+             position = self._processor_names[name]
+             return self._processors[position]
+         logger.debug("Processor '%s' not found in pipeline", name)
+         return None
+
+     def _check_pipeline_locked(self) -> None:
+         """Check if pipeline is locked and raise error if it is."""
+         if self._pipeline_locked:
+             raise RuntimeError("Cannot modify processor pipeline after exporter has started. "
+                                "Pipeline must be fully configured before calling start().")
+
+     def _calculate_insertion_position(self, position: int | None, before: str | None, after: str | None) -> int:
+         """Calculate the insertion position based on provided arguments.
+
+         Args:
+             position (int | None): Explicit position (0-based index, -1 for append)
+             before (str | None): Insert before this named processor
+             after (str | None): Insert after this named processor
+
+         Returns:
+             int: The calculated insertion position
+
+         Raises:
+             ValueError: If arguments conflict or named processor not found
+         """
+         # Check for conflicting arguments
+         args_provided = sum(x is not None for x in [position, before, after])
+         if args_provided > 1:
+             raise ValueError("Only one of position, before, or after can be specified")
+
+         # Default to append
+         if args_provided == 0:
+             return len(self._processors)
+
+         # Handle explicit position
+         if position is not None:
+             if position == -1:
+                 return len(self._processors)
+             if 0 <= position <= len(self._processors):
+                 return position
+             raise ValueError(f"Position {position} is out of range [0, {len(self._processors)}]")
+
+         # Handle before/after named processors
+         if before is not None:
+             if not isinstance(before, str):
+                 raise TypeError(f"'before' parameter must be a string, got {type(before).__name__}")
+             if before not in self._processor_names:
+                 raise ValueError(f"Processor '{before}' not found in pipeline")
+             return self._processor_names[before]
+
+         if after is not None:
+             if not isinstance(after, str):
+                 raise TypeError(f"'after' parameter must be a string, got {type(after).__name__}")
+             if after not in self._processor_names:
+                 raise ValueError(f"Processor '{after}' not found in pipeline")
+             return self._processor_names[after] + 1
+
+         # Should never reach here
+         return len(self._processors)
+
+     def _validate_insertion_compatibility(self, processor: Processor, position: int) -> None:
+         """Validate type compatibility for processor insertion.
+
+         Args:
+             processor (Processor): The processor to insert
+             position (int): The position where it will be inserted
+
+         Raises:
+             ValueError: If processor is not compatible with neighbors
+         """
+         # Check compatibility with neighbors
+         if position > 0:
+             predecessor = self._processors[position - 1]
+             self._check_processor_compatibility(predecessor,
+                                                 processor,
+                                                 "predecessor",
+                                                 predecessor.output_class,
+                                                 processor.input_class,
+                                                 str(predecessor.output_type),
+                                                 str(processor.input_type))
+
+         if position < len(self._processors):
+             successor = self._processors[position]
+             self._check_processor_compatibility(processor,
+                                                 successor,
+                                                 "successor",
+                                                 processor.output_class,
+                                                 successor.input_class,
+                                                 str(processor.output_type),
+                                                 str(successor.input_type))
+
+     def _check_processor_compatibility(self,
+                                        source_processor: Processor,
+                                        target_processor: Processor,
+                                        relationship: str,
+                                        source_class: type,
+                                        target_class: type,
+                                        source_type: str,
+                                        target_type: str) -> None:
+         """Check type compatibility between two processors.
+
+         Args:
+             source_processor (Processor): The processor providing output
+             target_processor (Processor): The processor receiving input
+             relationship (str): Description of relationship ("predecessor" or "successor")
+             source_class (type): The output class of source processor
+             target_class (type): The input class of target processor
+             source_type (str): String representation of source type
+             target_type (str): String representation of target type
+         """
+         try:
+             if not issubclass(source_class, target_class):
+                 raise ValueError(f"Processor {target_processor.__class__.__name__} input type {target_type} "
+                                  f"is not compatible with {relationship} {source_processor.__class__.__name__} "
+                                  f"output type {source_type}")
+         except TypeError:
+             logger.warning(
+                 "Cannot use issubclass() for type compatibility check between "
+                 "%s (%s) and %s (%s). Skipping compatibility check.",
+                 source_processor.__class__.__name__,
+                 source_type,
+                 target_processor.__class__.__name__,
+                 target_type)

      async def _pre_start(self) -> None:
+
+         # Validate that the pipeline is compatible with the exporter
          if len(self._processors) > 0:
              first_processor = self._processors[0]
              last_processor = self._processors[-1]

              # validate that the first processor's input type is compatible with the exporter's input type
              try:
-                 if not issubclass(first_processor.input_class, self.input_class):
+                 if not issubclass(self.input_class, first_processor.input_class):
                      raise ValueError(f"Processor {first_processor.__class__.__name__} input type "
                                       f"{first_processor.input_type} is not compatible with the "
                                       f"{self.input_type} input type")
@@ -149,14 +381,17 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
                      self.output_type,
                      e)

-     async def _process_pipeline(self, item: PipelineInputT) -> PipelineOutputT:
+         # Lock the pipeline to prevent further modifications
+         self._pipeline_locked = True
+
+     async def _process_pipeline(self, item: PipelineInputT) -> PipelineOutputT | None:
          """Process item through all registered processors.

          Args:
              item (PipelineInputT): The item to process (starts as PipelineInputT, can transform to PipelineOutputT)

          Returns:
-             PipelineOutputT: The processed item after running through all processors
+             PipelineOutputT | None: The processed item after running through all processors
          """
          return await self._process_through_processors(self._processors, item)  # type: ignore

@@ -168,12 +403,18 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
              item (Any): The item to process

          Returns:
-             The processed item after running through all processors
+             Any: The processed item after running through all processors, or None if
+                 drop_nones is True and any processor returned None
          """
          processed_item = item
          for processor in processors:
              try:
                  processed_item = await processor.process(processed_item)
+                 # Drop None values between processors if configured to do so
+                 if self._drop_nones and processed_item is None:
+                     logger.debug("Processor %s returned None, dropping item from pipeline",
+                                  processor.__class__.__name__)
+                     return None
              except Exception as e:
                  logger.exception("Error in processor %s: %s", processor.__class__.__name__, e)
                  # Continue with unprocessed item rather than failing
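
With `drop_nones` enabled (the default), any processor can act as a filter by returning `None`, which drops the item before it reaches export. A minimal sketch of such a filter; the class and the `"http.route"` attribute key are hypothetical:

```python
from nat.data_models.span import Span
from nat.observability.processor.processor import Processor


class HealthCheckFilter(Processor[Span, Span | None]):
    """Drops health-check spans by returning None."""

    async def process(self, item: Span) -> Span | None:
        if item.attributes.get("http.route") == "/health":
            return None  # dropped when the exporter has drop_nones=True
        return item
```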
@@ -221,6 +462,11 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
          remaining_processors = self._processors[source_index + 1:]
          processed_item = await self._process_through_processors(remaining_processors, item)

+         # Skip export if remaining pipeline dropped the item (returned None)
+         if processed_item is None:
+             logger.debug("Item was dropped by remaining processor pipeline, skipping export")
+             return
+
          # Export the final result
          await self._export_final_item(processed_item)

@@ -233,11 +479,16 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
          """Export an item after processing it through the pipeline.

          Args:
-             item: The item to export
+             item (PipelineInputT): The item to export
          """
          try:
              # Then, run through the processor pipeline
-             final_item: PipelineOutputT = await self._process_pipeline(item)
+             final_item: PipelineOutputT | None = await self._process_pipeline(item)
+
+             # Skip export if pipeline dropped the item (returned None)
+             if final_item is None:
+                 logger.debug("Item was dropped by processor pipeline, skipping export")
+                 return

              # Handle different output types from batch processors
              if isinstance(final_item, list) and len(final_item) == 0:
@@ -276,12 +527,16 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
          the actual export logic after the item has been processed through the pipeline.

          Args:
-             item: The processed item to export (PipelineOutputT type)
+             item (PipelineOutputT | list[PipelineOutputT]): The processed item to export (PipelineOutputT type)
          """
          pass

-     def _create_export_task(self, coro: Coroutine):
-         """Create task with minimal overhead but proper tracking."""
+     def _create_export_task(self, coro: Coroutine) -> None:
+         """Create task with minimal overhead but proper tracking.
+
+         Args:
+             coro: The coroutine to create a task for
+         """
          if not self._running:
              logger.warning("%s: Attempted to create export task while not running", self.name)
              return
@@ -296,7 +551,7 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
              raise

      @override
-     async def _cleanup(self):
+     async def _cleanup(self) -> None:
          """Enhanced cleanup that shuts down all shutdown-aware processors.

          Each processor is responsible for its own cleanup, including routing
--- /dev/null
+++ b/nat/observability/mixin/redaction_config_mixin.py
@@ -0,0 +1,41 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pydantic import BaseModel
+ from pydantic import Field
+
+
+ class RedactionConfigMixin(BaseModel):
+     """Mixin for basic redaction configuration.
+
+     Provides core redaction functionality that can be used standalone
+     or inherited by specialized redaction mixins.
+     """
+     redaction_enabled: bool = Field(default=False, description="Whether to enable redaction processing.")
+     redaction_value: str = Field(default="[REDACTED]", description="Value to replace redacted attributes with.")
+     redaction_attributes: list[str] = Field(default_factory=lambda: ["input.value", "output.value", "metadata"],
+                                             description="Span attributes to redact when redaction is triggered.")
+     force_redaction: bool = Field(default=False, description="Always redact regardless of other conditions.")
+
+
+ class HeaderRedactionConfigMixin(RedactionConfigMixin):
+     """Mixin for header-based redaction configuration.
+
+     Inherits core redaction fields (redaction_enabled, redaction_attributes, force_redaction)
+     and adds header-specific configuration for authentication-based redaction decisions.
+
+     Note: The callback function must be provided directly to the processor at runtime.
+     """
+     redaction_header: str = Field(default="x-redaction-key", description="Header to check for redaction decisions.")
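
These mixins are plain Pydantic models, so a telemetry exporter config can opt into redaction by inheriting them. In this sketch the config class and its `endpoint` field are hypothetical; the redaction fields come from the mixin above:

```python
from nat.observability.mixin.redaction_config_mixin import HeaderRedactionConfigMixin


class MyTelemetryExporterConfig(HeaderRedactionConfigMixin):
    endpoint: str = "http://localhost:4318"  # hypothetical exporter field


config = MyTelemetryExporterConfig(redaction_enabled=True,
                                   redaction_header="x-tenant-key",
                                   redaction_attributes=["input.value", "output.value"])
print(config.redaction_value)  # "[REDACTED]" (default)
```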
--- /dev/null
+++ b/nat/observability/mixin/tagging_config_mixin.py
@@ -0,0 +1,50 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from enum import Enum
+ from typing import Generic
+ from typing import TypeVar
+
+ from pydantic import BaseModel
+ from pydantic import Field
+
+ TagValueT = TypeVar("TagValueT")
+
+
+ class PrivacyLevel(str, Enum):
+     """Privacy level for the traces."""
+     NONE = "none"
+     LOW = "low"
+     MEDIUM = "medium"
+     HIGH = "high"
+
+
+ class TaggingConfigMixin(BaseModel, Generic[TagValueT]):
+     """Generic mixin for tagging spans with typed values.
+
+     This mixin provides a flexible tagging system where both the tag key
+     and value type can be customized for different use cases.
+     """
+     tag_key: str | None = Field(default=None, description="Key to use when tagging traces.")
+     tag_value: TagValueT | None = Field(default=None, description="Value to tag the traces with.")
+
+
+ class PrivacyTaggingConfigMixin(TaggingConfigMixin[PrivacyLevel]):
+     """Mixin for privacy level tagging on spans.
+
+     Specializes TaggingConfigMixin to work with PrivacyLevel enum values,
+     providing a typed interface for privacy-related span tagging.
+     """
+     pass
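
Usage is a straightforward Pydantic instantiation; `PrivacyLevel` members serialize to their string values:

```python
from nat.observability.mixin.tagging_config_mixin import PrivacyLevel
from nat.observability.mixin.tagging_config_mixin import PrivacyTaggingConfigMixin

cfg = PrivacyTaggingConfigMixin(tag_key="privacy.level", tag_value=PrivacyLevel.HIGH)
print(cfg.model_dump(mode="json"))  # {'tag_key': 'privacy.level', 'tag_value': 'high'}
```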
--- /dev/null
+++ b/nat/observability/processor/header_redaction_processor.py
@@ -0,0 +1,123 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ from collections.abc import Callable
+ from functools import lru_cache
+
+ from starlette.datastructures import Headers
+
+ from nat.builder.context import Context
+ from nat.data_models.span import Span
+ from nat.observability.processor.redaction_processor import SpanRedactionProcessor
+ from nat.utils.type_utils import override
+
+ logger = logging.getLogger(__name__)
+
+
+ def default_callback(_auth_key: str) -> bool:
+     """Default callback that always returns False."""
+     return False
+
+
+ class HeaderRedactionProcessor(SpanRedactionProcessor):
+     """Processor that redacts the span based on auth key, span attributes, and callback.
+
+     Uses an LRU cache to avoid redundant callback executions for the same auth keys,
+     providing bounded memory usage and automatic eviction of least recently used entries.
+
+     Args:
+         attributes: List of span attribute keys to redact.
+         header: The header key to check for authentication.
+         callback: Function to determine if the auth key should trigger redaction.
+         enabled: Whether the processor is enabled (default: True).
+         force_redact: If True, always redact regardless of header checks (default: False).
+         redaction_value: The value to replace redacted attributes with (default: "[REDACTED]").
+     """
+
+     def __init__(self,
+                  attributes: list[str] | None = None,
+                  header: str | None = None,
+                  callback: Callable[[str], bool] | None = None,
+                  enabled: bool = True,
+                  force_redact: bool = False,
+                  redaction_value: str = "[REDACTED]"):
+         self.attributes = attributes or []
+         self.header = header
+         self.callback = callback or default_callback
+         self.enabled = enabled
+         self.force_redact = force_redact
+         self.redaction_value = redaction_value
+
+     @override
+     def should_redact(self, item: Span, context: Context) -> bool:
+         """Determine if this span should be redacted based on header auth.
+
+         Args:
+             item (Span): The span to check.
+             context (Context): The current context.
+
+         Returns:
+             bool: True if the span should be redacted, False otherwise.
+         """
+         # If force_redact is enabled, always redact regardless of other conditions
+         if self.force_redact:
+             return True
+
+         if not self.enabled:
+             return False
+
+         headers: Headers | None = context.metadata.headers
+
+         if headers is None or self.header is None:
+             return False
+
+         auth_key = headers.get(self.header, None)
+
+         if not auth_key:
+             return False
+
+         # Use LRU cached method to determine if redaction is needed
+         return self._should_redact_impl(auth_key)
+
+     @lru_cache(maxsize=128)
+     def _should_redact_impl(self, auth_key: str) -> bool:
+         """Implementation method for checking if redaction should occur.
+
+         This method uses lru_cache to avoid redundant callback executions.
+
+         Args:
+             auth_key (str): The authentication key to check.
+
+         Returns:
+             bool: True if the span should be redacted, False otherwise.
+         """
+         return self.callback(auth_key)
+
+     @override
+     def redact_item(self, item: Span) -> Span:
+         """Redact the span.
+
+         Args:
+             item (Span): The span to redact.
+
+         Returns:
+             Span: The redacted span.
+         """
+         for key in self.attributes:
+             if key in item.attributes:
+                 item.attributes[key] = self.redaction_value
+
+         return item
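
A usage sketch; the allowlist policy in the callback is hypothetical:

```python
from nat.observability.processor.header_redaction_processor import HeaderRedactionProcessor


def is_external_tenant(auth_key: str) -> bool:
    """Hypothetical policy: redact for any key outside an internal allowlist."""
    return auth_key not in {"internal-team-a", "internal-team-b"}


processor = HeaderRedactionProcessor(attributes=["input.value", "output.value"],
                                     header="x-redaction-key",
                                     callback=is_external_tenant)
```

Worth noting: because `_should_redact_impl` applies `lru_cache` to an instance method, the cache key includes `self`, so callback results are cached per processor instance per auth key, and cached entries keep the instance alive until evicted.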
--- /dev/null
+++ b/nat/observability/processor/redaction_processor.py
@@ -0,0 +1,77 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ from abc import ABC
+ from abc import abstractmethod
+ from typing import TypeVar
+
+ from nat.builder.context import Context
+ from nat.data_models.span import Span
+ from nat.observability.processor.processor import Processor
+ from nat.utils.type_utils import override
+
+ RedactionItemT = TypeVar('RedactionItemT')
+
+ logger = logging.getLogger(__name__)
+
+
+ class RedactionProcessor(Processor[RedactionItemT, RedactionItemT], ABC):
+     """Abstract base class for redaction processors."""
+
+     @abstractmethod
+     def should_redact(self, item: RedactionItemT, context: Context) -> bool:
+         """Determine if this item should be redacted.
+
+         Args:
+             item (RedactionItemT): The item to check.
+             context (Context): The current context.
+
+         Returns:
+             bool: True if the item should be redacted, False otherwise.
+         """
+         pass
+
+     @abstractmethod
+     def redact_item(self, item: RedactionItemT) -> RedactionItemT:
+         """Redact the item.
+
+         Args:
+             item (RedactionItemT): The item to redact.
+
+         Returns:
+             RedactionItemT: The redacted item.
+         """
+         pass
+
+     @override
+     async def process(self, item: RedactionItemT) -> RedactionItemT:
+         """Perform redaction on the item if it should be redacted.
+
+         Args:
+             item (RedactionItemT): The item to process.
+
+         Returns:
+             RedactionItemT: The processed item.
+         """
+         context = Context.get()
+         if self.should_redact(item, context):
+             return self.redact_item(item)
+         return item
+
+
+ class SpanRedactionProcessor(RedactionProcessor[Span]):
+     """Abstract base class for span redaction processors."""
+     pass
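
Concrete subclasses only implement the two hooks; the `process()` wiring comes from the base class. A hypothetical example, including the `"nat.sensitive"` attribute key:

```python
from nat.builder.context import Context
from nat.data_models.span import Span
from nat.observability.processor.redaction_processor import SpanRedactionProcessor
from nat.utils.type_utils import override


class SensitiveFlagRedactionProcessor(SpanRedactionProcessor):
    """Hypothetical subclass: redacts spans explicitly flagged as sensitive."""

    @override
    def should_redact(self, item: Span, context: Context) -> bool:
        return bool(item.attributes.get("nat.sensitive", False))

    @override
    def redact_item(self, item: Span) -> Span:
        item.attributes["input.value"] = "[REDACTED]"
        return item
```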
--- /dev/null
+++ b/nat/observability/processor/span_tagging_processor.py
@@ -0,0 +1,61 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ import os
+
+ from nat.data_models.span import Span
+ from nat.observability.processor.processor import Processor
+ from nat.utils.type_utils import override
+
+ logger = logging.getLogger(__name__)
+
+
+ class SpanTaggingProcessor(Processor[Span, Span]):
+     """Processor that tags spans with key-value metadata attributes.
+
+     This processor adds custom tags to spans by setting attributes with a configurable prefix.
+     Tags are only applied when both tag_key and tag_value are provided. The processor uses
+     a span prefix (configurable via NAT_SPAN_PREFIX environment variable) to namespace
+     the tag attributes.
+
+     Args:
+         tag_key: The key name for the tag to add to spans.
+         tag_value: The value for the tag to add to spans.
+         span_prefix: The prefix to use for tag attributes (default: from NAT_SPAN_PREFIX env var or "nat").
+     """
+
+     def __init__(self, tag_key: str | None = None, tag_value: str | None = None, span_prefix: str | None = None):
+         self.tag_key = tag_key
+         self.tag_value = tag_value
+
+         if span_prefix is None:
+             span_prefix = os.getenv("NAT_SPAN_PREFIX", "nat").strip() or "nat"
+
+         self._span_prefix = span_prefix
+
+     @override
+     async def process(self, item: Span) -> Span:
+         """Tag the span with a tag if both tag_key and tag_value are provided.
+
+         Args:
+             item (Span): The span to tag.
+
+         Returns:
+             Span: The tagged span.
+         """
+         if self.tag_key and self.tag_value:
+             item.set_attribute(f"{self._span_prefix}.{self.tag_key}", self.tag_value)
+         return item
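
A short usage sketch; the tag values and the `exporter` wiring reuse the hypothetical pipeline from the earlier examples:

```python
from nat.observability.processor.span_tagging_processor import SpanTaggingProcessor

# Tags every span with "nat.privacy.level" = "high"; the prefix comes
# from NAT_SPAN_PREFIX (default "nat").
tagger = SpanTaggingProcessor(tag_key="privacy.level", tag_value="high")
exporter.add_processor(tagger, name="privacy_tag", position=0)
```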
--- a/nvidia_nat-1.3.0a20250904.dist-info/METADATA
+++ b/nvidia_nat-1.3.0a20250906.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nvidia-nat
- Version: 1.3.0a20250904
+ Version: 1.3.0a20250906
  Summary: NVIDIA NeMo Agent toolkit
  Author: NVIDIA Corporation
  Maintainer: NVIDIA Corporation
@@ -207,7 +207,10 @@ License: Apache License
      limitations under the License.
  Keywords: ai,rag,agents
  Classifier: Programming Language :: Python
- Requires-Python: <3.13,>=3.11
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: <3.14,>=3.11
  Description-Content-Type: text/markdown
  License-File: LICENSE-3rd-party.txt
  License-File: LICENSE.md
--- a/nvidia_nat-1.3.0a20250904.dist-info/RECORD
+++ b/nvidia_nat-1.3.0a20250906.dist-info/RECORD
@@ -92,7 +92,7 @@ nat/cli/commands/workflow/workflow.py,sha256=40nIOehOX-4xI-qJqJraBX3XVz3l2VtFsZk
  nat/cli/commands/workflow/workflow_commands.py,sha256=jk0Nm27hhyb0Nj7WzVdQa_w2HP9yEZTzb6U_O7llg0c,13032
  nat/cli/commands/workflow/templates/__init__.py.j2,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nat/cli/commands/workflow/templates/config.yml.j2,sha256=KkZl1fOMVQVFBW-BD_d0Lu8kQgNBtjNpfojhSCPu4uA,222
- nat/cli/commands/workflow/templates/pyproject.toml.j2,sha256=-DQwYsLHyNsTSkQydVRR8WDq7-0FB8YG2TtvluDeLFI,1035
+ nat/cli/commands/workflow/templates/pyproject.toml.j2,sha256=lDBC4exHYutXa_skuJj176yMEuZr-DsdzrqQHPZoKpk,1035
  nat/cli/commands/workflow/templates/register.py.j2,sha256=txA-qBpWhxRc0GUcVRCIqVI6gGSh-TJijemrUqnb38s,138
  nat/cli/commands/workflow/templates/workflow.py.j2,sha256=Z4uZPG9rtf1nxF74dF4DqDtrF3uYmYUmWowDFbQBjao,1241
  nat/data_models/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
@@ -170,7 +170,7 @@ nat/eval/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nat/eval/utils/eval_trace_ctx.py,sha256=hN0YZ0wMOPzh9I-iSav-cGdxY3RWQWoE_tk5BxUf1mc,3264
  nat/eval/utils/output_uploader.py,sha256=27-aKIejV-6DGNErR6iTioNE5rN_lEeiNoBTS1qIVVM,5579
  nat/eval/utils/tqdm_position_registry.py,sha256=9CtpCk1wtYCSyieHPaSp8nlZu6EcNUOaUz2RTqfekrA,1286
- nat/eval/utils/weave_eval.py,sha256=nKLUa17qcLydZMWXy545iincrvgAMNGFsD73Qd2nhTU,7578
+ nat/eval/utils/weave_eval.py,sha256=fma5x9JbWpWrfQbfMHcjMovlRVR0v35yfNt1Avt6Vro,7719
  nat/experimental/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nat/experimental/decorators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nat/experimental/decorators/experimental_warning_decorator.py,sha256=ji9fITsDF5N5ps5Y9jwb1sqc1e_3TvkUUUBBiJC76CE,5020
@@ -217,7 +217,7 @@ nat/front_ends/console/console_front_end_plugin.py,sha256=BJ1o2IflZeFKC2gGfL_gI2
  nat/front_ends/console/register.py,sha256=2Kf6Mthx6jzWzU8YdhYIR1iABmZDvs1UXM_20npXWXs,1153
  nat/front_ends/cron/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
  nat/front_ends/fastapi/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
- nat/front_ends/fastapi/fastapi_front_end_config.py,sha256=-fkA7sJ2sBHmCvxdTyd-JJSLc1inVnT_pXa6kmmuGI4,10849
+ nat/front_ends/fastapi/fastapi_front_end_config.py,sha256=AV0yY86MjCGrB9VIcvykWvfu5WcnMcrti6rXxUdk9co,11292
  nat/front_ends/fastapi/fastapi_front_end_controller.py,sha256=ei-34KCMpyaeAgeAN4gVvSGFjewjjRhHZPN0FqAfhDY,2548
  nat/front_ends/fastapi/fastapi_front_end_plugin.py,sha256=t_kpMG6S9SIdBKz4Igy0rzrZ9xFz9DUbCBc0eowaixk,4432
  nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py,sha256=1FS1GC_lsQEMP64cPuXrF5MW4B8dMEyBxudJWz9XqPw,51852
@@ -268,7 +268,7 @@ nat/observability/exporter/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48j
  nat/observability/exporter/base_exporter.py,sha256=tP7c5O-gQnHB_1TwIJJUib1xmtUhX4f4Mf_bmpejtkI,16612
  nat/observability/exporter/exporter.py,sha256=fqF0GYuhZRQEq0skq_FK2nlnsaUAzLpQi-OciaOkRno,2391
  nat/observability/exporter/file_exporter.py,sha256=XYsFjF8ob4Ag-SyGtKEh6wRU9lBx3lbdu7Uo85NvVyo,1465
- nat/observability/exporter/processing_exporter.py,sha256=4YG5YyWMdALZYoTbXzIofWHY9K17YiVWHHz_CFCngUI,14412
+ nat/observability/exporter/processing_exporter.py,sha256=lfSURNc03kkAYFFnzcStSgC2EEYoFY0JgnvOvh05n3E,26212
  nat/observability/exporter/raw_exporter.py,sha256=0ROEd-DlLP6pIxl4u2zJ6PMVrDrQa0DMHFDRsdGQMIk,1859
  nat/observability/exporter/span_exporter.py,sha256=p2rugOIyubBk_Frg1c-x-THzvFZt8q8HhYssKUp8Hdg,13250
  nat/observability/mixin/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
@@ -276,16 +276,21 @@ nat/observability/mixin/batch_config_mixin.py,sha256=DixQq-jRhBFJvpOX-gq7GvPmZCP
  nat/observability/mixin/collector_config_mixin.py,sha256=3iptkRH9N6JgcsPq7GyjjJVAoxjd-l42UKE7iSF4Hq8,1087
  nat/observability/mixin/file_mixin.py,sha256=J5kC70O3hoU86IDOoQtdk7caRD28nlIZL3igXcRSNBE,12306
  nat/observability/mixin/file_mode.py,sha256=Rq7l8UegECub5QCyCAYwhyL_Jul386gW-ANmtMmv2G4,837
+ nat/observability/mixin/redaction_config_mixin.py,sha256=aizfK6nI8_bAJF2lrxSzcxjz8Er9o7itGNa-hDyRhr0,2028
  nat/observability/mixin/resource_conflict_mixin.py,sha256=mcUp3Qinmhiepq3DyRvp9IaKGYtJfDgQVB-MuyVkWvk,5243
  nat/observability/mixin/serialize_mixin.py,sha256=DgRHJpXCz9qHFYzhlTTx8Dkj297EylCKK3ydGrH5zOw,2478
+ nat/observability/mixin/tagging_config_mixin.py,sha256=_LLC2fZYM3CzMr2E1Kcy_-UXbjRb8fP1Yd8kcgTNozk,1731
  nat/observability/mixin/type_introspection_mixin.py,sha256=VCb68SY_hitWrWLaK2UHQLkjn2jsgxSn9593T2N3zC0,6637
  nat/observability/processor/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
  nat/observability/processor/batching_processor.py,sha256=R0Qy3bHmf-QBM6FJmjmccdsf60JuvYLU-tV4kvy2hBA,13762
  nat/observability/processor/callback_processor.py,sha256=T5DsEm4HCUOi1VL29XCpaK04sYQvJ75KZLH-mlJGQgk,1547
  nat/observability/processor/falsy_batch_filter_processor.py,sha256=CInyZ1eIjtD1W6imPbuqwUeoWOMQ_0J0M9nPL6XwhTo,1778
+ nat/observability/processor/header_redaction_processor.py,sha256=cUmsVjHepddFHZu80zvdugG2eOUaGQr6cqxKyVS8fi4,4284
  nat/observability/processor/intermediate_step_serializer.py,sha256=aHeCmixyop7uxNnKmrUZ8SIFeBNS05gYohKLepqbrcQ,1249
  nat/observability/processor/processor.py,sha256=kTqOsus5Ycu5aFnxCTH1EkCP23uRBZ4xNhXmj3DP5OE,2593
  nat/observability/processor/processor_factory.py,sha256=1Ak4OWwmbimc5PKWwqPYq4fJqJifFm6MiI8vcafeErY,2408
+ nat/observability/processor/redaction_processor.py,sha256=Mg4zIShjQwX3GCKgXgrx7VaHhoGG3JrFY_QkoDUNtEk,2401
+ nat/observability/processor/span_tagging_processor.py,sha256=w5DqWrNU0US1afjVifMAq1_wCjNobwzxo9wAovDMKos,2298
  nat/observability/utils/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
  nat/observability/utils/dict_utils.py,sha256=DcNhZ0mgcJ-QQfsCl9QSGL-m_jTuHhr1N-v43ZCAMik,7371
  nat/observability/utils/time_utils.py,sha256=V8m-e3ldUgwv031B17y29yLXIowdlTH4QW8xDw9WKvk,1071
@@ -439,10 +444,10 @@ nat/utils/reactive/base/observer_base.py,sha256=6BiQfx26EMumotJ3KoVcdmFBYR_fnAss
  nat/utils/reactive/base/subject_base.py,sha256=UQOxlkZTIeeyYmG5qLtDpNf_63Y7p-doEeUA08_R8ME,2521
  nat/utils/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nat/utils/settings/global_settings.py,sha256=9JaO6pxKT_Pjw6rxJRsRlFCXdVKCl_xUKU2QHZQWWNM,7294
- nvidia_nat-1.3.0a20250904.dist-info/licenses/LICENSE-3rd-party.txt,sha256=fOk5jMmCX9YoKWyYzTtfgl-SUy477audFC5hNY4oP7Q,284609
- nvidia_nat-1.3.0a20250904.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
- nvidia_nat-1.3.0a20250904.dist-info/METADATA,sha256=9HdvGhtE8HFgl2Sk2i2jeLzGUH1cuDfkSCTdvVtGjU0,21933
- nvidia_nat-1.3.0a20250904.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- nvidia_nat-1.3.0a20250904.dist-info/entry_points.txt,sha256=FNh4pZVSe_61s29zdks66lmXBPtsnko8KSZ4ffv7WVE,653
- nvidia_nat-1.3.0a20250904.dist-info/top_level.txt,sha256=lgJWLkigiVZuZ_O1nxVnD_ziYBwgpE2OStdaCduMEGc,8
- nvidia_nat-1.3.0a20250904.dist-info/RECORD,,
+ nvidia_nat-1.3.0a20250906.dist-info/licenses/LICENSE-3rd-party.txt,sha256=fOk5jMmCX9YoKWyYzTtfgl-SUy477audFC5hNY4oP7Q,284609
+ nvidia_nat-1.3.0a20250906.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+ nvidia_nat-1.3.0a20250906.dist-info/METADATA,sha256=qVb103Cu_wifrnyZGQACLnhWOJLB2JuT5sZbfHhdPV0,22086
+ nvidia_nat-1.3.0a20250906.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ nvidia_nat-1.3.0a20250906.dist-info/entry_points.txt,sha256=FNh4pZVSe_61s29zdks66lmXBPtsnko8KSZ4ffv7WVE,653
+ nvidia_nat-1.3.0a20250906.dist-info/top_level.txt,sha256=lgJWLkigiVZuZ_O1nxVnD_ziYBwgpE2OStdaCduMEGc,8
+ nvidia_nat-1.3.0a20250906.dist-info/RECORD,,