nvidia-nat 1.3.0rc3__py3-none-any.whl → 1.4.0a20251010__py3-none-any.whl

This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
nat/agent/react_agent/register.py CHANGED
@@ -24,7 +24,6 @@ from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
 from nat.data_models.agent import AgentBaseConfig
 from nat.data_models.api_server import ChatRequest
-from nat.data_models.api_server import ChatRequestOrMessage
 from nat.data_models.api_server import ChatResponse
 from nat.data_models.api_server import Usage
 from nat.data_models.component_ref import FunctionGroupRef
@@ -71,6 +70,9 @@ class ReActAgentWorkflowConfig(AgentBaseConfig, OptimizableMixin, name="react_ag
         default=None,
         description="Provides the SYSTEM_PROMPT to use with the agent")  # defaults to SYSTEM_PROMPT in prompt.py
     max_history: int = Field(default=15, description="Maximum number of messages to keep in the conversation history.")
+    use_openai_api: bool = Field(default=False,
+                                 description=("Use OpenAI API for the input/output types to the function. "
+                                              "If False, strings will be used."))
     additional_instructions: str | None = OptimizableField(
         default=None,
         description="Additional instructions to provide to the agent in addition to the base prompt.",
@@ -116,23 +118,21 @@ async def react_agent_workflow(config: ReActAgentWorkflowConfig, builder: Builde
         pass_tool_call_errors_to_agent=config.pass_tool_call_errors_to_agent,
         normalize_tool_input_quotes=config.normalize_tool_input_quotes).build_graph()
 
-    async def _response_fn(chat_request_or_message: ChatRequestOrMessage) -> ChatResponse | str:
+    async def _response_fn(input_message: ChatRequest) -> ChatResponse:
         """
         Main workflow entry function for the ReAct Agent.
 
         This function invokes the ReAct Agent Graph and returns the response.
 
         Args:
-            chat_request_or_message (ChatRequestOrMessage): The input message to process
+            input_message (ChatRequest): The input message to process
 
         Returns:
-            ChatResponse | str: The response from the agent or error message
+            ChatResponse: The response from the agent or error message
         """
         try:
-            message = GlobalTypeConverter.get().convert(chat_request_or_message, to_type=ChatRequest)
-
             # initialize the starting state with the user query
-            messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in message.messages],
+            messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in input_message.messages],
                                                         max_tokens=config.max_history,
                                                         strategy="last",
                                                         token_counter=len,
@@ -153,16 +153,25 @@ async def react_agent_workflow(config: ReActAgentWorkflowConfig, builder: Builde
             content = str(output_message.content)
 
             # Create usage statistics for the response
-            prompt_tokens = sum(len(str(msg.content).split()) for msg in message.messages)
+            prompt_tokens = sum(len(str(msg.content).split()) for msg in input_message.messages)
             completion_tokens = len(content.split()) if content else 0
             total_tokens = prompt_tokens + completion_tokens
             usage = Usage(prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=total_tokens)
-            response = ChatResponse.from_string(content, usage=usage)
-            if chat_request_or_message.is_string:
-                return GlobalTypeConverter.get().convert(response, to_type=str)
-            return response
+            return ChatResponse.from_string(content, usage=usage)
+
         except Exception as ex:
             logger.exception("%s ReAct Agent failed with exception: %s", AGENT_LOG_PREFIX, str(ex))
             raise RuntimeError
 
-    yield FunctionInfo.from_fn(_response_fn, description=config.description)
+    if (config.use_openai_api):
+        yield FunctionInfo.from_fn(_response_fn, description=config.description)
+    else:
+
+        async def _str_api_fn(input_message: str) -> str:
+            oai_input = GlobalTypeConverter.get().try_convert(input_message, to_type=ChatRequest)
+
+            oai_output = await _response_fn(oai_input)
+
+            return GlobalTypeConverter.get().try_convert(oai_output, to_type=str)
+
+        yield FunctionInfo.from_fn(_str_api_fn, description=config.description)
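The hunks above swap the ChatRequestOrMessage entry point for a use_openai_api switch: when the flag is true the ChatRequest/ChatResponse function is registered as-is, otherwise a thin string wrapper converts in and out through the global type converter. A minimal, self-contained sketch of that adapter pattern, using stand-in FakeChatRequest/FakeChatResponse classes rather than the NAT models:

    import asyncio
    from dataclasses import dataclass, field

    @dataclass
    class FakeChatRequest:            # stand-in for nat.data_models.api_server.ChatRequest
        messages: list[dict] = field(default_factory=list)

    @dataclass
    class FakeChatResponse:           # stand-in for ChatResponse
        content: str = ""

    async def core_fn(request: FakeChatRequest) -> FakeChatResponse:
        # Typed entry point, i.e. what gets registered when use_openai_api=True.
        last = request.messages[-1]["content"] if request.messages else ""
        return FakeChatResponse(content=f"echo: {last}")

    async def string_fn(text: str) -> str:
        # Default string entry point (use_openai_api=False): wrap, call, unwrap.
        response = await core_fn(FakeChatRequest(messages=[{"role": "user", "content": text}]))
        return response.content

    print(asyncio.run(string_fn("hello")))    # -> echo: hello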
nat/agent/rewoo_agent/register.py CHANGED
@@ -25,7 +25,6 @@ from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
 from nat.data_models.agent import AgentBaseConfig
 from nat.data_models.api_server import ChatRequest
-from nat.data_models.api_server import ChatRequestOrMessage
 from nat.data_models.api_server import ChatResponse
 from nat.data_models.api_server import Usage
 from nat.data_models.component_ref import FunctionGroupRef
@@ -55,6 +54,9 @@ class ReWOOAgentWorkflowConfig(AgentBaseConfig, name="rewoo_agent"):
         description="The number of retries before raising a tool call error.",
         ge=1)
     max_history: int = Field(default=15, description="Maximum number of messages to keep in the conversation history.")
+    use_openai_api: bool = Field(default=False,
+                                 description=("Use OpenAI API for the input/output types to the function. "
+                                              "If False, strings will be used."))
     additional_planner_instructions: str | None = Field(
         default=None,
         validation_alias=AliasChoices("additional_planner_instructions", "additional_instructions"),
@@ -123,23 +125,21 @@ async def rewoo_agent_workflow(config: ReWOOAgentWorkflowConfig, builder: Builde
         tool_call_max_retries=config.tool_call_max_retries,
         raise_tool_call_error=config.raise_tool_call_error).build_graph()
 
-    async def _response_fn(chat_request_or_message: ChatRequestOrMessage) -> ChatResponse | str:
+    async def _response_fn(input_message: ChatRequest) -> ChatResponse:
         """
         Main workflow entry function for the ReWOO Agent.
 
         This function invokes the ReWOO Agent Graph and returns the response.
 
         Args:
-            chat_request_or_message (ChatRequestOrMessage): The input message to process
+            input_message (ChatRequest): The input message to process
 
         Returns:
-            ChatResponse | str: The response from the agent or error message
+            ChatResponse: The response from the agent or error message
         """
         try:
-            message = GlobalTypeConverter.get().convert(chat_request_or_message, to_type=ChatRequest)
-
             # initialize the starting state with the user query
-            messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in message.messages],
+            messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in input_message.messages],
                                                         max_tokens=config.max_history,
                                                         strategy="last",
                                                         token_counter=len,
@@ -160,16 +160,25 @@ async def rewoo_agent_workflow(config: ReWOOAgentWorkflowConfig, builder: Builde
             output_message = str(output_message)
 
             # Create usage statistics for the response
-            prompt_tokens = sum(len(str(msg.content).split()) for msg in message.messages)
+            prompt_tokens = sum(len(str(msg.content).split()) for msg in input_message.messages)
             completion_tokens = len(output_message.split()) if output_message else 0
             total_tokens = prompt_tokens + completion_tokens
             usage = Usage(prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=total_tokens)
-            response = ChatResponse.from_string(output_message, usage=usage)
-            if chat_request_or_message.is_string:
-                return GlobalTypeConverter.get().convert(response, to_type=str)
-            return response
+            return ChatResponse.from_string(output_message, usage=usage)
+
         except Exception as ex:
             logger.exception("ReWOO Agent failed with exception: %s", ex)
             raise RuntimeError
 
-    yield FunctionInfo.from_fn(_response_fn, description=config.description)
+    if (config.use_openai_api):
+        yield FunctionInfo.from_fn(_response_fn, description=config.description)
+
+    else:
+
+        async def _str_api_fn(input_message: str) -> str:
+            oai_input = GlobalTypeConverter.get().try_convert(input_message, to_type=ChatRequest)
+            oai_output = await _response_fn(oai_input)
+
+            return GlobalTypeConverter.get().try_convert(oai_output, to_type=str)
+
+        yield FunctionInfo.from_fn(_str_api_fn, description=config.description)
nat/agent/tool_calling_agent/register.py CHANGED
@@ -23,10 +23,8 @@ from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
 from nat.data_models.agent import AgentBaseConfig
 from nat.data_models.api_server import ChatRequest
-from nat.data_models.api_server import ChatRequestOrMessage
 from nat.data_models.component_ref import FunctionGroupRef
 from nat.data_models.component_ref import FunctionRef
-from nat.utils.type_converter import GlobalTypeConverter
 
 logger = logging.getLogger(__name__)
 
@@ -83,23 +81,21 @@ async def tool_calling_agent_workflow(config: ToolCallAgentWorkflowConfig, build
         handle_tool_errors=config.handle_tool_errors,
         return_direct=return_direct_tools).build_graph()
 
-    async def _response_fn(chat_request_or_message: ChatRequestOrMessage) -> str:
+    async def _response_fn(input_message: ChatRequest) -> str:
         """
         Main workflow entry function for the Tool Calling Agent.
 
         This function invokes the Tool Calling Agent Graph and returns the response.
 
         Args:
-            chat_request_or_message (ChatRequestOrMessage): The input message to process
+            input_message (ChatRequest): The input message to process
 
         Returns:
            str: The response from the agent or error message
         """
         try:
-            message = GlobalTypeConverter.get().convert(chat_request_or_message, to_type=ChatRequest)
-
             # initialize the starting state with the user query
-            messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in message.messages],
+            messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in input_message.messages],
                                                         max_tokens=config.max_history,
                                                         strategy="last",
                                                         token_counter=len,
nat/builder/function.py CHANGED
@@ -416,9 +416,8 @@ class FunctionGroup:
         """
         if not name.strip():
             raise ValueError("Function name cannot be empty or blank")
-        if not re.match(r"^[a-zA-Z0-9_.-]+$", name):
-            raise ValueError(
-                f"Function name can only contain letters, numbers, underscores, periods, and hyphens: {name}")
+        if not re.match(r"^[a-zA-Z0-9_-]+$", name):
+            raise ValueError(f"Function name can only contain letters, numbers, underscores, and hyphens: {name}")
         if name in self._functions:
             raise ValueError(f"Function {name} already exists in function group {self._instance_name}")
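The tightened pattern above removes `.` from the allowed character class, so dotted function names are now rejected at registration time. A quick, illustrative check against the new regex (the sample names are made up):

    import re

    NAME_RE = re.compile(r"^[a-zA-Z0-9_-]+$")

    for name in ("search_tool", "my-tool-2", "group.tool", "bad name"):
        print(f"{name!r} -> {bool(NAME_RE.match(name))}")
    # 'search_tool' -> True, 'my-tool-2' -> True, 'group.tool' -> False, 'bad name' -> False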
 
nat/data_models/api_server.py CHANGED
@@ -28,7 +28,6 @@ from pydantic import HttpUrl
 from pydantic import conlist
 from pydantic import field_serializer
 from pydantic import field_validator
-from pydantic import model_validator
 from pydantic_core.core_schema import ValidationInfo
 
 from nat.data_models.interactive import HumanPrompt
@@ -121,7 +120,15 @@ class Message(BaseModel):
     role: UserMessageContentRoleType
 
 
-class ChatRequestOptionals(BaseModel):
+class ChatRequest(BaseModel):
+    """
+    ChatRequest is a data model that represents a request to the NAT chat API.
+    Fully compatible with OpenAI Chat Completions API specification.
+    """
+
+    # Required fields
+    messages: typing.Annotated[list[Message], conlist(Message, min_length=1)]
+
     # Optional fields (OpenAI Chat Completions API compatible)
     model: str | None = Field(default=None, description="name of the model to use")
     frequency_penalty: float | None = Field(default=0.0,
@@ -146,16 +153,6 @@ class ChatRequestOptionals(BaseModel):
     parallel_tool_calls: bool | None = Field(default=True, description="Whether to enable parallel function calling")
     user: str | None = Field(default=None, description="Unique identifier representing end-user")
 
-
-class ChatRequest(ChatRequestOptionals):
-    """
-    ChatRequest is a data model that represents a request to the NAT chat API.
-    Fully compatible with OpenAI Chat Completions API specification.
-    """
-
-    # Required fields
-    messages: typing.Annotated[list[Message], conlist(Message, min_length=1)]
-
     model_config = ConfigDict(extra="allow",
                               json_schema_extra={
                                   "example": {
@@ -197,42 +194,6 @@ class ChatRequest(ChatRequestOptionals):
                    top_p=top_p)
 
 
-class ChatRequestOrMessage(ChatRequestOptionals):
-    """
-    ChatRequestOrMessage is a data model that represents either a conversation or a string input.
-    This is useful for functions that can handle either type of input.
-
-    `messages` is compatible with the OpenAI Chat Completions API specification.
-
-    `input_string` is a string input that can be used for functions that do not require a conversation.
-    """
-
-    messages: typing.Annotated[list[Message] | None, conlist(Message, min_length=1)] = Field(
-        default=None, description="The conversation messages to process.")
-
-    input_string: str | None = Field(default=None, alias="input_message", description="The input message to process.")
-
-    @property
-    def is_string(self) -> bool:
-        return self.input_string is not None
-
-    @property
-    def is_conversation(self) -> bool:
-        return self.messages is not None
-
-    @model_validator(mode="after")
-    def validate_messages_or_input_string(self):
-        if self.messages is not None and self.input_string is not None:
-            raise ValueError("Either messages or input_message/input_string must be provided, not both")
-        if self.messages is None and self.input_string is None:
-            raise ValueError("Either messages or input_message/input_string must be provided")
-        if self.input_string is not None:
-            extra_fields = self.model_dump(exclude={"input_string"}, exclude_none=True, exclude_unset=True)
-            if len(extra_fields) > 0:
-                raise ValueError("no extra fields are permitted when input_message/input_string is provided")
-        return self
-
-
 class ChoiceMessage(BaseModel):
     content: str | None = None
     role: UserMessageContentRoleType | None = None
@@ -700,36 +661,6 @@ def _string_to_nat_chat_request(data: str) -> ChatRequest:
 GlobalTypeConverter.register_converter(_string_to_nat_chat_request)
 
 
-def _chat_request_or_message_to_chat_request(data: ChatRequestOrMessage) -> ChatRequest:
-    if data.input_string is not None:
-        return _string_to_nat_chat_request(data.input_string)
-    return ChatRequest(**data.model_dump(exclude={"input_string"}))
-
-
-GlobalTypeConverter.register_converter(_chat_request_or_message_to_chat_request)
-
-
-def _chat_request_to_chat_request_or_message(data: ChatRequest) -> ChatRequestOrMessage:
-    return ChatRequestOrMessage(**data.model_dump(by_alias=True))
-
-
-GlobalTypeConverter.register_converter(_chat_request_to_chat_request_or_message)
-
-
-def _chat_request_or_message_to_string(data: ChatRequestOrMessage) -> str:
-    return data.input_string or ""
-
-
-GlobalTypeConverter.register_converter(_chat_request_or_message_to_string)
-
-
-def _string_to_chat_request_or_message(data: str) -> ChatRequestOrMessage:
-    return ChatRequestOrMessage(input_message=data)
-
-
-GlobalTypeConverter.register_converter(_string_to_chat_request_or_message)
-
-
 # ======== ChatResponse Converters ========
 def _nat_chat_response_to_string(data: ChatResponse) -> str:
     if data.choices and data.choices[0].message:
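With ChatRequestOptionals and ChatRequestOrMessage removed, ChatRequest once again declares the required `messages` list alongside the optional OpenAI-compatible fields on a single model. A simplified, self-contained sketch of that shape (MiniChatRequest and its field list are illustrative, and Field(min_length=1) stands in for the conlist constraint):

    from pydantic import BaseModel, ConfigDict, Field

    class Message(BaseModel):
        role: str
        content: str

    class MiniChatRequest(BaseModel):
        model_config = ConfigDict(extra="allow")        # unknown OpenAI fields pass through
        messages: list[Message] = Field(min_length=1)   # required, at least one message
        model: str | None = Field(default=None, description="name of the model to use")

    req = MiniChatRequest(messages=[{"role": "user", "content": "hi"}], temperature=0.2)
    print(req.model_dump())
    # MiniChatRequest(messages=[]) would raise a ValidationError (min_length=1).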
nat/utils/type_converter.py CHANGED
@@ -93,14 +93,6 @@ class TypeConverter:
         if to_type is None or decomposed.is_instance(data):
             return data
 
-        # 2) If data is a union type, try to convert to each type in the union
-        if decomposed.is_union:
-            for union_type in decomposed.args:
-                result = self._convert(data, union_type)
-                if result is not None:
-                    return result
-            return None
-
         root = decomposed.root
 
         # 2) Attempt direct in *this* converter
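The block removed above fanned a union target out into per-member conversion attempts; with it gone, union targets fall through to the converter's regular lookup. A generic sketch of what that fan-out step did, written against plain typing rather than the TypeConverter internals (convert_to_union is a hypothetical helper):

    import typing

    def convert_to_union(data, target):
        # If the requested type is a Union, try each member in turn and return the
        # first successful conversion; otherwise convert to the single target type.
        if typing.get_origin(target) is typing.Union:
            for member in typing.get_args(target):
                if isinstance(data, member):
                    return data
                try:
                    return member(data)       # naive per-member attempt
                except Exception:
                    continue
            return None
        return target(data)

    print(convert_to_union(3.5, typing.Union[int, str]))   # int(3.5) -> 3
    print(convert_to_union("hi", typing.Union[int, str]))  # int("hi") fails, falls back to "hi"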
@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: nvidia-nat
-Version: 1.3.0rc3
+Version: 1.4.0a20251010
 Summary: NVIDIA NeMo Agent toolkit
 Author: NVIDIA Corporation
 Maintainer: NVIDIA Corporation
-License: Apache-2.0
+License-Expression: Apache-2.0
 Project-URL: documentation, https://docs.nvidia.com/nemo/agent-toolkit/latest/
 Project-URL: source, https://github.com/NVIDIA/NeMo-Agent-Toolkit
 Keywords: ai,rag,agents
@@ -14,8 +14,8 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Python: <3.14,>=3.11
 Description-Content-Type: text/markdown
-License-File: LICENSE-3rd-party.txt
 License-File: LICENSE.md
+License-File: LICENSE-3rd-party.txt
 Requires-Dist: aioboto3>=11.0.0
 Requires-Dist: authlib~=1.5
 Requires-Dist: click~=8.1
@@ -10,16 +10,16 @@ nat/agent/react_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 nat/agent/react_agent/agent.py,sha256=sWrg9WrglTKQQyG3EcjNm2JTEchCPEo9li-Po7TJKss,21294
 nat/agent/react_agent/output_parser.py,sha256=m7K6wRwtckBBpAHqOf3BZ9mqZLwrP13Kxz5fvNxbyZE,4219
 nat/agent/react_agent/prompt.py,sha256=N47JJrT6xwYQCv1jedHhlul2AE7EfKsSYfAbgJwWRew,1758
-nat/agent/react_agent/register.py,sha256=lpiso1tKq70ZYKbV9zXZegtXPLJNBaBrnG25R9hyA9Q,9008
+nat/agent/react_agent/register.py,sha256=wAoPkly7dE8bb5x8XFf5-C1qJQausLKQwQcFCby_dwU,9307
 nat/agent/reasoning_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nat/agent/reasoning_agent/reasoning_agent.py,sha256=k_0wEDqACQn1Rn1MAKxoXyqOKsthHCQ1gt990YYUqHU,9575
 nat/agent/rewoo_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nat/agent/rewoo_agent/agent.py,sha256=XXgVXY9xwkyxnr093KXUtfgyNxAQbyGAecoGqN5mMLY,26199
 nat/agent/rewoo_agent/prompt.py,sha256=B0JeL1xDX4VKcShlkkviEcAsOKAwzSlX8NcAQdmUUPw,3645
-nat/agent/rewoo_agent/register.py,sha256=s6D9W4x5jIkda8l67gj3A46aefk6KQPuZ4H-ZJkVAtY,9300
+nat/agent/rewoo_agent/register.py,sha256=GfJRQgpFWl-LQ-pPaG7EUeBH5u7pDZZNVP5cSweZJdM,9599
 nat/agent/tool_calling_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nat/agent/tool_calling_agent/agent.py,sha256=4SIp29I56oznPRQu7B3HCoX53Ri3_o3BRRYNJjeBkF8,11006
-nat/agent/tool_calling_agent/register.py,sha256=h1Xfr1KPvQkslPg-NqdOMQAmx1PNFAIvvOC5bAIJtbE,7074
+nat/agent/tool_calling_agent/register.py,sha256=ijiRfgDVtt2p7_q1YbIQZmUVV8-jf3yT18HwtKyReUI,6822
 nat/authentication/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
 nat/authentication/interfaces.py,sha256=1J2CWEJ_n6CLA3_HD3XV28CSbyfxrPAHzr7Q4kKDFdc,3511
 nat/authentication/register.py,sha256=lFhswYUk9iZ53mq33fClR9UfjJPdjGIivGGNHQeWiYo,915
@@ -48,7 +48,7 @@ nat/builder/eval_builder.py,sha256=I-ScvupmorClYoVBIs_PhSsB7Xf9e2nGWe0rCZp3txo,6
 nat/builder/evaluator.py,sha256=xWHMND2vcAUkdFP7FU3jnVki1rUHeTa0-9saFh2hWKs,1162
 nat/builder/framework_enum.py,sha256=n7IaTQBxhFozIQqRMcX9kXntw28JhFzCj82jJ0C5tNU,901
 nat/builder/front_end.py,sha256=FCJ87NSshVVuTg8zZrq3YAr_u0RaYVZVcibnqlRFy-M,2173
-nat/builder/function.py,sha256=3h51TA0D6EQGWjHDsoxa_8ooQcZpk_-yAndk4oc5dGo,27790
+nat/builder/function.py,sha256=RrfKSCt9WunPhwn5fk8X7wuvb9A21iO8T-IySHUi3KM,27763
 nat/builder/function_base.py,sha256=0Eg8RtjWhEU3Yme0CVxcRutobA0Qo8-YHZLI6L2qAgM,13116
 nat/builder/function_info.py,sha256=7Rmrn-gOFrT2TIJklJwA_O-ycx_oimwZ0-qMYpbuZrU,25161
 nat/builder/intermediate_step_manager.py,sha256=iOuMLWTaES0J0XzaLxhTUqFvuoCAChJu3V69T43K0k0,7599
@@ -112,7 +112,7 @@ nat/control_flow/router_agent/prompt.py,sha256=fIAiNsAs1zXRAatButR76zSpHJNxSkXXK
 nat/control_flow/router_agent/register.py,sha256=4RGmS9sy-QtIMmvh8mfMcR1VqxFPLpG4RckWCIExh40,4144
 nat/data_models/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
 nat/data_models/agent.py,sha256=IwDyb9Zc3R4Zd5rFeqt7q0EQswczAl5focxV9KozIzs,1625
-nat/data_models/api_server.py,sha256=NWT1ChN2qaakD2DgyYCy_7MhfzvEBQX15qnUXnpCQmk,28883
+nat/data_models/api_server.py,sha256=V8y1v9-5p4kmaQmmDU2N6m_V_CFJeozDzJEoIHOSV8w,26177
 nat/data_models/authentication.py,sha256=XPu9W8nh4XRSuxPv3HxO-FMQ_JtTEoK6Y02JwnzDwTg,8457
 nat/data_models/common.py,sha256=nXXfGrjpxebzBUa55mLdmzePLt7VFHvTAc6Znj3yEv0,5875
 nat/data_models/component.py,sha256=b_hXOA8Gm5UNvlFkAhsR6kEvf33ST50MKtr5kWf75Ao,1894
@@ -448,7 +448,7 @@ nat/utils/metadata_utils.py,sha256=BSsiB6jIWd8oEuEynJi55qCG762UuTYFaiUH0OT9HdY,2
 nat/utils/optional_imports.py,sha256=jQSVBc2fBSRw-2d6r8cEwvh5-di2EUUPakuuo9QbbwA,4039
 nat/utils/producer_consumer_queue.py,sha256=AcSYkAMBxLx06A5Xdy960PP3AJ7YaSPGJ7rbN_hJsjI,6599
 nat/utils/string_utils.py,sha256=71HuIzGx7rF8ocTmeoUBpnCi1Qf1yynYlNLLIKP4BVs,1415
-nat/utils/type_converter.py,sha256=vDZzrZ9ycWgZJdkWB1sHB2ivZX-E8fPfkrB-vAAxroI,10968
+nat/utils/type_converter.py,sha256=-2PwMsEm7tlmrniZzO7x2DnRxhOEeJGVAIJc3c5n2g4,10655
 nat/utils/type_utils.py,sha256=SMo5hM4dKf2G3U_0J0wvdFX6-lzMVSh8vd-W34Oixow,14836
 nat/utils/url_utils.py,sha256=UzDP_xaS6brWTu7vAws0B4jZyrITIK9Si3U6pZBZqDE,1028
 nat/utils/data_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -470,10 +470,10 @@ nat/utils/reactive/base/observer_base.py,sha256=6BiQfx26EMumotJ3KoVcdmFBYR_fnAss
 nat/utils/reactive/base/subject_base.py,sha256=UQOxlkZTIeeyYmG5qLtDpNf_63Y7p-doEeUA08_R8ME,2521
 nat/utils/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nat/utils/settings/global_settings.py,sha256=9JaO6pxKT_Pjw6rxJRsRlFCXdVKCl_xUKU2QHZQWWNM,7294
-nvidia_nat-1.3.0rc3.dist-info/licenses/LICENSE-3rd-party.txt,sha256=fOk5jMmCX9YoKWyYzTtfgl-SUy477audFC5hNY4oP7Q,284609
-nvidia_nat-1.3.0rc3.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
-nvidia_nat-1.3.0rc3.dist-info/METADATA,sha256=qZ0sBY6rZTYae27D_pu8g_fy-S9T4lOeReHqaNXKNOE,10222
-nvidia_nat-1.3.0rc3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-nvidia_nat-1.3.0rc3.dist-info/entry_points.txt,sha256=4jCqjyETMpyoWbCBf4GalZU8I_wbstpzwQNezdAVbbo,698
-nvidia_nat-1.3.0rc3.dist-info/top_level.txt,sha256=lgJWLkigiVZuZ_O1nxVnD_ziYBwgpE2OStdaCduMEGc,8
-nvidia_nat-1.3.0rc3.dist-info/RECORD,,
+nvidia_nat-1.4.0a20251010.dist-info/licenses/LICENSE-3rd-party.txt,sha256=fOk5jMmCX9YoKWyYzTtfgl-SUy477audFC5hNY4oP7Q,284609
+nvidia_nat-1.4.0a20251010.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+nvidia_nat-1.4.0a20251010.dist-info/METADATA,sha256=00VTeVFR2xV-WeSbD9_vuaSgV-cmgEgGTxnDpualLPM,10239
+nvidia_nat-1.4.0a20251010.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nvidia_nat-1.4.0a20251010.dist-info/entry_points.txt,sha256=4jCqjyETMpyoWbCBf4GalZU8I_wbstpzwQNezdAVbbo,698
+nvidia_nat-1.4.0a20251010.dist-info/top_level.txt,sha256=lgJWLkigiVZuZ_O1nxVnD_ziYBwgpE2OStdaCduMEGc,8
+nvidia_nat-1.4.0a20251010.dist-info/RECORD,,