nvidia-nat-langchain 1.3.0.dev2__py3-none-any.whl → 1.4.0a20251216__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nat/meta/pypi.md CHANGED
@@ -18,6 +18,6 @@ limitations under the License.
 ![NVIDIA NeMo Agent Toolkit](https://media.githubusercontent.com/media/NVIDIA/NeMo-Agent-Toolkit/refs/heads/main/docs/source/_static/banner.png "NeMo Agent toolkit banner image")
 
 # NVIDIA NeMo Agent Toolkit Subpackage
-This is a subpackage for LangChain and LangGraph integration in NeMo Agent toolkit.
+This is a subpackage for LangChain/LangGraph integration in NeMo Agent toolkit.
 
 For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).
nat/plugins/langchain/embedder.py CHANGED
@@ -13,8 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# pylint: disable=unused-argument
-
 from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.cli.register_workflow import register_embedder_client
@@ -30,7 +28,13 @@ async def azure_openai_langchain(embedder_config: AzureOpenAIEmbedderModelConfig
 
     from langchain_openai import AzureOpenAIEmbeddings
 
-    client = AzureOpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True))
+    client = AzureOpenAIEmbeddings(
+        **embedder_config.model_dump(exclude={"type", "api_version"},
+                                     by_alias=True,
+                                     exclude_none=True,
+                                     exclude_unset=True),
+        api_version=embedder_config.api_version,
+    )
 
     if isinstance(embedder_config, RetryMixin):
         client = patch_with_retry(client,
@@ -46,7 +50,8 @@ async def nim_langchain(embedder_config: NIMEmbedderModelConfig, builder: Builde
 
     from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings
 
-    client = NVIDIAEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True))
+    client = NVIDIAEmbeddings(
+        **embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True, exclude_unset=True))
 
     if isinstance(embedder_config, RetryMixin):
         client = patch_with_retry(client,
@@ -62,7 +67,8 @@ async def openai_langchain(embedder_config: OpenAIEmbedderModelConfig, builder:
 
     from langchain_openai import OpenAIEmbeddings
 
-    client = OpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True))
+    client = OpenAIEmbeddings(
+        **embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True, exclude_unset=True))
 
     if isinstance(embedder_config, RetryMixin):
         client = patch_with_retry(client,
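Note on the embedder changes: adding `exclude_none=True` and `exclude_unset=True` to `model_dump()` keeps optional config fields that the user never set from being forwarded as explicit `None` kwargs, which would otherwise override the LangChain clients' own defaults. A minimal standalone sketch of the difference, using a hypothetical `DemoEmbedderConfig` rather than the package's real config classes:

```python
from pydantic import BaseModel, Field


class DemoEmbedderConfig(BaseModel):
    """Hypothetical stand-in for an embedder config, for illustration only."""
    model_config = {"populate_by_name": True}

    type: str = "demo"
    model: str = Field(default="demo-embedder", alias="model_name")
    api_version: str | None = None
    truncate: str | None = None


cfg = DemoEmbedderConfig(model_name="nv-embed-v1")

# Old style: unset/None fields leak through and can clobber client defaults.
print(cfg.model_dump(exclude={"type"}, by_alias=True))
# -> {'model_name': 'nv-embed-v1', 'api_version': None, 'truncate': None}

# New style: only fields the user actually set, and nothing that is None.
print(cfg.model_dump(exclude={"type"}, by_alias=True, exclude_none=True, exclude_unset=True))
# -> {'model_name': 'nv-embed-v1'}
```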
nat/plugins/langchain/llm.py CHANGED
@@ -12,24 +12,82 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=unused-argument
+
+import logging
+from collections.abc import Sequence
+from typing import TypeVar
 
 from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.cli.register_workflow import register_llm_client
+from nat.data_models.llm import APITypeEnum
+from nat.data_models.llm import LLMBaseConfig
 from nat.data_models.retry_mixin import RetryMixin
+from nat.data_models.thinking_mixin import ThinkingMixin
 from nat.llm.aws_bedrock_llm import AWSBedrockModelConfig
 from nat.llm.azure_openai_llm import AzureOpenAIModelConfig
+from nat.llm.litellm_llm import LiteLlmModelConfig
 from nat.llm.nim_llm import NIMModelConfig
 from nat.llm.openai_llm import OpenAIModelConfig
+from nat.llm.utils.thinking import BaseThinkingInjector
+from nat.llm.utils.thinking import FunctionArgumentWrapper
+from nat.llm.utils.thinking import patch_with_thinking
 from nat.utils.exception_handlers.automatic_retries import patch_with_retry
+from nat.utils.responses_api import validate_no_responses_api
+from nat.utils.type_utils import override
 
+logger = logging.getLogger(__name__)
 
-@register_llm_client(config_type=AWSBedrockModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
-async def aws_bedrock_langchain(llm_config: AWSBedrockModelConfig, _builder: Builder):
+ModelType = TypeVar("ModelType")
 
-    from langchain_aws import ChatBedrockConverse
 
-    client = ChatBedrockConverse(**llm_config.model_dump(exclude={"type", "context_size"}, by_alias=True))
+def _patch_llm_based_on_config(client: ModelType, llm_config: LLMBaseConfig) -> ModelType:
+
+    from langchain_core.language_models import LanguageModelInput
+    from langchain_core.messages import BaseMessage
+    from langchain_core.messages import HumanMessage
+    from langchain_core.messages import SystemMessage
+    from langchain_core.prompt_values import PromptValue
+
+    class LangchainThinkingInjector(BaseThinkingInjector):
+
+        @override
+        def inject(self, messages: LanguageModelInput, *args, **kwargs) -> FunctionArgumentWrapper:
+            """
+            Inject a system prompt into the messages.
+
+            The messages are the first (non-object) argument to the function.
+            The rest of the arguments are passed through unchanged.
+
+            Args:
+                messages: The messages to inject the system prompt into.
+                *args: The rest of the arguments to the function.
+                **kwargs: The rest of the keyword arguments to the function.
+
+            Returns:
+                FunctionArgumentWrapper: An object that contains the transformed args and kwargs.
+
+            Raises:
+                ValueError: If the messages are not a valid type for LanguageModelInput.
+            """
+            if isinstance(messages, PromptValue):
+                messages = messages.to_messages()
+            elif isinstance(messages, str):
+                messages = [HumanMessage(content=messages)]
+
+            if isinstance(messages, Sequence) and all(isinstance(m, BaseMessage) for m in messages):
+                for i, message in enumerate(messages):
+                    if isinstance(message, SystemMessage):
+                        if self.system_prompt not in str(message.content):
+                            messages = list(messages)
+                            messages[i] = SystemMessage(content=f"{message.content}\n{self.system_prompt}")
+                        break
+                else:
+                    messages = list(messages)
+                    messages.insert(0, SystemMessage(content=self.system_prompt))
+                return FunctionArgumentWrapper(messages, *args, **kwargs)
+            raise ValueError(f"Unsupported message type: {type(messages)}")
 
     if isinstance(llm_config, RetryMixin):
         client = patch_with_retry(client,
@@ -37,7 +95,37 @@ async def aws_bedrock_langchain(llm_config: AWSBedrockModelConfig, _builder: Bui
                                   retry_codes=llm_config.retry_on_status_codes,
                                   retry_on_messages=llm_config.retry_on_errors)
 
-    yield client
+    if isinstance(llm_config, ThinkingMixin) and llm_config.thinking_system_prompt is not None:
+        client = patch_with_thinking(
+            client,
+            LangchainThinkingInjector(
+                system_prompt=llm_config.thinking_system_prompt,
+                function_names=[
+                    "invoke",
+                    "ainvoke",
+                    "stream",
+                    "astream",
+                ],
+            ))
+
+    return client
+
+
+@register_llm_client(config_type=AWSBedrockModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+async def aws_bedrock_langchain(llm_config: AWSBedrockModelConfig, _builder: Builder):
+
+    from langchain_aws import ChatBedrockConverse
+
+    validate_no_responses_api(llm_config, LLMFrameworkEnum.LANGCHAIN)
+
+    client = ChatBedrockConverse(**llm_config.model_dump(
+        exclude={"type", "context_size", "thinking", "api_type"},
+        by_alias=True,
+        exclude_none=True,
+        exclude_unset=True,
+    ))
+
+    yield _patch_llm_based_on_config(client, llm_config)
 
 
 @register_llm_client(config_type=AzureOpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
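Note on the thinking support: `_patch_llm_based_on_config` wraps the client's `invoke`, `ainvoke`, `stream`, and `astream` so that a configured thinking system prompt is injected into every call. A standalone sketch of just the injection rule (a hypothetical `inject_system_prompt` helper, simplified from `LangchainThinkingInjector`; the real injector also converts `PromptValue` and plain-string inputs first):

```python
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage


def inject_system_prompt(messages: list[BaseMessage], system_prompt: str) -> list[BaseMessage]:
    messages = list(messages)
    for i, message in enumerate(messages):
        if isinstance(message, SystemMessage):
            # Append once to the first existing system message, then stop scanning.
            if system_prompt not in str(message.content):
                messages[i] = SystemMessage(content=f"{message.content}\n{system_prompt}")
            break
    else:
        # for/else: no system message was found, so prepend one.
        messages.insert(0, SystemMessage(content=system_prompt))
    return messages


msgs = inject_system_prompt([HumanMessage(content="hi")], "Think step by step.")
assert isinstance(msgs[0], SystemMessage) and msgs[0].content == "Think step by step."
```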
@@ -45,15 +133,17 @@ async def azure_openai_langchain(llm_config: AzureOpenAIModelConfig, _builder: B
 
     from langchain_openai import AzureChatOpenAI
 
-    client = AzureChatOpenAI(**llm_config.model_dump(exclude={"type"}, by_alias=True))
+    validate_no_responses_api(llm_config, LLMFrameworkEnum.LANGCHAIN)
 
-    if isinstance(llm_config, RetryMixin):
-        client = patch_with_retry(client,
-                                  retries=llm_config.num_retries,
-                                  retry_codes=llm_config.retry_on_status_codes,
-                                  retry_on_messages=llm_config.retry_on_errors)
+    client = AzureChatOpenAI(
+        **llm_config.model_dump(exclude={"type", "thinking", "api_type", "api_version"},
+                                by_alias=True,
+                                exclude_none=True,
+                                exclude_unset=True),
+        api_version=llm_config.api_version,
+    )
 
-    yield client
+    yield _patch_llm_based_on_config(client, llm_config)
 
 
 @register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
@@ -61,15 +151,20 @@ async def nim_langchain(llm_config: NIMModelConfig, _builder: Builder):
 
     from langchain_nvidia_ai_endpoints import ChatNVIDIA
 
-    client = ChatNVIDIA(**llm_config.model_dump(exclude={"type"}, by_alias=True))
+    validate_no_responses_api(llm_config, LLMFrameworkEnum.LANGCHAIN)
 
-    if isinstance(llm_config, RetryMixin):
-        client = patch_with_retry(client,
-                                  retries=llm_config.num_retries,
-                                  retry_codes=llm_config.retry_on_status_codes,
-                                  retry_on_messages=llm_config.retry_on_errors)
+    # prefer max_completion_tokens over max_tokens
+    client = ChatNVIDIA(
+        **llm_config.model_dump(
+            exclude={"type", "max_tokens", "thinking", "api_type"},
+            by_alias=True,
+            exclude_none=True,
+            exclude_unset=True,
+        ),
+        max_completion_tokens=llm_config.max_tokens,
+    )
 
-    yield client
+    yield _patch_llm_based_on_config(client, llm_config)
 
 
 @register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
@@ -77,18 +172,37 @@ async def openai_langchain(llm_config: OpenAIModelConfig, _builder: Builder):
 
     from langchain_openai import ChatOpenAI
 
-    # Default kwargs for OpenAI to include usage metadata in the response. If the user has set stream_usage to False, we
-    # will not include this.
-    default_kwargs = {"stream_usage": True}
+    if llm_config.api_type == APITypeEnum.RESPONSES:
+        client = ChatOpenAI(stream_usage=True,
+                            use_responses_api=True,
+                            use_previous_response_id=True,
+                            **llm_config.model_dump(
+                                exclude={"type", "thinking", "api_type"},
+                                by_alias=True,
+                                exclude_none=True,
+                                exclude_unset=True,
+                            ))
+    else:
+        # If stream_usage is specified, it will override the default value of True.
+        client = ChatOpenAI(stream_usage=True,
+                            **llm_config.model_dump(
+                                exclude={"type", "thinking", "api_type"},
+                                by_alias=True,
+                                exclude_none=True,
+                                exclude_unset=True,
+                            ))
 
-    kwargs = {**default_kwargs, **llm_config.model_dump(exclude={"type"}, by_alias=True)}
+    yield _patch_llm_based_on_config(client, llm_config)
 
-    client = ChatOpenAI(**kwargs)
 
-    if isinstance(llm_config, RetryMixin):
-        client = patch_with_retry(client,
-                                  retries=llm_config.num_retries,
-                                  retry_codes=llm_config.retry_on_status_codes,
-                                  retry_on_messages=llm_config.retry_on_errors)
+@register_llm_client(config_type=LiteLlmModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+async def litellm_langchain(llm_config: LiteLlmModelConfig, _builder: Builder):
+
+    from langchain_litellm import ChatLiteLLM
+
+    validate_no_responses_api(llm_config, LLMFrameworkEnum.LANGCHAIN)
+
+    client = ChatLiteLLM(**llm_config.model_dump(
+        exclude={"type", "thinking", "api_type"}, by_alias=True, exclude_none=True, exclude_unset=True))
 
-    yield client
+    yield _patch_llm_based_on_config(client, llm_config)
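Note on the OpenAI changes: construction now branches on `api_type`, turning on `use_responses_api` and `use_previous_response_id` for the Responses API path while keeping `stream_usage=True` on both paths. A minimal sketch of the two resulting clients (the model name is illustrative, and `OPENAI_API_KEY` is assumed to be set):

```python
from langchain_openai import ChatOpenAI

# Responses API path: server-side state is threaded via previous_response_id.
responses_client = ChatOpenAI(
    model="gpt-4.1-mini",          # illustrative model name
    stream_usage=True,             # include token usage in streamed chunks
    use_responses_api=True,        # call the Responses API instead of Chat Completions
    use_previous_response_id=True,
)

# Chat Completions path: the toolkit passes stream_usage=True by default.
chat_client = ChatOpenAI(model="gpt-4.1-mini", stream_usage=True)
```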
nat/plugins/langchain/register.py CHANGED
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# pylint: disable=unused-import
 # flake8: noqa
 # isort:skip_file
 
nat/plugins/langchain/tool_wrapper.py CHANGED
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
 
 
 @register_tool_wrapper(wrapper_type=LLMFrameworkEnum.LANGCHAIN)
-def langchain_tool_wrapper(name: str, fn: Function, builder: Builder):  # pylint: disable=unused-argument
+def langchain_tool_wrapper(name: str, fn: Function, builder: Builder):
 
 
     import asyncio
nat/plugins/langchain/tools/code_generation_tool.py CHANGED
@@ -61,6 +61,6 @@ You are a helpful code assistant that can teach a junior developer how to code.
         response = await tool.ainvoke({"question": query})
         if config.verbose:
             log.debug('Tool input was: %s\nTool output is: \n%s', query, response)
-        return response.content
+        return response.text()
 
     yield FunctionInfo.from_fn(_inner, description=config.description)
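Note on `response.text()`: a chat model's `content` may be a plain string or a list of content blocks, and in langchain-core 0.3 the `text()` method collapses both to a string. A small illustration:

```python
from langchain_core.messages import AIMessage

plain = AIMessage(content="def add(a, b): return a + b")
blocks = AIMessage(content=[
    {"type": "text", "text": "def add(a, b): "},
    {"type": "text", "text": "return a + b"},
])

# text() returns the string content in both cases.
assert plain.text() == "def add(a, b): return a + b"
assert blocks.text() == "def add(a, b): return a + b"
```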
nat/plugins/langchain/tools/register.py CHANGED
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# pylint: disable=unused-import
 # flake8: noqa
 # isort:skip_file
 
nat/plugins/langchain/tools/tavily_internet_search.py CHANGED
@@ -13,9 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from pydantic import Field
+
 from nat.builder.builder import Builder
 from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
+from nat.data_models.common import SerializableSecretStr
+from nat.data_models.common import get_secret_value
 from nat.data_models.function import FunctionBaseConfig
 
 
@@ -26,35 +30,40 @@ class TavilyInternetSearchToolConfig(FunctionBaseConfig, name="tavily_internet_s
     Requires a TAVILY_API_KEY.
     """
     max_results: int = 3
-    api_key: str = ""
+    api_key: SerializableSecretStr = Field(default="", description="The API key for the Tavily service.")
 
 
 @register_function(config_type=TavilyInternetSearchToolConfig)
 async def tavily_internet_search(tool_config: TavilyInternetSearchToolConfig, builder: Builder):
     import os
 
-    from langchain_community.tools import TavilySearchResults
+    from langchain_tavily import TavilySearch
 
     if not os.environ.get("TAVILY_API_KEY"):
-        os.environ["TAVILY_API_KEY"] = tool_config.api_key
+        if tool_config.api_key:
+            os.environ["TAVILY_API_KEY"] = get_secret_value(tool_config.api_key)
     # This tavily tool requires an API Key and it must be set as an environment variable (TAVILY_API_KEY)
     # Refer to create_customize_workflow.md for instructions of getting the API key
 
     async def _tavily_internet_search(question: str) -> str:
+        """This tool retrieves relevant contexts from web search (using Tavily) for the given question.
+
+        Args:
+            question (str): The question to be answered.
+
+        Returns:
+            str: The web search results.
+        """
         # Search the web and get the requested amount of results
-        tavily_search = TavilySearchResults(max_results=tool_config.max_results)
+        tavily_search = TavilySearch(max_results=tool_config.max_results)
         search_docs = await tavily_search.ainvoke({'query': question})
         # Format
         web_search_results = "\n\n---\n\n".join(
-            [f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>' for doc in search_docs])
+            [f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>' for doc in search_docs["results"]])
         return web_search_results
 
     # Create a Generic NAT tool that can be used with any supported LLM framework
     yield FunctionInfo.from_fn(
         _tavily_internet_search,
-        description=("""This tool retrieves relevant contexts from web search (using Tavily) for the given question.
-
-        Args:
-            question (str): The question to be answered.
-        """),
+        description=_tavily_internet_search.__doc__,
     )
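Note on the Tavily migration: the deprecated `TavilySearchResults` from `langchain_community` returned a list of result dicts, while `TavilySearch` from the new `langchain-tavily` package returns a dict whose hits live under the `"results"` key, hence the `search_docs["results"]` change above. A minimal usage sketch (requires `TAVILY_API_KEY`; the query is illustrative):

```python
import asyncio

from langchain_tavily import TavilySearch


async def main() -> None:
    tavily_search = TavilySearch(max_results=3)
    search_docs = await tavily_search.ainvoke({"query": "What is the NeMo Agent toolkit?"})
    # The new package returns a dict; the individual hits are under "results".
    for doc in search_docs["results"]:
        print(doc["url"], "->", doc["content"][:80])


asyncio.run(main())
```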
{nvidia_nat_langchain-1.3.0.dev2.dist-info → nvidia_nat_langchain-1.4.0a20251216.dist-info}/METADATA RENAMED
@@ -1,19 +1,32 @@
 Metadata-Version: 2.4
 Name: nvidia-nat-langchain
-Version: 1.3.0.dev2
-Summary: Subpackage for LangChain and LangGraph integration in NeMo Agent toolkit
+Version: 1.4.0a20251216
+Summary: Subpackage for LangChain/LangGraph integration in NeMo Agent toolkit
+Author: NVIDIA Corporation
+Maintainer: NVIDIA Corporation
+License: Apache-2.0
+Project-URL: documentation, https://docs.nvidia.com/nemo/agent-toolkit/latest/
+Project-URL: source, https://github.com/NVIDIA/NeMo-Agent-Toolkit
 Keywords: ai,rag,agents
 Classifier: Programming Language :: Python
-Requires-Python: <3.13,>=3.11
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: <3.14,>=3.11
 Description-Content-Type: text/markdown
-Requires-Dist: nvidia-nat==v1.3.0-dev2
-Requires-Dist: langchain-aws~=0.2.1
-Requires-Dist: langchain-core~=0.3.7
-Requires-Dist: langchain-nvidia-ai-endpoints~=0.3.5
-Requires-Dist: langchain-milvus~=0.1.5
-Requires-Dist: langchain-openai~=0.3.5
-Requires-Dist: langgraph~=0.2.50
-Requires-Dist: langchain-milvus~=0.1.8
+License-File: LICENSE-3rd-party.txt
+License-File: LICENSE.md
+Requires-Dist: nvidia-nat==v1.4.0a20251216
+Requires-Dist: langchain~=0.3.27
+Requires-Dist: langchain-aws~=0.2.31
+Requires-Dist: langchain-core~=0.3.75
+Requires-Dist: langchain-litellm~=0.2.3
+Requires-Dist: langchain-milvus~=0.2.1
+Requires-Dist: langchain-nvidia-ai-endpoints~=0.3.17
+Requires-Dist: langchain-openai~=0.3.32
+Requires-Dist: langchain-tavily~=0.2.11
+Requires-Dist: langgraph~=0.6.7
+Dynamic: license-file
 
 <!--
 SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
@@ -35,6 +48,6 @@ limitations under the License.
 ![NVIDIA NeMo Agent Toolkit](https://media.githubusercontent.com/media/NVIDIA/NeMo-Agent-Toolkit/refs/heads/main/docs/source/_static/banner.png "NeMo Agent toolkit banner image")
 
 # NVIDIA NeMo Agent Toolkit Subpackage
-This is a subpackage for LangChain and LangGraph integration in NeMo Agent toolkit.
+This is a subpackage for LangChain/LangGraph integration in NeMo Agent toolkit.
 
 For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).
nvidia_nat_langchain-1.4.0a20251216.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
+nat/meta/pypi.md,sha256=T_KFtTXVxhFM8Y6K3OlNByA5sTXLQuqqUpHgNOCvZBU,1120
+nat/plugins/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nat/plugins/langchain/embedder.py,sha256=Ie-J4N4lvygW0zNKklKZVSYxYFcRW6p_QlRdcz0WxcE,3607
+nat/plugins/langchain/llm.py,sha256=FaEPleu_aBCsQ-6tt3ofr1A2Oa_ta60tMb0yGGhiWSo,8561
+nat/plugins/langchain/register.py,sha256=jgq6wSJoGQIZFJhS8RbUs25cLgNJjCkFu4M6qaWJS_4,906
+nat/plugins/langchain/retriever.py,sha256=SWbXXOezEUuPACnmSSU497NAmEVEMj2SrFJGodkRg34,2644
+nat/plugins/langchain/tool_wrapper.py,sha256=Zgb2_XB4bEhjPPeqS-ZH_OJT_pcQmteX7u03N_qCLfc,2121
+nat/plugins/langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nat/plugins/langchain/tools/code_generation_tool.py,sha256=f5pna0WMOx3QOS4WnaMFKD7tBZ1-tS0PfI0IMYobtTQ,2723
+nat/plugins/langchain/tools/register.py,sha256=uemxqLxcNk1bGX4crV52oMphLTZWonStzkXwTZeG2Rw,889
+nat/plugins/langchain/tools/tavily_internet_search.py,sha256=W5sdZ9hobPc3xbnWPSbtFBClIn14EM8xT0XUVF2HpWo,2928
+nat/plugins/langchain/tools/wikipedia_search.py,sha256=431YwLsjoC_mdvMZ_gY0Q37Uqaue2ASnAHpwr4jWCaU,2197
+nvidia_nat_langchain-1.4.0a20251216.dist-info/licenses/LICENSE-3rd-party.txt,sha256=fOk5jMmCX9YoKWyYzTtfgl-SUy477audFC5hNY4oP7Q,284609
+nvidia_nat_langchain-1.4.0a20251216.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+nvidia_nat_langchain-1.4.0a20251216.dist-info/METADATA,sha256=RbqeB8fHZdx2VKxjL9gxJkgyeeu6njb_k7_8ES9H6F4,2263
+nvidia_nat_langchain-1.4.0a20251216.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nvidia_nat_langchain-1.4.0a20251216.dist-info/entry_points.txt,sha256=4deXsMn97I012HhDw0UjoqcZ8eEoZ7BnqaRx5QmzebY,123
+nvidia_nat_langchain-1.4.0a20251216.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
+nvidia_nat_langchain-1.4.0a20251216.dist-info/RECORD,,
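For reference, each RECORD row has the form `path,sha256=<digest>,size`, where the digest is the urlsafe, unpadded base64 encoding of the file's SHA-256. A small sketch for verifying one entry against an installed file:

```python
import base64
import hashlib
from pathlib import Path


def record_hash(path: Path) -> str:
    """Return the RECORD-style digest ("sha256=<urlsafe b64, no padding>") for a file."""
    digest = hashlib.sha256(path.read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()


# For the file shipped in this wheel, this should print
# sha256=FaEPleu_aBCsQ-6tt3ofr1A2Oa_ta60tMb0yGGhiWSo
print(record_hash(Path("nat/plugins/langchain/llm.py")))
```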