nvidia-nat-langchain 1.3.0.dev2__py3-none-any.whl → 1.3.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nat/meta/pypi.md CHANGED
@@ -18,6 +18,6 @@ limitations under the License.
  ![NVIDIA NeMo Agent Toolkit](https://media.githubusercontent.com/media/NVIDIA/NeMo-Agent-Toolkit/refs/heads/main/docs/source/_static/banner.png "NeMo Agent toolkit banner image")

  # NVIDIA NeMo Agent Toolkit Subpackage
- This is a subpackage for LangChain and LangGraph integration in NeMo Agent toolkit.
+ This is a subpackage for LangChain/LangGraph integration in NeMo Agent toolkit.

  For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).
nat/plugins/langchain/embedder.py CHANGED
@@ -13,8 +13,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- # pylint: disable=unused-argument
-
  from nat.builder.builder import Builder
  from nat.builder.framework_enum import LLMFrameworkEnum
  from nat.cli.register_workflow import register_embedder_client
@@ -30,7 +28,7 @@ async def azure_openai_langchain(embedder_config: AzureOpenAIEmbedderModelConfig

      from langchain_openai import AzureOpenAIEmbeddings

-     client = AzureOpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True))
+     client = AzureOpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))

      if isinstance(embedder_config, RetryMixin):
          client = patch_with_retry(client,
@@ -46,7 +44,7 @@ async def nim_langchain(embedder_config: NIMEmbedderModelConfig, builder: Builde

      from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings

-     client = NVIDIAEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True))
+     client = NVIDIAEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))

      if isinstance(embedder_config, RetryMixin):
          client = patch_with_retry(client,
@@ -62,7 +60,7 @@ async def openai_langchain(embedder_config: OpenAIEmbedderModelConfig, builder:

      from langchain_openai import OpenAIEmbeddings

-     client = OpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True))
+     client = OpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))

      if isinstance(embedder_config, RetryMixin):
          client = patch_with_retry(client,
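The substantive change across all three embedder clients above is the addition of `exclude_none=True` to `model_dump`, so optional config fields that were never set are dropped instead of being forwarded to the client constructors as explicit `None` values. A minimal sketch of the difference, using a hypothetical Pydantic model in place of the NAT config classes:

```python
from pydantic import BaseModel


class EmbedderConfig(BaseModel):
    # Hypothetical stand-in for the NAT embedder config models.
    type: str = "openai"
    model: str = "text-embedding-3-small"
    api_key: str | None = None  # optional, left unset


cfg = EmbedderConfig()

# Before: unset optional fields are forwarded as api_key=None,
# overriding whatever default the client would otherwise apply.
print(cfg.model_dump(exclude={"type"}, by_alias=True))
# {'model': 'text-embedding-3-small', 'api_key': None}

# After: None-valued fields are dropped, so the client keeps its own
# defaults (e.g. reading the API key from the environment).
print(cfg.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))
# {'model': 'text-embedding-3-small'}
```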
nat/plugins/langchain/llm.py CHANGED
@@ -13,15 +13,94 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

+ from collections.abc import Sequence
+ from typing import TypeVar
+
  from nat.builder.builder import Builder
  from nat.builder.framework_enum import LLMFrameworkEnum
  from nat.cli.register_workflow import register_llm_client
+ from nat.data_models.llm import LLMBaseConfig
  from nat.data_models.retry_mixin import RetryMixin
+ from nat.data_models.thinking_mixin import ThinkingMixin
  from nat.llm.aws_bedrock_llm import AWSBedrockModelConfig
  from nat.llm.azure_openai_llm import AzureOpenAIModelConfig
+ from nat.llm.litellm_llm import LiteLlmModelConfig
  from nat.llm.nim_llm import NIMModelConfig
  from nat.llm.openai_llm import OpenAIModelConfig
+ from nat.llm.utils.thinking import BaseThinkingInjector
+ from nat.llm.utils.thinking import FunctionArgumentWrapper
+ from nat.llm.utils.thinking import patch_with_thinking
  from nat.utils.exception_handlers.automatic_retries import patch_with_retry
+ from nat.utils.type_utils import override
+
+ ModelType = TypeVar("ModelType")
+
+
+ def _patch_llm_based_on_config(client: ModelType, llm_config: LLMBaseConfig) -> ModelType:
+
+     from langchain_core.language_models import LanguageModelInput
+     from langchain_core.messages import BaseMessage
+     from langchain_core.messages import HumanMessage
+     from langchain_core.messages import SystemMessage
+     from langchain_core.prompt_values import PromptValue
+
+     class LangchainThinkingInjector(BaseThinkingInjector):
+
+         @override
+         def inject(self, messages: LanguageModelInput, *args, **kwargs) -> FunctionArgumentWrapper:
+             """
+             Inject a system prompt into the messages.
+
+             The messages are the first (non-object) argument to the function.
+             The rest of the arguments are passed through unchanged.
+
+             Args:
+                 messages: The messages to inject the system prompt into.
+                 *args: The rest of the arguments to the function.
+                 **kwargs: The rest of the keyword arguments to the function.
+
+             Returns:
+                 FunctionArgumentWrapper: An object that contains the transformed args and kwargs.
+
+             Raises:
+                 ValueError: If the messages are not a valid type for LanguageModelInput.
+             """
+             system_message = SystemMessage(content=self.system_prompt)
+             if isinstance(messages, BaseMessage):
+                 new_messages = [system_message, messages]
+                 return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+             elif isinstance(messages, PromptValue):
+                 new_messages = [system_message, *messages.to_messages()]
+                 return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+             elif isinstance(messages, str):
+                 new_messages = [system_message, HumanMessage(content=messages)]
+                 return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+             elif isinstance(messages, Sequence):
+                 if all(isinstance(m, BaseMessage) for m in messages):
+                     new_messages = [system_message, *list(messages)]
+                     return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+             raise ValueError(f"Unsupported message type: {type(messages)}")
+
+     if isinstance(llm_config, RetryMixin):
+         client = patch_with_retry(client,
+                                   retries=llm_config.num_retries,
+                                   retry_codes=llm_config.retry_on_status_codes,
+                                   retry_on_messages=llm_config.retry_on_errors)
+
+     if isinstance(llm_config, ThinkingMixin) and llm_config.thinking_system_prompt is not None:
+         client = patch_with_thinking(
+             client,
+             LangchainThinkingInjector(
+                 system_prompt=llm_config.thinking_system_prompt,
+                 function_names=[
+                     "invoke",
+                     "ainvoke",
+                     "stream",
+                     "astream",
+                 ],
+             ))
+
+     return client


  @register_llm_client(config_type=AWSBedrockModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
@@ -29,15 +108,13 @@ async def aws_bedrock_langchain(llm_config: AWSBedrockModelConfig, _builder: Bui

      from langchain_aws import ChatBedrockConverse

-     client = ChatBedrockConverse(**llm_config.model_dump(exclude={"type", "context_size"}, by_alias=True))
+     client = ChatBedrockConverse(**llm_config.model_dump(
+         exclude={"type", "context_size", "thinking"},
+         by_alias=True,
+         exclude_none=True,
+     ))

-     if isinstance(llm_config, RetryMixin):
-         client = patch_with_retry(client,
-                                   retries=llm_config.num_retries,
-                                   retry_codes=llm_config.retry_on_status_codes,
-                                   retry_on_messages=llm_config.retry_on_errors)
-
-     yield client
+     yield _patch_llm_based_on_config(client, llm_config)


  @register_llm_client(config_type=AzureOpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
@@ -45,15 +122,9 @@ async def azure_openai_langchain(llm_config: AzureOpenAIModelConfig, _builder: B

      from langchain_openai import AzureChatOpenAI

-     client = AzureChatOpenAI(**llm_config.model_dump(exclude={"type"}, by_alias=True))
-
-     if isinstance(llm_config, RetryMixin):
-         client = patch_with_retry(client,
-                                   retries=llm_config.num_retries,
-                                   retry_codes=llm_config.retry_on_status_codes,
-                                   retry_on_messages=llm_config.retry_on_errors)
+     client = AzureChatOpenAI(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True, exclude_none=True))

-     yield client
+     yield _patch_llm_based_on_config(client, llm_config)


  @register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
@@ -61,15 +132,13 @@ async def nim_langchain(llm_config: NIMModelConfig, _builder: Builder):

      from langchain_nvidia_ai_endpoints import ChatNVIDIA

-     client = ChatNVIDIA(**llm_config.model_dump(exclude={"type"}, by_alias=True))
+     # prefer max_completion_tokens over max_tokens
+     client = ChatNVIDIA(
+         **llm_config.model_dump(exclude={"type", "max_tokens", "thinking"}, by_alias=True, exclude_none=True),
+         max_completion_tokens=llm_config.max_tokens,
+     )

-     if isinstance(llm_config, RetryMixin):
-         client = patch_with_retry(client,
-                                   retries=llm_config.num_retries,
-                                   retry_codes=llm_config.retry_on_status_codes,
-                                   retry_on_messages=llm_config.retry_on_errors)
-
-     yield client
+     yield _patch_llm_based_on_config(client, llm_config)


  @register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
@@ -77,18 +146,22 @@ async def openai_langchain(llm_config: OpenAIModelConfig, _builder: Builder):

      from langchain_openai import ChatOpenAI

-     # Default kwargs for OpenAI to include usage metadata in the response. If the user has set stream_usage to False, we
-     # will not include this.
-     default_kwargs = {"stream_usage": True}
+     # If stream_usage is specified, it will override the default value of True.
+     client = ChatOpenAI(stream_usage=True,
+                         **llm_config.model_dump(
+                             exclude={"type", "thinking"},
+                             by_alias=True,
+                             exclude_none=True,
+                         ))

-     kwargs = {**default_kwargs, **llm_config.model_dump(exclude={"type"}, by_alias=True)}
+     yield _patch_llm_based_on_config(client, llm_config)

-     client = ChatOpenAI(**kwargs)

-     if isinstance(llm_config, RetryMixin):
-         client = patch_with_retry(client,
-                                   retries=llm_config.num_retries,
-                                   retry_codes=llm_config.retry_on_status_codes,
-                                   retry_on_messages=llm_config.retry_on_errors)
+ @register_llm_client(config_type=LiteLlmModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+ async def litellm_langchain(llm_config: LiteLlmModelConfig, _builder: Builder):
+
+     from langchain_litellm import ChatLiteLLM
+
+     client = ChatLiteLLM(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True, exclude_none=True))

-     yield client
+     yield _patch_llm_based_on_config(client, llm_config)
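Besides deduplicating the retry patching each client previously repeated, the new `_patch_llm_based_on_config` helper adds system-prompt ("thinking") injection for configs that opt in via `ThinkingMixin`. The sketch below replays the `inject` normalization logic on plain langchain-core types, outside the NAT wrappers, to show what each supported input shape becomes; `inject_system_prompt` is an illustrative name, not a toolkit API:

```python
from collections.abc import Sequence

from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.prompt_values import PromptValue


def inject_system_prompt(messages, system_prompt: str) -> list[BaseMessage]:
    """Prepend a SystemMessage, mirroring LangchainThinkingInjector.inject."""
    system_message = SystemMessage(content=system_prompt)
    if isinstance(messages, BaseMessage):  # a single message
        return [system_message, messages]
    if isinstance(messages, PromptValue):  # prompt value -> message list
        return [system_message, *messages.to_messages()]
    if isinstance(messages, str):  # bare string -> HumanMessage
        return [system_message, HumanMessage(content=messages)]
    if isinstance(messages, Sequence) and all(isinstance(m, BaseMessage) for m in messages):
        return [system_message, *messages]  # already a list of messages
    raise ValueError(f"Unsupported message type: {type(messages)}")


print(inject_system_prompt("What is 2 + 2?", "Think step by step before answering."))
# [SystemMessage(...), HumanMessage('What is 2 + 2?')] -- the same list the
# patched invoke/ainvoke/stream/astream entry points would receive.
```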
nat/plugins/langchain/register.py CHANGED
@@ -13,7 +13,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- # pylint: disable=unused-import
  # flake8: noqa
  # isort:skip_file

nat/plugins/langchain/tool_wrapper.py CHANGED
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)


  @register_tool_wrapper(wrapper_type=LLMFrameworkEnum.LANGCHAIN)
- def langchain_tool_wrapper(name: str, fn: Function, builder: Builder):  # pylint: disable=unused-argument
+ def langchain_tool_wrapper(name: str, fn: Function, builder: Builder):

      import asyncio

nat/plugins/langchain/tools/code_generation_tool.py CHANGED
@@ -61,6 +61,6 @@ You are a helpful code assistant that can teach a junior developer how to code.
          response = await tool.ainvoke({"question": query})
          if config.verbose:
              log.debug('Tool input was: %s\nTool output is: \n%s', query, response)
-         return response.content
+         return response.text()

      yield FunctionInfo.from_fn(_inner, description=config.description)
nat/plugins/langchain/tools/register.py CHANGED
@@ -13,7 +13,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- # pylint: disable=unused-import
  # flake8: noqa
  # isort:skip_file

nat/plugins/langchain/tools/tavily_internet_search.py CHANGED
@@ -33,7 +33,7 @@ class TavilyInternetSearchToolConfig(FunctionBaseConfig, name="tavily_internet_s
  async def tavily_internet_search(tool_config: TavilyInternetSearchToolConfig, builder: Builder):
      import os

-     from langchain_community.tools import TavilySearchResults
+     from langchain_tavily import TavilySearch

      if not os.environ.get("TAVILY_API_KEY"):
          os.environ["TAVILY_API_KEY"] = tool_config.api_key
@@ -41,20 +41,24 @@ async def tavily_internet_search(tool_config: TavilyInternetSearchToolConfig, bu
      # Refer to create_customize_workflow.md for instructions of getting the API key

      async def _tavily_internet_search(question: str) -> str:
+         """This tool retrieves relevant contexts from web search (using Tavily) for the given question.
+
+         Args:
+             question (str): The question to be answered.
+
+         Returns:
+             str: The web search results.
+         """
          # Search the web and get the requested amount of results
-         tavily_search = TavilySearchResults(max_results=tool_config.max_results)
+         tavily_search = TavilySearch(max_results=tool_config.max_results)
          search_docs = await tavily_search.ainvoke({'query': question})
          # Format
          web_search_results = "\n\n---\n\n".join(
-             [f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>' for doc in search_docs])
+             [f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>' for doc in search_docs["results"]])
          return web_search_results

      # Create a Generic NAT tool that can be used with any supported LLM framework
      yield FunctionInfo.from_fn(
          _tavily_internet_search,
-         description=("""This tool retrieves relevant contexts from web search (using Tavily) for the given question.
-
-             Args:
-                 question (str): The question to be answered.
-             """),
+         description=_tavily_internet_search.__doc__,
      )
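The switch from `langchain_community`'s deprecated `TavilySearchResults` to `langchain_tavily`'s `TavilySearch` also changes the tool's output shape, which is why the formatting loop now iterates `search_docs["results"]`: the old tool returned a bare list of result dicts, while the new one returns a dict with the hits nested under a `results` key. Roughly (field values illustrative):

```python
# Old TavilySearchResults output: a list of result dicts.
old_output = [
    {"url": "https://example.com", "content": "Page excerpt..."},
]

# New TavilySearch output: a dict with query metadata plus a "results" list.
new_output = {
    "query": "example question",
    "results": [
        {"url": "https://example.com", "title": "Example", "content": "Page excerpt...", "score": 0.97},
    ],
}

# Hence the change from `for doc in search_docs` to `for doc in search_docs["results"]`.
formatted = "\n\n---\n\n".join(
    f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>' for doc in new_output["results"])
print(formatted)
```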
nvidia_nat_langchain-1.3.0.dev2.dist-info/METADATA → nvidia_nat_langchain-1.3.0rc2.dist-info/METADATA RENAMED
@@ -1,19 +1,23 @@
  Metadata-Version: 2.4
  Name: nvidia-nat-langchain
- Version: 1.3.0.dev2
- Summary: Subpackage for LangChain and LangGraph integration in NeMo Agent toolkit
+ Version: 1.3.0rc2
+ Summary: Subpackage for LangChain/LangGraph integration in NeMo Agent toolkit
  Keywords: ai,rag,agents
  Classifier: Programming Language :: Python
- Requires-Python: <3.13,>=3.11
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: <3.14,>=3.11
  Description-Content-Type: text/markdown
- Requires-Dist: nvidia-nat==v1.3.0-dev2
- Requires-Dist: langchain-aws~=0.2.1
- Requires-Dist: langchain-core~=0.3.7
- Requires-Dist: langchain-nvidia-ai-endpoints~=0.3.5
- Requires-Dist: langchain-milvus~=0.1.5
- Requires-Dist: langchain-openai~=0.3.5
- Requires-Dist: langgraph~=0.2.50
- Requires-Dist: langchain-milvus~=0.1.8
+ Requires-Dist: nvidia-nat==v1.3.0-rc2
+ Requires-Dist: langchain-aws~=0.2.31
+ Requires-Dist: langchain-core~=0.3.75
+ Requires-Dist: langchain-litellm~=0.2.3
+ Requires-Dist: langchain-milvus~=0.2.1
+ Requires-Dist: langchain-nvidia-ai-endpoints~=0.3.17
+ Requires-Dist: langchain-openai~=0.3.32
+ Requires-Dist: langchain-tavily~=0.2.11
+ Requires-Dist: langgraph~=0.6.7

  <!--
  SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
@@ -35,6 +39,6 @@ limitations under the License.
  ![NVIDIA NeMo Agent Toolkit](https://media.githubusercontent.com/media/NVIDIA/NeMo-Agent-Toolkit/refs/heads/main/docs/source/_static/banner.png "NeMo Agent toolkit banner image")

  # NVIDIA NeMo Agent Toolkit Subpackage
- This is a subpackage for LangChain and LangGraph integration in NeMo Agent toolkit.
+ This is a subpackage for LangChain/LangGraph integration in NeMo Agent toolkit.

  For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).
nvidia_nat_langchain-1.3.0rc2.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
+ nat/meta/pypi.md,sha256=T_KFtTXVxhFM8Y6K3OlNByA5sTXLQuqqUpHgNOCvZBU,1120
+ nat/plugins/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nat/plugins/langchain/embedder.py,sha256=ZSESaazyz7y3F0GSSsWRe_xfvxOe0Mwd45wEAkQ2jJk,3339
+ nat/plugins/langchain/llm.py,sha256=vlR_4bMLmmpXTv4kp3xFOB1eFeJ-wX1_DqnsQpPunAo,7110
+ nat/plugins/langchain/register.py,sha256=jgq6wSJoGQIZFJhS8RbUs25cLgNJjCkFu4M6qaWJS_4,906
+ nat/plugins/langchain/retriever.py,sha256=SWbXXOezEUuPACnmSSU497NAmEVEMj2SrFJGodkRg34,2644
+ nat/plugins/langchain/tool_wrapper.py,sha256=Zgb2_XB4bEhjPPeqS-ZH_OJT_pcQmteX7u03N_qCLfc,2121
+ nat/plugins/langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nat/plugins/langchain/tools/code_generation_tool.py,sha256=f5pna0WMOx3QOS4WnaMFKD7tBZ1-tS0PfI0IMYobtTQ,2723
+ nat/plugins/langchain/tools/register.py,sha256=uemxqLxcNk1bGX4crV52oMphLTZWonStzkXwTZeG2Rw,889
+ nat/plugins/langchain/tools/tavily_internet_search.py,sha256=UFMP1xh_kC3fydMQBeV-oDZ-M7jnLcs5OkMSzgm7mng,2653
+ nat/plugins/langchain/tools/wikipedia_search.py,sha256=431YwLsjoC_mdvMZ_gY0Q37Uqaue2ASnAHpwr4jWCaU,2197
+ nvidia_nat_langchain-1.3.0rc2.dist-info/METADATA,sha256=Of1uSu5K0QU1cJa1mxcZy-PNdh71ScSSg50geCZPrkU,1913
+ nvidia_nat_langchain-1.3.0rc2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ nvidia_nat_langchain-1.3.0rc2.dist-info/entry_points.txt,sha256=4deXsMn97I012HhDw0UjoqcZ8eEoZ7BnqaRx5QmzebY,123
+ nvidia_nat_langchain-1.3.0rc2.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
+ nvidia_nat_langchain-1.3.0rc2.dist-info/RECORD,,
nvidia_nat_langchain-1.3.0.dev2.dist-info/RECORD DELETED
@@ -1,17 +0,0 @@
- nat/meta/pypi.md,sha256=-RewrXPwhrT6398iluvXb5lefn18PybmvRFhmZF7KVI,1124
- nat/plugins/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- nat/plugins/langchain/embedder.py,sha256=uFC1M9nF4nJ1GpgUO2vz-iy3T8zGDVJXpnSqQbuKJkk,3317
- nat/plugins/langchain/llm.py,sha256=HJZsk0i72ZN0QTb1hZSUV4IsLmE0Ab0JOYYnvtqJfno,4013
- nat/plugins/langchain/register.py,sha256=UwxFY-HsZ5n32XPpqDtLvBamiw1Pdavtf2oYSK_XGtY,938
- nat/plugins/langchain/retriever.py,sha256=SWbXXOezEUuPACnmSSU497NAmEVEMj2SrFJGodkRg34,2644
- nat/plugins/langchain/tool_wrapper.py,sha256=dLK09a8lrmytNkIuhTaSByuLdW23aVmIcr3REyuJTMA,2156
- nat/plugins/langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- nat/plugins/langchain/tools/code_generation_tool.py,sha256=qL3HBiOQzVPLw4EiUOWeswckuVX8ynG2UQXYBLxR_gI,2724
- nat/plugins/langchain/tools/register.py,sha256=3cf4RH2tQ_qOtZviwXQUqK5dKiVUJQYach4djxGpcOU,921
- nat/plugins/langchain/tools/tavily_internet_search.py,sha256=AnnLRY1xSU4DOzxbB8nFZRjHngXpqatPVOJ7yWV7jVw,2612
- nat/plugins/langchain/tools/wikipedia_search.py,sha256=431YwLsjoC_mdvMZ_gY0Q37Uqaue2ASnAHpwr4jWCaU,2197
- nvidia_nat_langchain-1.3.0.dev2.dist-info/METADATA,sha256=bKpfzosrGdCkCXCQu00t2VhKdTf5nJFahypMBHNn3I0,1727
- nvidia_nat_langchain-1.3.0.dev2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- nvidia_nat_langchain-1.3.0.dev2.dist-info/entry_points.txt,sha256=4deXsMn97I012HhDw0UjoqcZ8eEoZ7BnqaRx5QmzebY,123
- nvidia_nat_langchain-1.3.0.dev2.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
- nvidia_nat_langchain-1.3.0.dev2.dist-info/RECORD,,