nvidia-nat-langchain 1.3.dev0__py3-none-any.whl → 1.3.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nat/meta/pypi.md CHANGED
@@ -18,6 +18,6 @@ limitations under the License.
 ![NVIDIA NeMo Agent Toolkit](https://media.githubusercontent.com/media/NVIDIA/NeMo-Agent-Toolkit/refs/heads/main/docs/source/_static/banner.png "NeMo Agent toolkit banner image")
 
 # NVIDIA NeMo Agent Toolkit Subpackage
-This is a subpackage for LangChain and LangGraph integration in NeMo Agent toolkit.
+This is a subpackage for LangChain/LangGraph integration in NeMo Agent toolkit.
 
 For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).
nat/plugins/langchain/embedder.py CHANGED
@@ -12,23 +12,23 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# pylint: disable=unused-argument
 
 from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.cli.register_workflow import register_embedder_client
 from nat.data_models.retry_mixin import RetryMixin
+from nat.embedder.azure_openai_embedder import AzureOpenAIEmbedderModelConfig
 from nat.embedder.nim_embedder import NIMEmbedderModelConfig
 from nat.embedder.openai_embedder import OpenAIEmbedderModelConfig
 from nat.utils.exception_handlers.automatic_retries import patch_with_retry
 
 
-@register_embedder_client(config_type=OpenAIEmbedderModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
-async def openai_langchain(embedder_config: OpenAIEmbedderModelConfig, builder: Builder):
+@register_embedder_client(config_type=AzureOpenAIEmbedderModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+async def azure_openai_langchain(embedder_config: AzureOpenAIEmbedderModelConfig, builder: Builder):
 
-    from langchain_openai import OpenAIEmbeddings
+    from langchain_openai import AzureOpenAIEmbeddings
 
-    client = OpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True))
+    client = AzureOpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))
 
     if isinstance(embedder_config, RetryMixin):
         client = patch_with_retry(client,
@@ -44,7 +44,23 @@ async def nim_langchain(embedder_config: NIMEmbedderModelConfig, builder: Builde
 
     from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings
 
-    client = NVIDIAEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True))
+    client = NVIDIAEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))
+
+    if isinstance(embedder_config, RetryMixin):
+        client = patch_with_retry(client,
+                                  retries=embedder_config.num_retries,
+                                  retry_codes=embedder_config.retry_on_status_codes,
+                                  retry_on_messages=embedder_config.retry_on_errors)
+
+    yield client
+
+
+@register_embedder_client(config_type=OpenAIEmbedderModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+async def openai_langchain(embedder_config: OpenAIEmbedderModelConfig, builder: Builder):
+
+    from langchain_openai import OpenAIEmbeddings
+
+    client = OpenAIEmbeddings(**embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))
 
     if isinstance(embedder_config, RetryMixin):
         client = patch_with_retry(client,
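Every factory in this file now builds its constructor kwargs with the same `model_dump(exclude={"type"}, by_alias=True, exclude_none=True)` call. A minimal sketch of what the added `exclude_none=True` changes, using a hypothetical Pydantic v2 model rather than a real NAT config class:

```python
from pydantic import BaseModel, Field


class ExampleEmbedderConfig(BaseModel):
    """Hypothetical stand-in for a NAT embedder config."""
    type: str = "example"  # discriminator field, stripped via exclude={"type"}
    model_name: str = Field("nv-embed-v1", alias="model")  # dumped under its alias
    api_key: str | None = None  # optional field left unset


kwargs = ExampleEmbedderConfig().model_dump(exclude={"type"}, by_alias=True, exclude_none=True)
print(kwargs)  # {'model': 'nv-embed-v1'}
# Without exclude_none=True the dump would also carry api_key=None, overriding the
# embeddings client's own default behavior (e.g. reading the key from the environment).
```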
nat/plugins/langchain/llm.py CHANGED
@@ -13,22 +13,72 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from collections.abc import Sequence
+from typing import TypeVar
+
 from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.cli.register_workflow import register_llm_client
+from nat.data_models.llm import LLMBaseConfig
 from nat.data_models.retry_mixin import RetryMixin
+from nat.data_models.thinking_mixin import ThinkingMixin
 from nat.llm.aws_bedrock_llm import AWSBedrockModelConfig
+from nat.llm.azure_openai_llm import AzureOpenAIModelConfig
 from nat.llm.nim_llm import NIMModelConfig
 from nat.llm.openai_llm import OpenAIModelConfig
+from nat.llm.utils.thinking import BaseThinkingInjector
+from nat.llm.utils.thinking import FunctionArgumentWrapper
+from nat.llm.utils.thinking import patch_with_thinking
 from nat.utils.exception_handlers.automatic_retries import patch_with_retry
-
-
-@register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
-async def nim_langchain(llm_config: NIMModelConfig, builder: Builder):
-
-    from langchain_nvidia_ai_endpoints import ChatNVIDIA
-
-    client = ChatNVIDIA(**llm_config.model_dump(exclude={"type"}, by_alias=True))
+from nat.utils.type_utils import override
+
+ModelType = TypeVar("ModelType")
+
+
+def _patch_llm_based_on_config(client: ModelType, llm_config: LLMBaseConfig) -> ModelType:
+
+    from langchain_core.language_models import LanguageModelInput
+    from langchain_core.messages import BaseMessage
+    from langchain_core.messages import HumanMessage
+    from langchain_core.messages import SystemMessage
+    from langchain_core.prompt_values import PromptValue
+
+    class LangchainThinkingInjector(BaseThinkingInjector):
+
+        @override
+        def inject(self, messages: LanguageModelInput, *args, **kwargs) -> FunctionArgumentWrapper:
+            """
+            Inject a system prompt into the messages.
+
+            The messages are the first (non-object) argument to the function.
+            The rest of the arguments are passed through unchanged.
+
+            Args:
+                messages: The messages to inject the system prompt into.
+                *args: The rest of the arguments to the function.
+                **kwargs: The rest of the keyword arguments to the function.
+
+            Returns:
+                FunctionArgumentWrapper: An object that contains the transformed args and kwargs.
+
+            Raises:
+                ValueError: If the messages are not a valid type for LanguageModelInput.
+            """
+            system_message = SystemMessage(content=self.system_prompt)
+            if isinstance(messages, BaseMessage):
+                new_messages = [system_message, messages]
+                return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+            elif isinstance(messages, PromptValue):
+                new_messages = [system_message, *messages.to_messages()]
+                return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+            elif isinstance(messages, str):
+                new_messages = [system_message, HumanMessage(content=messages)]
+                return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+            elif isinstance(messages, Sequence):
+                if all(isinstance(m, BaseMessage) for m in messages):
+                    new_messages = [system_message, *list(messages)]
+                    return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+            raise ValueError(f"Unsupported message type: {type(messages)}")
 
     if isinstance(llm_config, RetryMixin):
         client = patch_with_retry(client,
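The `inject` override above normalizes all four `LanguageModelInput` shapes before prepending the thinking prompt. A self-contained sketch of the same dispatch, runnable with only `langchain-core` (the `prepend_system` helper is illustrative, not part of NAT):

```python
from collections.abc import Sequence

from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.prompt_values import PromptValue


def prepend_system(messages, system_prompt: str) -> list[BaseMessage]:
    """Mirror of the inject() dispatch: always return [system, *rest]."""
    system_message = SystemMessage(content=system_prompt)
    if isinstance(messages, BaseMessage):  # a single message
        return [system_message, messages]
    if isinstance(messages, PromptValue):  # a rendered prompt
        return [system_message, *messages.to_messages()]
    if isinstance(messages, str):  # bare text becomes a human turn
        return [system_message, HumanMessage(content=messages)]
    if isinstance(messages, Sequence) and all(isinstance(m, BaseMessage) for m in messages):
        return [system_message, *messages]
    raise ValueError(f"Unsupported message type: {type(messages)}")


assert prepend_system("hi", "Think step by step.")[0].content == "Think step by step."
```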
@@ -36,42 +86,71 @@ async def nim_langchain(llm_config: NIMModelConfig, builder: Builder):
                                   retry_codes=llm_config.retry_on_status_codes,
                                   retry_on_messages=llm_config.retry_on_errors)
 
-    yield client
+    if isinstance(llm_config, ThinkingMixin) and llm_config.thinking_system_prompt is not None:
+        client = patch_with_thinking(
+            client,
+            LangchainThinkingInjector(
+                system_prompt=llm_config.thinking_system_prompt,
+                function_names=[
+                    "invoke",
+                    "ainvoke",
+                    "stream",
+                    "astream",
+                ],
+            ))
 
+    return client
 
-@register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
-async def openai_langchain(llm_config: OpenAIModelConfig, builder: Builder):
 
-    from langchain_openai import ChatOpenAI
+@register_llm_client(config_type=AWSBedrockModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+async def aws_bedrock_langchain(llm_config: AWSBedrockModelConfig, _builder: Builder):
 
-    # Default kwargs for OpenAI to include usage metadata in the response. If the user has set stream_usage to False, we
-    # will not include this.
-    default_kwargs = {"stream_usage": True}
+    from langchain_aws import ChatBedrockConverse
 
-    kwargs = {**default_kwargs, **llm_config.model_dump(exclude={"type"}, by_alias=True)}
+    client = ChatBedrockConverse(**llm_config.model_dump(
+        exclude={"type", "context_size", "thinking"},
+        by_alias=True,
+        exclude_none=True,
+    ))
 
-    client = ChatOpenAI(**kwargs)
+    yield _patch_llm_based_on_config(client, llm_config)
 
-    if isinstance(llm_config, RetryMixin):
-        client = patch_with_retry(client,
-                                  retries=llm_config.num_retries,
-                                  retry_codes=llm_config.retry_on_status_codes,
-                                  retry_on_messages=llm_config.retry_on_errors)
 
-    yield client
+@register_llm_client(config_type=AzureOpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+async def azure_openai_langchain(llm_config: AzureOpenAIModelConfig, _builder: Builder):
 
+    from langchain_openai import AzureChatOpenAI
 
-@register_llm_client(config_type=AWSBedrockModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
-async def aws_bedrock_langchain(llm_config: AWSBedrockModelConfig, builder: Builder):
+    client = AzureChatOpenAI(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True, exclude_none=True))
 
-    from langchain_aws import ChatBedrockConverse
+    yield _patch_llm_based_on_config(client, llm_config)
 
-    client = ChatBedrockConverse(**llm_config.model_dump(exclude={"type", "context_size"}, by_alias=True))
 
-    if isinstance(llm_config, RetryMixin):
-        client = patch_with_retry(client,
-                                  retries=llm_config.num_retries,
-                                  retry_codes=llm_config.retry_on_status_codes,
-                                  retry_on_messages=llm_config.retry_on_errors)
+@register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+async def nim_langchain(llm_config: NIMModelConfig, _builder: Builder):
+
+    from langchain_nvidia_ai_endpoints import ChatNVIDIA
+
+    # prefer max_completion_tokens over max_tokens
+    client = ChatNVIDIA(
+        **llm_config.model_dump(exclude={"type", "max_tokens", "thinking"}, by_alias=True, exclude_none=True),
+        max_completion_tokens=llm_config.max_tokens,
+    )
+
+    yield _patch_llm_based_on_config(client, llm_config)
+
+
+@register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+async def openai_langchain(llm_config: OpenAIModelConfig, _builder: Builder):
+
+    from langchain_openai import ChatOpenAI
+
+    # If stream_usage is specified, it will override the default value of True.
+    client = ChatOpenAI(stream_usage=True,
+                        **llm_config.model_dump(
+                            exclude={"type", "thinking"},
+                            by_alias=True,
+                            exclude_none=True,
+                        ))
 
-    yield client
+    yield _patch_llm_based_on_config(client, llm_config)
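One subtlety in `nim_langchain`: `max_tokens` must be excluded from the dump before `max_completion_tokens` is passed explicitly, because a key appearing both inside a `**` expansion and as a named keyword raises `TypeError`. A toy illustration with plain dicts (no NAT or LangChain imports; the names are hypothetical):

```python
dumped = {"model": "meta/llama-3.1-8b-instruct", "max_tokens": 512}  # pretend model_dump() output


def make_client(**kwargs):
    """Stand-in for the ChatNVIDIA constructor."""
    return kwargs


# Drop max_tokens from the expansion, then re-route its value explicitly.
client_kwargs = make_client(
    **{k: v for k, v in dumped.items() if k != "max_tokens"},
    max_completion_tokens=dumped["max_tokens"],
)
assert client_kwargs == {"model": "meta/llama-3.1-8b-instruct", "max_completion_tokens": 512}
```

The per-client `exclude` sets follow the same logic: keys such as `thinking` and `context_size` appear to be NAT-level settings that are handled before construction rather than accepted by the underlying LangChain constructors.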
nat/plugins/langchain/register.py CHANGED
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# pylint: disable=unused-import
 # flake8: noqa
 # isort:skip_file
 
nat/plugins/langchain/tool_wrapper.py CHANGED
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
 
 
 @register_tool_wrapper(wrapper_type=LLMFrameworkEnum.LANGCHAIN)
-def langchain_tool_wrapper(name: str, fn: Function, builder: Builder):  # pylint: disable=unused-argument
+def langchain_tool_wrapper(name: str, fn: Function, builder: Builder):
 
     import asyncio
 
nat/plugins/langchain/tools/code_generation_tool.py CHANGED
@@ -61,6 +61,6 @@ You are a helpful code assistant that can teach a junior developer how to code.
         response = await tool.ainvoke({"question": query})
         if config.verbose:
             log.debug('Tool input was: %s\nTool output is: \n%s', query, response)
-        return response.content
+        return response.text()
 
     yield FunctionInfo.from_fn(_inner, description=config.description)
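The `response.content` → `response.text()` change guards against providers whose message `content` is a list of typed blocks rather than a plain string; `text()` flattens the text blocks. A quick check, runnable with `langchain-core` ~0.3 (the message content here is made up):

```python
from langchain_core.messages import AIMessage

# .content may be a list of content blocks; .text() joins the text blocks into one str.
msg = AIMessage(content=[{"type": "text", "text": "def add(a, b):\n"},
                         {"type": "text", "text": "    return a + b"}])
assert isinstance(msg.content, list)  # returning .content would leak the block structure
assert msg.text() == "def add(a, b):\n    return a + b"
```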
nat/plugins/langchain/tools/register.py CHANGED
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# pylint: disable=unused-import
 # flake8: noqa
 # isort:skip_file
 
nat/plugins/langchain/tools/tavily_internet_search.py CHANGED
@@ -33,7 +33,7 @@ class TavilyInternetSearchToolConfig(FunctionBaseConfig, name="tavily_internet_s
 async def tavily_internet_search(tool_config: TavilyInternetSearchToolConfig, builder: Builder):
     import os
 
-    from langchain_community.tools import TavilySearchResults
+    from langchain_tavily import TavilySearch
 
     if not os.environ.get("TAVILY_API_KEY"):
         os.environ["TAVILY_API_KEY"] = tool_config.api_key
@@ -41,20 +41,24 @@ async def tavily_internet_search(tool_config: TavilyInternetSearchToolConfig, bu
     # Refer to create_customize_workflow.md for instructions of getting the API key
 
     async def _tavily_internet_search(question: str) -> str:
+        """This tool retrieves relevant contexts from web search (using Tavily) for the given question.
+
+        Args:
+            question (str): The question to be answered.
+
+        Returns:
+            str: The web search results.
+        """
         # Search the web and get the requested amount of results
-        tavily_search = TavilySearchResults(max_results=tool_config.max_results)
+        tavily_search = TavilySearch(max_results=tool_config.max_results)
         search_docs = await tavily_search.ainvoke({'query': question})
         # Format
         web_search_results = "\n\n---\n\n".join(
-            [f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>' for doc in search_docs])
+            [f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>' for doc in search_docs["results"]])
         return web_search_results
 
     # Create a Generic NAT tool that can be used with any supported LLM framework
     yield FunctionInfo.from_fn(
         _tavily_internet_search,
-        description=("""This tool retrieves relevant contexts from web search (using Tavily) for the given question.
-
-        Args:
-            question (str): The question to be answered.
-        """),
+        description=_tavily_internet_search.__doc__,
     )
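Besides the import move from `langchain_community` to the dedicated `langchain-tavily` package, the response shape changes: `TavilySearchResults` returned the result list directly, while `TavilySearch` returns a dict whose `results` key holds it, hence the new `search_docs["results"]` indexing. A stubbed run of the formatter (no network or API key; the response shape is inferred from the code above):

```python
# Stubbed TavilySearch-style response.
search_docs = {
    "results": [
        {"url": "https://example.com/a", "content": "First snippet."},
        {"url": "https://example.com/b", "content": "Second snippet."},
    ],
}

web_search_results = "\n\n---\n\n".join(
    f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
    for doc in search_docs["results"])
print(web_search_results)
```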
nvidia_nat_langchain-1.3.dev0.dist-info/METADATA → nvidia_nat_langchain-1.3.0rc1.dist-info/METADATA RENAMED
@@ -1,19 +1,22 @@
 Metadata-Version: 2.4
 Name: nvidia-nat-langchain
-Version: 1.3.dev0
-Summary: Subpackage for LangChain and LangGraph integration in NeMo Agent toolkit
+Version: 1.3.0rc1
+Summary: Subpackage for LangChain/LangGraph integration in NeMo Agent toolkit
 Keywords: ai,rag,agents
 Classifier: Programming Language :: Python
-Requires-Python: <3.13,>=3.11
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: <3.14,>=3.11
 Description-Content-Type: text/markdown
-Requires-Dist: nvidia-nat==v1.3-dev
-Requires-Dist: langchain-aws~=0.2.1
-Requires-Dist: langchain-core~=0.3.7
-Requires-Dist: langchain-nvidia-ai-endpoints~=0.3.5
-Requires-Dist: langchain-milvus~=0.1.5
-Requires-Dist: langchain-openai~=0.3.5
-Requires-Dist: langgraph~=0.2.50
-Requires-Dist: langchain-milvus~=0.1.8
+Requires-Dist: nvidia-nat==v1.3.0-rc1
+Requires-Dist: langchain-aws~=0.2.31
+Requires-Dist: langchain-core~=0.3.75
+Requires-Dist: langchain-milvus~=0.2.1
+Requires-Dist: langchain-nvidia-ai-endpoints~=0.3.17
+Requires-Dist: langchain-openai~=0.3.32
+Requires-Dist: langchain-tavily~=0.2.11
+Requires-Dist: langgraph~=0.6.7
 
 <!--
 SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
@@ -35,6 +38,6 @@ limitations under the License.
 ![NVIDIA NeMo Agent Toolkit](https://media.githubusercontent.com/media/NVIDIA/NeMo-Agent-Toolkit/refs/heads/main/docs/source/_static/banner.png "NeMo Agent toolkit banner image")
 
 # NVIDIA NeMo Agent Toolkit Subpackage
-This is a subpackage for LangChain and LangGraph integration in NeMo Agent toolkit.
+This is a subpackage for LangChain/LangGraph integration in NeMo Agent toolkit.
 
 For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).
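All pins use compatible-release (`~=`) specifiers: `langchain-core~=0.3.75`, for example, accepts any `0.3.x` at or above `0.3.75` but rejects `0.4.0`. A quick check with the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.3.75")
assert "0.3.80" in spec      # later patch release within 0.3: accepted
assert "0.3.74" not in spec  # below the floor: rejected
assert "0.4.0" not in spec   # next minor series: rejected
```

The one exact pin, `nvidia-nat==v1.3.0-rc1`, keeps the subpackage locked to the matching core release.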
nvidia_nat_langchain-1.3.0rc1.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
+nat/meta/pypi.md,sha256=T_KFtTXVxhFM8Y6K3OlNByA5sTXLQuqqUpHgNOCvZBU,1120
+nat/plugins/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nat/plugins/langchain/embedder.py,sha256=ZSESaazyz7y3F0GSSsWRe_xfvxOe0Mwd45wEAkQ2jJk,3339
+nat/plugins/langchain/llm.py,sha256=wWIRlCehT391X1KiISA5wIZe2dHoPw6K6Q2mhJfyvno,6663
+nat/plugins/langchain/register.py,sha256=jgq6wSJoGQIZFJhS8RbUs25cLgNJjCkFu4M6qaWJS_4,906
+nat/plugins/langchain/retriever.py,sha256=SWbXXOezEUuPACnmSSU497NAmEVEMj2SrFJGodkRg34,2644
+nat/plugins/langchain/tool_wrapper.py,sha256=Zgb2_XB4bEhjPPeqS-ZH_OJT_pcQmteX7u03N_qCLfc,2121
+nat/plugins/langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nat/plugins/langchain/tools/code_generation_tool.py,sha256=f5pna0WMOx3QOS4WnaMFKD7tBZ1-tS0PfI0IMYobtTQ,2723
+nat/plugins/langchain/tools/register.py,sha256=uemxqLxcNk1bGX4crV52oMphLTZWonStzkXwTZeG2Rw,889
+nat/plugins/langchain/tools/tavily_internet_search.py,sha256=UFMP1xh_kC3fydMQBeV-oDZ-M7jnLcs5OkMSzgm7mng,2653
+nat/plugins/langchain/tools/wikipedia_search.py,sha256=431YwLsjoC_mdvMZ_gY0Q37Uqaue2ASnAHpwr4jWCaU,2197
+nvidia_nat_langchain-1.3.0rc1.dist-info/METADATA,sha256=HJfTGrRraz-qPLO_ycuWtEiK37s-c752-CHbH0Z5MTY,1873
+nvidia_nat_langchain-1.3.0rc1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nvidia_nat_langchain-1.3.0rc1.dist-info/entry_points.txt,sha256=4deXsMn97I012HhDw0UjoqcZ8eEoZ7BnqaRx5QmzebY,123
+nvidia_nat_langchain-1.3.0rc1.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
+nvidia_nat_langchain-1.3.0rc1.dist-info/RECORD,,
nvidia_nat_langchain-1.3.dev0.dist-info/RECORD DELETED
@@ -1,17 +0,0 @@
-nat/meta/pypi.md,sha256=-RewrXPwhrT6398iluvXb5lefn18PybmvRFhmZF7KVI,1124
-nat/plugins/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nat/plugins/langchain/embedder.py,sha256=UcDB4GYY0vW3pUiFqqKhNMgAmEZdRSUJUazjThkPHuQ,2519
-nat/plugins/langchain/llm.py,sha256=i_DffRmQ9PEELuXFYR2ouU0LRkPKAjwZi1z65xGrxws,3293
-nat/plugins/langchain/register.py,sha256=UwxFY-HsZ5n32XPpqDtLvBamiw1Pdavtf2oYSK_XGtY,938
-nat/plugins/langchain/retriever.py,sha256=SWbXXOezEUuPACnmSSU497NAmEVEMj2SrFJGodkRg34,2644
-nat/plugins/langchain/tool_wrapper.py,sha256=dLK09a8lrmytNkIuhTaSByuLdW23aVmIcr3REyuJTMA,2156
-nat/plugins/langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nat/plugins/langchain/tools/code_generation_tool.py,sha256=qL3HBiOQzVPLw4EiUOWeswckuVX8ynG2UQXYBLxR_gI,2724
-nat/plugins/langchain/tools/register.py,sha256=3cf4RH2tQ_qOtZviwXQUqK5dKiVUJQYach4djxGpcOU,921
-nat/plugins/langchain/tools/tavily_internet_search.py,sha256=AnnLRY1xSU4DOzxbB8nFZRjHngXpqatPVOJ7yWV7jVw,2612
-nat/plugins/langchain/tools/wikipedia_search.py,sha256=431YwLsjoC_mdvMZ_gY0Q37Uqaue2ASnAHpwr4jWCaU,2197
-nvidia_nat_langchain-1.3.dev0.dist-info/METADATA,sha256=pZXhSbwaTCLrIWXDHlT2VigunZoMe3s1qak16B5W6Wc,1722
-nvidia_nat_langchain-1.3.dev0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-nvidia_nat_langchain-1.3.dev0.dist-info/entry_points.txt,sha256=4deXsMn97I012HhDw0UjoqcZ8eEoZ7BnqaRx5QmzebY,123
-nvidia_nat_langchain-1.3.dev0.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
-nvidia_nat_langchain-1.3.dev0.dist-info/RECORD,,