nvidia-nat-langchain 1.4.0a20251015__py3-none-any.whl → 1.4.0a20251021__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nvidia-nat-langchain might be problematic.
- nat/plugins/langchain/llm.py +54 -24
- {nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/METADATA +2 -2
- {nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/RECORD +8 -8
- {nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/WHEEL +0 -0
- {nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/entry_points.txt +0 -0
- {nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
- {nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/licenses/LICENSE.md +0 -0
- {nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/top_level.txt +0 -0
nat/plugins/langchain/llm.py
CHANGED
@@ -12,13 +12,16 @@
  12    12     # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13    13     # See the License for the specific language governing permissions and
  14    14     # limitations under the License.
        15  +  # pylint: disable=unused-argument
  15    16
        17  +  import logging
  16    18     from collections.abc import Sequence
  17    19     from typing import TypeVar
  18    20
  19    21     from nat.builder.builder import Builder
  20    22     from nat.builder.framework_enum import LLMFrameworkEnum
  21    23     from nat.cli.register_workflow import register_llm_client
        24  +  from nat.data_models.llm import APITypeEnum
  22    25     from nat.data_models.llm import LLMBaseConfig
  23    26     from nat.data_models.retry_mixin import RetryMixin
  24    27     from nat.data_models.thinking_mixin import ThinkingMixin

@@ -31,8 +34,11 @@ from nat.llm.utils.thinking import BaseThinkingInjector
  31    34     from nat.llm.utils.thinking import FunctionArgumentWrapper
  32    35     from nat.llm.utils.thinking import patch_with_thinking
  33    36     from nat.utils.exception_handlers.automatic_retries import patch_with_retry
        37  +  from nat.utils.responses_api import validate_no_responses_api
  34    38     from nat.utils.type_utils import override
  35    39
        40  +  logger = logging.getLogger(__name__)
        41  +
  36    42     ModelType = TypeVar("ModelType")
  37    43
  38    44
@@ -65,20 +71,22 @@ def _patch_llm_based_on_config(client: ModelType, llm_config: LLMBaseConfig) ->
  65    71         Raises:
  66    72             ValueError: If the messages are not a valid type for LanguageModelInput.
  67    73         """
  68         -
  69         -
  70         -      new_messages = [system_message, messages]
  71         -      return FunctionArgumentWrapper(new_messages, *args, **kwargs)
  72         -  elif isinstance(messages, PromptValue):
  73         -      new_messages = [system_message, *messages.to_messages()]
  74         -      return FunctionArgumentWrapper(new_messages, *args, **kwargs)
        74  +  if isinstance(messages, PromptValue):
        75  +      messages = messages.to_messages()
  75    76     elif isinstance(messages, str):
  76         -
  77         -
  78         -
  79         -
  80         -
  81         -
        77  +      messages = [HumanMessage(content=messages)]
        78  +
        79  +  if isinstance(messages, Sequence) and all(isinstance(m, BaseMessage) for m in messages):
        80  +      for i, message in enumerate(messages):
        81  +          if isinstance(message, SystemMessage):
        82  +              if self.system_prompt not in str(message.content):
        83  +                  messages = list(messages)
        84  +                  messages[i] = SystemMessage(content=f"{message.content}\n{self.system_prompt}")
        85  +              break
        86  +      else:
        87  +          messages = list(messages)
        88  +          messages.insert(0, SystemMessage(content=self.system_prompt))
        89  +      return FunctionArgumentWrapper(messages, *args, **kwargs)
  82    90     raise ValueError(f"Unsupported message type: {type(messages)}")
  83    91
  84    92     if isinstance(llm_config, RetryMixin):
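The rewritten injection path first normalizes any LanguageModelInput into a flat message list and then merges the thinking system prompt into the first existing SystemMessage, or prepends one if none is found. A standalone sketch of that behavior using only langchain-core types; the function name inject_system_prompt is illustrative and not part of the package:

from collections.abc import Sequence

from langchain_core.messages import BaseMessage
from langchain_core.messages import HumanMessage
from langchain_core.messages import SystemMessage
from langchain_core.prompt_values import PromptValue


def inject_system_prompt(messages, system_prompt: str) -> list[BaseMessage]:
    # Illustrative re-implementation of the normalization added in this hunk.
    if isinstance(messages, PromptValue):
        messages = messages.to_messages()
    elif isinstance(messages, str):
        messages = [HumanMessage(content=messages)]

    if isinstance(messages, Sequence) and all(isinstance(m, BaseMessage) for m in messages):
        messages = list(messages)
        for i, message in enumerate(messages):
            if isinstance(message, SystemMessage):
                # Merge the prompt into the first system message, only once.
                if system_prompt not in str(message.content):
                    messages[i] = SystemMessage(content=f"{message.content}\n{system_prompt}")
                break
        else:
            # No system message present: prepend one.
            messages.insert(0, SystemMessage(content=system_prompt))
        return messages

    raise ValueError(f"Unsupported message type: {type(messages)}")

For example, inject_system_prompt("What is 2 + 2?", "Show your reasoning.") yields a SystemMessage followed by the original HumanMessage, which matches what the patched inject method hands back inside FunctionArgumentWrapper.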
@@ -108,8 +116,10 @@ async def aws_bedrock_langchain(llm_config: AWSBedrockModelConfig, _builder: Bui
 108   116
 109   117     from langchain_aws import ChatBedrockConverse
 110   118
       119  +  validate_no_responses_api(llm_config, LLMFrameworkEnum.LANGCHAIN)
       120  +
 111   121     client = ChatBedrockConverse(**llm_config.model_dump(
 112         -      exclude={"type", "context_size", "thinking"},
       122  +      exclude={"type", "context_size", "thinking", "api_type"},
 113   123         by_alias=True,
 114   124         exclude_none=True,
 115   125     ))
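validate_no_responses_api is now called before constructing every client except the OpenAI one; its body is not part of this diff. A hypothetical sketch of such a guard, assuming it simply rejects Responses-API configs for providers that only speak Chat Completions (the real nat.utils.responses_api implementation may differ, and _reject_responses_api is a made-up name):

from nat.data_models.llm import APITypeEnum  # imported by the diff above


def _reject_responses_api(llm_config, framework) -> None:
    # Hypothetical guard, not the toolkit's actual code: fail fast when a config
    # asks for the Responses API from a client that cannot serve it.
    if getattr(llm_config, "api_type", None) == APITypeEnum.RESPONSES:
        raise ValueError(f"{framework} client for {type(llm_config).__name__} "
                         "does not support api_type 'responses'.")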
@@ -122,7 +132,10 @@ async def azure_openai_langchain(llm_config: AzureOpenAIModelConfig, _builder: B
 122   132
 123   133     from langchain_openai import AzureChatOpenAI
 124   134
 125         -
       135  +  validate_no_responses_api(llm_config, LLMFrameworkEnum.LANGCHAIN)
       136  +
       137  +  client = AzureChatOpenAI(
       138  +      **llm_config.model_dump(exclude={"type", "thinking", "api_type"}, by_alias=True, exclude_none=True))
 126   139
 127   140     yield _patch_llm_based_on_config(client, llm_config)
 128   141
@@ -132,9 +145,13 @@ async def nim_langchain(llm_config: NIMModelConfig, _builder: Builder):
 132   145
 133   146     from langchain_nvidia_ai_endpoints import ChatNVIDIA
 134   147
       148  +  validate_no_responses_api(llm_config, LLMFrameworkEnum.LANGCHAIN)
       149  +
 135   150     # prefer max_completion_tokens over max_tokens
 136   151     client = ChatNVIDIA(
 137         -      **llm_config.model_dump(exclude={"type", "max_tokens", "thinking"
       152  +      **llm_config.model_dump(exclude={"type", "max_tokens", "thinking", "api_type"},
       153  +                              by_alias=True,
       154  +                              exclude_none=True),
 138   155         max_completion_tokens=llm_config.max_tokens,
 139   156     )
 140   157
@@ -146,13 +163,23 @@ async def openai_langchain(llm_config: OpenAIModelConfig, _builder: Builder):
 146   163
 147   164     from langchain_openai import ChatOpenAI
 148   165
 149         -
 150         -
 151         -
 152         -
 153         -
 154         -
 155         -
       166  +  if llm_config.api_type == APITypeEnum.RESPONSES:
       167  +      client = ChatOpenAI(stream_usage=True,
       168  +                          use_responses_api=True,
       169  +                          use_previous_response_id=True,
       170  +                          **llm_config.model_dump(
       171  +                              exclude={"type", "thinking", "api_type"},
       172  +                              by_alias=True,
       173  +                              exclude_none=True,
       174  +                          ))
       175  +  else:
       176  +      # If stream_usage is specified, it will override the default value of True.
       177  +      client = ChatOpenAI(stream_usage=True,
       178  +                          **llm_config.model_dump(
       179  +                              exclude={"type", "thinking", "api_type"},
       180  +                              by_alias=True,
       181  +                              exclude_none=True,
       182  +                          ))
 156   183
 157   184     yield _patch_llm_based_on_config(client, llm_config)
 158   185
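For the OpenAI provider, api_type now selects between langchain-openai's Responses API mode and the default Chat Completions mode. A minimal sketch of the same two construction paths outside the toolkit; the model name, temperature, and api_key are placeholders rather than values taken from OpenAIModelConfig:

from langchain_openai import ChatOpenAI

# Placeholder settings; a real key (or the OPENAI_API_KEY env var) is required.
common = {"model": "gpt-4o-mini", "temperature": 0.0, "api_key": "sk-placeholder"}
use_responses = True  # stands in for llm_config.api_type == APITypeEnum.RESPONSES

if use_responses:
    # Responses API: stateful requests that can chain on the previous response id.
    client = ChatOpenAI(stream_usage=True,
                        use_responses_api=True,
                        use_previous_response_id=True,
                        **common)
else:
    # Chat Completions (default); stream_usage=True keeps token usage in streamed chunks.
    client = ChatOpenAI(stream_usage=True, **common)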
@@ -162,6 +189,9 @@ async def litellm_langchain(llm_config: LiteLlmModelConfig, _builder: Builder):
 162   189
 163   190     from langchain_litellm import ChatLiteLLM
 164   191
 165         -
       192  +  validate_no_responses_api(llm_config, LLMFrameworkEnum.LANGCHAIN)
       193  +
       194  +  client = ChatLiteLLM(
       195  +      **llm_config.model_dump(exclude={"type", "thinking", "api_type"}, by_alias=True, exclude_none=True))
 166   196
 167   197     yield _patch_llm_based_on_config(client, llm_config)
{nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
   1     1     Metadata-Version: 2.4
   2     2     Name: nvidia-nat-langchain
   3         -  Version: 1.4.0a20251015
         3  +  Version: 1.4.0a20251021
   4     4     Summary: Subpackage for LangChain/LangGraph integration in NeMo Agent toolkit
   5     5     Author: NVIDIA Corporation
   6     6     Maintainer: NVIDIA Corporation

@@ -16,7 +16,7 @@ Requires-Python: <3.14,>=3.11
  16    16     Description-Content-Type: text/markdown
  17    17     License-File: LICENSE-3rd-party.txt
  18    18     License-File: LICENSE.md
  19         -  Requires-Dist: nvidia-nat==v1.4.0a20251015
        19  +  Requires-Dist: nvidia-nat==v1.4.0a20251021
  20    20     Requires-Dist: langchain-aws~=0.2.31
  21    21     Requires-Dist: langchain-core~=0.3.75
  22    22     Requires-Dist: langchain-litellm~=0.2.3
{nvidia_nat_langchain-1.4.0a20251015.dist-info → nvidia_nat_langchain-1.4.0a20251021.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
   1     1     nat/meta/pypi.md,sha256=T_KFtTXVxhFM8Y6K3OlNByA5sTXLQuqqUpHgNOCvZBU,1120
   2     2     nat/plugins/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
   3     3     nat/plugins/langchain/embedder.py,sha256=ZSESaazyz7y3F0GSSsWRe_xfvxOe0Mwd45wEAkQ2jJk,3339
   4         -  nat/plugins/langchain/llm.py,sha256=
         4  +  nat/plugins/langchain/llm.py,sha256=6e-Ug6iahZdOJRLCJmVmQTgSYxu0xmslyANaUQG_qPY,8213
   5     5     nat/plugins/langchain/register.py,sha256=jgq6wSJoGQIZFJhS8RbUs25cLgNJjCkFu4M6qaWJS_4,906
   6     6     nat/plugins/langchain/retriever.py,sha256=SWbXXOezEUuPACnmSSU497NAmEVEMj2SrFJGodkRg34,2644
   7     7     nat/plugins/langchain/tool_wrapper.py,sha256=Zgb2_XB4bEhjPPeqS-ZH_OJT_pcQmteX7u03N_qCLfc,2121

@@ -10,10 +10,10 @@ nat/plugins/langchain/tools/code_generation_tool.py,sha256=f5pna0WMOx3QOS4WnaMFK
  10    10     nat/plugins/langchain/tools/register.py,sha256=uemxqLxcNk1bGX4crV52oMphLTZWonStzkXwTZeG2Rw,889
  11    11     nat/plugins/langchain/tools/tavily_internet_search.py,sha256=UFMP1xh_kC3fydMQBeV-oDZ-M7jnLcs5OkMSzgm7mng,2653
  12    12     nat/plugins/langchain/tools/wikipedia_search.py,sha256=431YwLsjoC_mdvMZ_gY0Q37Uqaue2ASnAHpwr4jWCaU,2197
  13         -  nvidia_nat_langchain-1.4.
  14         -  nvidia_nat_langchain-1.4.
  15         -  nvidia_nat_langchain-1.4.
  16         -  nvidia_nat_langchain-1.4.
  17         -  nvidia_nat_langchain-1.4.
  18         -  nvidia_nat_langchain-1.4.
  19         -  nvidia_nat_langchain-1.4.
        13  +  nvidia_nat_langchain-1.4.0a20251021.dist-info/licenses/LICENSE-3rd-party.txt,sha256=fOk5jMmCX9YoKWyYzTtfgl-SUy477audFC5hNY4oP7Q,284609
        14  +  nvidia_nat_langchain-1.4.0a20251021.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
        15  +  nvidia_nat_langchain-1.4.0a20251021.dist-info/METADATA,sha256=uwIoYnbuSoH7yDUDbfEqhwtrxpo5UcFnEBiz006f1oY,2230
        16  +  nvidia_nat_langchain-1.4.0a20251021.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
        17  +  nvidia_nat_langchain-1.4.0a20251021.dist-info/entry_points.txt,sha256=4deXsMn97I012HhDw0UjoqcZ8eEoZ7BnqaRx5QmzebY,123
        18  +  nvidia_nat_langchain-1.4.0a20251021.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
        19  +  nvidia_nat_langchain-1.4.0a20251021.dist-info/RECORD,,