nvidia-nat-test 1.3.0a20250904__py3-none-any.whl → 1.3.0a20250909__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nat/test/llm.py ADDED
@@ -0,0 +1,205 @@
1
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # pylint: disable=unused-argument,missing-class-docstring,missing-function-docstring,import-outside-toplevel
17
+ # pylint: disable=too-few-public-methods
18
+
19
+ import asyncio
20
+ import time
21
+ from collections.abc import AsyncGenerator
22
+ from collections.abc import Iterator
23
+ from itertools import cycle as iter_cycle
24
+ from typing import Any
25
+
26
+ from pydantic import Field
27
+
28
+ from nat.builder.builder import Builder
29
+ from nat.builder.framework_enum import LLMFrameworkEnum
30
+ from nat.builder.llm import LLMProviderInfo
31
+ from nat.cli.register_workflow import register_llm_client
32
+ from nat.cli.register_workflow import register_llm_provider
33
+ from nat.data_models.llm import LLMBaseConfig
34
+
35
+
36
class TestLLMConfig(LLMBaseConfig, name="nat_test_llm"):
    """Test LLM configuration.

    A deterministic stand-in for a real LLM: each call returns the next entry
    of ``response_seq`` (cycling back to the start), optionally after an
    artificial per-call delay of ``delay_ms`` milliseconds.
    """
    # Prevent pytest from collecting this class as a test case.
    __test__ = False
    # default_factory=list gives each config instance its own list — the
    # idiomatic pydantic way to declare a mutable default.
    response_seq: list[str] = Field(
        default_factory=list,
        description="Returns the next element in order (wraps)",
    )
    delay_ms: int = Field(default=0, ge=0, description="Artificial per-call delay in milliseconds to mimic latency")
44
+
45
+
46
class _ResponseChooser:
    """Cycle through configured canned responses and simulate latency.

    Wraps the configured response sequence in an endless iterator
    (``itertools.cycle``) and exposes blocking and non-blocking sleep
    helpers driven by a millisecond delay.
    """

    def __init__(self, response_seq: list[str], delay_ms: int):
        # An empty sequence means "always answer with an empty string".
        self._cycler = iter_cycle(response_seq) if response_seq else None
        self._delay_ms = delay_ms

    def next_response(self) -> str:
        """Return the next configured response (wrapping), or "" if none are configured."""
        return "" if self._cycler is None else next(self._cycler)

    def sync_sleep(self) -> None:
        """Block the caller for the configured delay."""
        time.sleep(self._delay_ms / 1000.0)

    async def async_sleep(self) -> None:
        """Asynchronously wait for the configured delay."""
        await asyncio.sleep(self._delay_ms / 1000.0)
67
+
68
+
69
@register_llm_provider(config_type=TestLLMConfig)
async def test_llm_provider(config: TestLLMConfig, builder: Builder):
    """Register the `nat_test_llm` provider with the NAT registry."""
    provider = LLMProviderInfo(config=config, description="Test LLM provider")
    yield provider
73
+
74
+
75
@register_llm_client(config_type=TestLLMConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
async def test_llm_langchain(config: TestLLMConfig, builder: Builder):
    """LLM client for LangChain.

    Yields a minimal stand-in for a LangChain chat model: every call sleeps
    for the configured delay and returns/streams the next canned response.
    """

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class LangChainTestLLM:
        """Duck-typed LangChain chat model: invoke/ainvoke/stream/astream."""

        def invoke(self, messages: Any, **_kwargs: Any) -> str:
            chooser.sync_sleep()
            return chooser.next_response()

        async def ainvoke(self, messages: Any, **_kwargs: Any) -> str:
            await chooser.async_sleep()
            return chooser.next_response()

        def stream(self, messages: Any, **_kwargs: Any) -> Iterator[str]:
            chooser.sync_sleep()
            yield chooser.next_response()

        # Fixed annotation: was AsyncGenerator[str] (missing the send-type
        # parameter); every other client in this module — and typing semantics
        # on the supported Python 3.11/3.12 — use AsyncGenerator[str, None].
        async def astream(self, messages: Any, **_kwargs: Any) -> AsyncGenerator[str, None]:
            await chooser.async_sleep()
            yield chooser.next_response()

    yield LangChainTestLLM()
100
+
101
+
102
@register_llm_client(config_type=TestLLMConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
async def test_llm_llama_index(config: TestLLMConfig, builder: Builder):
    """LLM client for LlamaIndex: yields a duck-typed chat client that returns canned responses."""

    try:
        from llama_index.core.base.llms.types import ChatMessage
        from llama_index.core.base.llms.types import ChatResponse
    except ImportError as exc:
        raise ImportError("llama_index is required for using the test_llm with llama_index. "
                          "Please install the `nvidia-nat-llama-index` package. ") from exc

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    def _canned_response() -> ChatResponse:
        # Package the next canned string as a llama_index chat response.
        return ChatResponse(message=ChatMessage(chooser.next_response()))

    class LITestLLM:
        """Duck-typed llama_index chat client: chat/achat/stream_chat/astream_chat."""

        def chat(self, messages: list[Any] | None = None, **_kwargs: Any) -> ChatResponse:
            chooser.sync_sleep()
            return _canned_response()

        async def achat(self, messages: list[Any] | None = None, **_kwargs: Any) -> ChatResponse:
            await chooser.async_sleep()
            return _canned_response()

        def stream_chat(self, messages: list[Any] | None = None, **_kwargs: Any) -> Iterator[ChatResponse]:
            chooser.sync_sleep()
            yield _canned_response()

        async def astream_chat(self,
                               messages: list[Any] | None = None,
                               **_kwargs: Any) -> AsyncGenerator[ChatResponse, None]:
            await chooser.async_sleep()
            yield _canned_response()

    yield LITestLLM()
135
+
136
+
137
@register_llm_client(config_type=TestLLMConfig, wrapper_type=LLMFrameworkEnum.CREWAI)
async def test_llm_crewai(config: TestLLMConfig, builder: Builder):
    """LLM client for CrewAI."""

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class CrewAITestLLM:
        """Duck-typed CrewAI model: `call` waits for the configured delay, then answers."""

        def call(self, messages: list[dict[str, str]] | None = None, **kwargs: Any) -> str:
            chooser.sync_sleep()
            reply = chooser.next_response()
            return reply

    client = CrewAITestLLM()
    yield client
150
+
151
+
152
@register_llm_client(config_type=TestLLMConfig, wrapper_type=LLMFrameworkEnum.SEMANTIC_KERNEL)
async def test_llm_semantic_kernel(config: TestLLMConfig, builder: Builder):
    """LLM client for SemanticKernel."""

    try:
        from semantic_kernel.contents.chat_message_content import ChatMessageContent
        from semantic_kernel.contents.utils.author_role import AuthorRole
    except ImportError as exc:
        raise ImportError("Semantic Kernel is required for using the test_llm with semantic_kernel. "
                          "Please install the `nvidia-nat-semantic-kernel` package. ") from exc

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class SKTestLLM:
        """Duck-typed Semantic Kernel chat service returning canned assistant messages."""

        async def get_chat_message_contents(self, chat_history: Any, **_kwargs: Any) -> list[ChatMessageContent]:
            await chooser.async_sleep()
            message = ChatMessageContent(role=AuthorRole.ASSISTANT, content=chooser.next_response())
            return [message]

        async def get_streaming_chat_message_contents(self, chat_history: Any,
                                                      **_kwargs: Any) -> AsyncGenerator[ChatMessageContent, None]:
            await chooser.async_sleep()
            yield ChatMessageContent(role=AuthorRole.ASSISTANT, content=chooser.next_response())

    yield SKTestLLM()
179
+
180
+
181
@register_llm_client(config_type=TestLLMConfig, wrapper_type=LLMFrameworkEnum.AGNO)
async def test_llm_agno(config: TestLLMConfig, builder: Builder):
    """LLM client for agno."""

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class AgnoTestLLM:
        """Duck-typed agno model: sync/async invoke plus single-chunk streaming variants."""

        def invoke(self, messages: Any | None = None, **_kwargs: Any) -> str:
            chooser.sync_sleep()
            reply = chooser.next_response()
            return reply

        async def ainvoke(self, messages: Any | None = None, **_kwargs: Any) -> str:
            await chooser.async_sleep()
            reply = chooser.next_response()
            return reply

        def invoke_stream(self, messages: Any | None = None, **_kwargs: Any) -> Iterator[str]:
            # Single-chunk "stream": sleep once, then yield one canned response.
            chooser.sync_sleep()
            yield chooser.next_response()

        async def ainvoke_stream(self, messages: Any | None = None, **_kwargs: Any) -> AsyncGenerator[str, None]:
            # Async single-chunk "stream" counterpart of invoke_stream.
            await chooser.async_sleep()
            yield chooser.next_response()

    yield AgnoTestLLM()
nat/test/register.py CHANGED
@@ -21,3 +21,4 @@
21
21
  from . import embedder
22
22
  from . import functions
23
23
  from . import memory
24
+ from . import llm
@@ -1,12 +1,15 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nvidia-nat-test
3
- Version: 1.3.0a20250904
3
+ Version: 1.3.0a20250909
4
4
  Summary: Testing utilities for NeMo Agent toolkit
5
5
  Keywords: ai,rag,agents
6
6
  Classifier: Programming Language :: Python
7
- Requires-Python: <3.13,>=3.11
7
+ Classifier: Programming Language :: Python :: 3.11
8
+ Classifier: Programming Language :: Python :: 3.12
9
+ Classifier: Programming Language :: Python :: 3.13
10
+ Requires-Python: <3.14,>=3.11
8
11
  Description-Content-Type: text/markdown
9
- Requires-Dist: nvidia-nat==v1.3.0a20250904
12
+ Requires-Dist: nvidia-nat==v1.3.0a20250909
10
13
  Requires-Dist: langchain-community~=0.3
11
14
  Requires-Dist: pytest~=8.3
12
15
 
@@ -2,14 +2,14 @@ nat/meta/pypi.md,sha256=LLKJHg5oN1-M9Pqfk3Bmphkk4O2TFsyiixuK5T0Y-gw,1100
2
2
  nat/test/__init__.py,sha256=_RnTJnsUucHvla_nYKqD4O4g8Bz0tcuDRzWk1bEhcy0,875
3
3
  nat/test/embedder.py,sha256=ClDyK1kna4hCBSlz71gK1B-ZjlwcBHTDQRekoNM81Bs,1809
4
4
  nat/test/functions.py,sha256=0ScrdsjcxCsPRLnyb5gfwukmvZxFi_ptCswLSIG0DVY,3095
5
+ nat/test/llm.py,sha256=PpBtv9AaGGHbrpvL77skf4CMCXB0adk3rnkPBMv0_WI,8205
5
6
  nat/test/memory.py,sha256=xki_A2yiMhEZuQk60K7t04QRqf32nQqnfzD5Iv7fkvw,1456
6
7
  nat/test/object_store_tests.py,sha256=PyJioOtoSzILPq6LuD-sOZ_89PIcgXWZweoHBQpK2zQ,4281
7
8
  nat/test/plugin.py,sha256=sMZ7xupCgEpQCuwUksUDYMjbBj0VNlhR6SK5UcOrBzg,6953
8
- nat/test/register.py,sha256=fbCLr3E4u8PYMFUlkRNlg53Td2YJ80iQCyxpRIbGId4,859
9
- nat/test/test_env_fixtures.py,sha256=zGhFBiZmdDYuj8kOU__RL9LOrood3L58KG8OWXnyOjQ,2375
9
+ nat/test/register.py,sha256=FZLjc3-G1lniSUJ3qVOr0aQ-aoH1F493JMFtKbZG56w,877
10
10
  nat/test/tool_test_runner.py,sha256=2kCydvJ6LBZ3Lh04e5_Qg-8_kOpcIkNdwGeQOPtadek,20089
11
- nvidia_nat_test-1.3.0a20250904.dist-info/METADATA,sha256=62QuhFogXBLmR0k9QRnQDiOkvbNe-NEWVyAJDcR32JM,1466
12
- nvidia_nat_test-1.3.0a20250904.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
13
- nvidia_nat_test-1.3.0a20250904.dist-info/entry_points.txt,sha256=7dOP9XB6iMDqvav3gYx9VWUwA8RrFzhbAa8nGeC8e4Y,99
14
- nvidia_nat_test-1.3.0a20250904.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
15
- nvidia_nat_test-1.3.0a20250904.dist-info/RECORD,,
11
+ nvidia_nat_test-1.3.0a20250909.dist-info/METADATA,sha256=WwYr0-xdTDimCA4Dsg4Op8_3YtlmqhTsDkmIptznzoY,1619
12
+ nvidia_nat_test-1.3.0a20250909.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
13
+ nvidia_nat_test-1.3.0a20250909.dist-info/entry_points.txt,sha256=7dOP9XB6iMDqvav3gYx9VWUwA8RrFzhbAa8nGeC8e4Y,99
14
+ nvidia_nat_test-1.3.0a20250909.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
15
+ nvidia_nat_test-1.3.0a20250909.dist-info/RECORD,,
@@ -1,60 +0,0 @@
1
- # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- # SPDX-License-Identifier: Apache-2.0
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """
16
- Comprehensive tests for environment variable handling and API key fixtures.
17
- """
18
-
19
- import os
20
-
21
- import pytest
22
-
23
- from nat.test.plugin import require_env_variables
24
-
25
-
26
- @pytest.mark.usefixtures("restore_environ")
27
- @pytest.mark.parametrize("fail_on_missing", [True, False])
28
- @pytest.mark.parametrize("env_vars",
29
- [{
30
- "SOME_KEY": "xyz"
31
- }, {
32
- "SOME_KEY": "xyz", "OTHER_KEY": "abc"
33
- }, {
34
- "SOME_KEY": "xyz", "OTHER_KEY": "abc", "MISSING_KEY": None
35
- }, {
36
- "SOME_KEY": "xyz", "OTHER_KEY": "abc", "MISSING_KEY": None, "EMPTY_KEY": None
37
- }])
38
- def test_require_env_variables(fail_on_missing: bool, env_vars: dict[str, str | None]):
39
- # Note the variable name `fail_on_missing` is used to avoid conflict with the `fail_missing` fixture
40
- has_missing = False
41
- var_names = []
42
- for (env_var, value) in env_vars.items():
43
- var_names.append(env_var)
44
- if value is not None:
45
- os.environ[env_var] = value
46
- else:
47
- has_missing = True
48
- os.environ.pop(env_var, None)
49
-
50
- if has_missing:
51
- if fail_on_missing:
52
- expected_exception = RuntimeError
53
- else:
54
- expected_exception = pytest.skip.Exception
55
-
56
- with pytest.raises(expected_exception, match="unittest"):
57
- require_env_variables(varnames=var_names, reason="unittest", fail_missing=fail_on_missing)
58
-
59
- else:
60
- assert require_env_variables(varnames=var_names, reason="unittest", fail_missing=fail_on_missing) == env_vars