nvidia-nat-test 1.4.0a20260117__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nat/meta/pypi.md +23 -0
- nat/test/__init__.py +23 -0
- nat/test/embedder.py +44 -0
- nat/test/functions.py +99 -0
- nat/test/llm.py +244 -0
- nat/test/memory.py +41 -0
- nat/test/object_store_tests.py +117 -0
- nat/test/plugin.py +890 -0
- nat/test/register.py +25 -0
- nat/test/tool_test_runner.py +612 -0
- nat/test/utils.py +215 -0
- nvidia_nat_test-1.4.0a20260117.dist-info/METADATA +46 -0
- nvidia_nat_test-1.4.0a20260117.dist-info/RECORD +18 -0
- nvidia_nat_test-1.4.0a20260117.dist-info/WHEEL +5 -0
- nvidia_nat_test-1.4.0a20260117.dist-info/entry_points.txt +5 -0
- nvidia_nat_test-1.4.0a20260117.dist-info/licenses/LICENSE-3rd-party.txt +5478 -0
- nvidia_nat_test-1.4.0a20260117.dist-info/licenses/LICENSE.md +201 -0
- nvidia_nat_test-1.4.0a20260117.dist-info/top_level.txt +1 -0
nat/meta/pypi.md
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
<!--
|
|
2
|
+
SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
3
|
+
SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
|
6
|
+
you may not use this file except in compliance with the License.
|
|
7
|
+
You may obtain a copy of the License at
|
|
8
|
+
|
|
9
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
|
10
|
+
|
|
11
|
+
Unless required by applicable law or agreed to in writing, software
|
|
12
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
|
13
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
14
|
+
See the License for the specific language governing permissions and
|
|
15
|
+
limitations under the License.
|
|
16
|
+
-->
|
|
17
|
+
|
|
18
|
+

|
|
19
|
+
|
|
20
|
+
# NVIDIA NeMo Agent Toolkit Subpackage
|
|
21
|
+
This is a subpackage for NeMo Agent toolkit test utilities.
|
|
22
|
+
|
|
23
|
+
For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).
|
nat/test/__init__.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
# Tool testing utilities
|
|
17
|
+
from .tool_test_runner import ToolTestRunner
|
|
18
|
+
from .tool_test_runner import with_mocked_dependencies
|
|
19
|
+
|
|
20
|
+
__all__ = [
|
|
21
|
+
"ToolTestRunner",
|
|
22
|
+
"with_mocked_dependencies",
|
|
23
|
+
]
|
nat/test/embedder.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
from pydantic import ConfigDict
|
|
17
|
+
|
|
18
|
+
from nat.builder.builder import Builder
|
|
19
|
+
from nat.builder.embedder import EmbedderProviderInfo
|
|
20
|
+
from nat.builder.framework_enum import LLMFrameworkEnum
|
|
21
|
+
from nat.cli.register_workflow import register_embedder_client
|
|
22
|
+
from nat.cli.register_workflow import register_embedder_provider
|
|
23
|
+
from nat.data_models.embedder import EmbedderBaseConfig
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class EmbedderTestConfig(EmbedderBaseConfig, name="test_embedder"):
    """Configuration for the ``test_embedder`` provider/client pair."""

    # Allow field names starting with "model_" (pydantic reserves that prefix).
    model_config = ConfigDict(protected_namespaces=())

    # Model name reported by the fake embedder; no network call is ever made.
    model_name: str = "nvidia/nv-embedqa-e5-v5"
    # Dimensionality of the deterministic fake embeddings produced by the client.
    embedding_size: int = 768
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@register_embedder_provider(config_type=EmbedderTestConfig)
async def embedder_test_provider(config: EmbedderTestConfig, builder: Builder):
    """Register the ``test_embedder`` provider with the NAT registry."""

    yield EmbedderProviderInfo(config=config, description="Test embedder provider")
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
@register_embedder_client(config_type=EmbedderTestConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
async def embedder_langchain_test_client(config: EmbedderTestConfig, builder: Builder):
    """Yield a LangChain embedder client producing deterministic fake embeddings."""

    # Imported lazily so langchain_community is only required when this client is used.
    from langchain_community.embeddings import DeterministicFakeEmbedding

    yield DeterministicFakeEmbedding(size=config.embedding_size)
|
nat/test/functions.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
from collections.abc import AsyncGenerator
|
|
17
|
+
|
|
18
|
+
from nat.builder.builder import Builder
|
|
19
|
+
from nat.builder.function_info import FunctionInfo
|
|
20
|
+
from nat.cli.register_workflow import register_function
|
|
21
|
+
from nat.data_models.api_server import ChatRequest
|
|
22
|
+
from nat.data_models.api_server import ChatResponse
|
|
23
|
+
from nat.data_models.api_server import ChatResponseChunk
|
|
24
|
+
from nat.data_models.api_server import Usage
|
|
25
|
+
from nat.data_models.function import FunctionBaseConfig
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class EchoFunctionConfig(FunctionBaseConfig, name="test_echo"):
    """Configuration for the ``test_echo`` function."""

    # When True the function speaks the OpenAI chat API (ChatRequest -> ChatResponse);
    # otherwise it is a plain str -> str echo.
    use_openai_api: bool = False
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@register_function(config_type=EchoFunctionConfig)
async def echo_function(config: EchoFunctionConfig, builder: Builder):
    """Register the ``test_echo`` function.

    Yields either a plain string echo or an OpenAI-compatible chat echo,
    depending on ``config.use_openai_api``.
    """

    async def inner(message: str) -> str:
        """Echo the input string unchanged."""
        return message

    async def inner_oai(message: ChatRequest) -> ChatResponse:
        """Echo the first message of an OpenAI-style chat request, with usage stats."""
        content = message.messages[0].content

        # Approximate token counts by whitespace-splitting; good enough for tests.
        prompt_tokens = sum(len(str(msg.content).split()) for msg in message.messages)
        # Coerce to str before splitting, matching the prompt count above:
        # message content is not guaranteed to be a plain string, and calling
        # .split() on a non-string would raise AttributeError.
        completion_tokens = len(str(content).split()) if content else 0
        total_tokens = prompt_tokens + completion_tokens
        usage = Usage(prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=total_tokens)
        return ChatResponse.from_string(content, usage=usage)

    if (config.use_openai_api):
        yield inner_oai
    else:
        yield inner
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class StreamingEchoFunctionConfig(FunctionBaseConfig, name="test_streaming_echo"):
    """Configuration for the ``test_streaming_echo`` function."""

    # When True the function streams ChatResponseChunk objects; otherwise raw strings.
    use_openai_api: bool = False
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
@register_function(config_type=StreamingEchoFunctionConfig)
async def streaming_function(config: StreamingEchoFunctionConfig, builder: Builder):
    """Register the ``test_streaming_echo`` function, which streams its input back."""

    def oai_to_list(message: ChatRequest) -> list[str]:
        """Convert a chat request into the list of its message contents."""
        return [m.content for m in message.messages]

    async def inner(message: list[str]) -> AsyncGenerator[str]:
        """Stream each input string back, one item per chunk."""
        for item in message:
            yield item

    async def inner_oai(message: ChatRequest) -> AsyncGenerator[ChatResponseChunk]:
        """Stream each chat message content back as an OpenAI-style response chunk."""
        for item in oai_to_list(message):
            yield ChatResponseChunk.from_string(item)

    # Select the implementation once, then register it with the converter so
    # plain-string callers can still invoke the OpenAI variant (and vice versa).
    if config.use_openai_api:
        selected = inner_oai
    else:
        selected = inner
    yield FunctionInfo.from_fn(selected, converters=[oai_to_list])
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class ConstantFunctionConfig(FunctionBaseConfig, name="test_constant"):
    """Configuration for the ``test_constant`` function."""

    # The fixed string returned on every invocation.
    response: str
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
@register_function(config_type=ConstantFunctionConfig)
async def constant_function(config: ConstantFunctionConfig, builder: Builder):
    """Register the ``test_constant`` function, which always returns a fixed string."""

    async def inner() -> str:
        """Return the configured constant response."""
        return config.response

    yield inner
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class StreamingConstantFunctionConfig(FunctionBaseConfig, name="test_streaming_constant"):
    """Configuration for the ``test_streaming_constant`` function."""

    # Strings yielded in order on every invocation.
    responses: list[str]
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
@register_function(config_type=StreamingConstantFunctionConfig)
async def streaming_constant_function(config: StreamingConstantFunctionConfig, builder: Builder):
    """Register the ``test_streaming_constant`` function, which streams fixed strings."""

    async def inner() -> AsyncGenerator[str]:
        """Yield each configured response in order."""
        for value in config.responses:
            yield value

    yield inner
|
nat/test/llm.py
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
# pylint: disable=unused-argument,missing-class-docstring,missing-function-docstring,import-outside-toplevel
|
|
17
|
+
# pylint: disable=too-few-public-methods
|
|
18
|
+
|
|
19
|
+
import asyncio
|
|
20
|
+
import time
|
|
21
|
+
from collections.abc import AsyncGenerator
|
|
22
|
+
from collections.abc import Iterator
|
|
23
|
+
from itertools import cycle as iter_cycle
|
|
24
|
+
from typing import Any
|
|
25
|
+
|
|
26
|
+
from pydantic import Field
|
|
27
|
+
|
|
28
|
+
from nat.builder.builder import Builder
|
|
29
|
+
from nat.builder.framework_enum import LLMFrameworkEnum
|
|
30
|
+
from nat.builder.llm import LLMProviderInfo
|
|
31
|
+
from nat.cli.register_workflow import register_llm_client
|
|
32
|
+
from nat.cli.register_workflow import register_llm_provider
|
|
33
|
+
from nat.data_models.llm import LLMBaseConfig
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class MockLLMConfig(LLMBaseConfig, name="nat_test_llm"):
    """Mock LLM configuration.

    Registered under ``nat_test_llm``; drives deterministic, offline LLM
    clients for every supported framework wrapper.
    """

    # default_factory is the documented way to default a mutable field and
    # avoids sharing one list object as the declared default.
    response_seq: list[str] = Field(
        default_factory=list,
        description="Returns the next element in order (wraps)",
    )
    delay_ms: int = Field(default=0, ge=0, description="Artificial per-call delay in milliseconds to mimic latency")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class _ResponseChooser:
|
|
47
|
+
"""
|
|
48
|
+
Helper class to choose the next response according to config using itertools.cycle and provide synchronous and
|
|
49
|
+
asynchronous sleep functions.
|
|
50
|
+
"""
|
|
51
|
+
|
|
52
|
+
def __init__(self, response_seq: list[str], delay_ms: int):
|
|
53
|
+
self._cycler = iter_cycle(response_seq) if response_seq else None
|
|
54
|
+
self._delay_ms = delay_ms
|
|
55
|
+
|
|
56
|
+
def next_response(self) -> str:
|
|
57
|
+
"""Return the next response in the cycle, or an empty string if no responses are configured."""
|
|
58
|
+
if self._cycler is None:
|
|
59
|
+
return ""
|
|
60
|
+
return next(self._cycler)
|
|
61
|
+
|
|
62
|
+
def sync_sleep(self) -> None:
|
|
63
|
+
time.sleep(self._delay_ms / 1000.0)
|
|
64
|
+
|
|
65
|
+
async def async_sleep(self) -> None:
|
|
66
|
+
await asyncio.sleep(self._delay_ms / 1000.0)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@register_llm_provider(config_type=MockLLMConfig)
async def test_llm_provider(config: MockLLMConfig, builder: Builder) -> AsyncGenerator[LLMProviderInfo, None]:
    """Register the `nat_test_llm` provider for the NAT registry."""
    # The provider only exposes metadata; per-framework clients are registered below.
    yield LLMProviderInfo(config=config, description="Test LLM provider")
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
@register_llm_client(config_type=MockLLMConfig, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
async def test_llm_langchain(config: MockLLMConfig, builder: Builder):
    """LLM client for LangChain/LangGraph."""

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class LangChainTestLLM:
        """Duck-typed stand-in for a LangChain chat model, backed by canned responses."""

        def invoke(self, messages: Any, **_kwargs: Any) -> str:
            """Synchronously return the next canned response."""
            chooser.sync_sleep()
            return chooser.next_response()

        async def ainvoke(self, messages: Any, **_kwargs: Any) -> str:
            """Asynchronously return the next canned response."""
            await chooser.async_sleep()
            return chooser.next_response()

        def stream(self, messages: Any, **_kwargs: Any) -> Iterator[str]:
            """Stream a single canned-response chunk."""
            chooser.sync_sleep()
            yield chooser.next_response()

        async def astream(self, messages: Any, **_kwargs: Any) -> AsyncGenerator[str]:
            """Asynchronously stream a single canned-response chunk."""
            await chooser.async_sleep()
            yield chooser.next_response()

        def bind_tools(self, tools: Any, **_kwargs: Any) -> "LangChainTestLLM":
            """Bind tools to the LLM. Returns self to maintain fluent interface."""
            return self

        def bind(self, **_kwargs: Any) -> "LangChainTestLLM":
            """Bind additional parameters to the LLM. Returns self to maintain fluent interface."""
            return self

    yield LangChainTestLLM()
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
@register_llm_client(config_type=MockLLMConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
async def test_llm_llama_index(config: MockLLMConfig, builder: Builder):
    """LLM client for LlamaIndex, backed by canned responses."""

    try:
        from llama_index.core.base.llms.types import ChatMessage
        from llama_index.core.base.llms.types import ChatResponse
    except ImportError as exc:
        raise ImportError("llama_index is required for using the test_llm with llama_index. "
                          "Please install the `nvidia-nat-llama-index` package. ") from exc

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class LITestLLM:
        """Duck-typed LlamaIndex LLM returning canned chat responses."""

        # NOTE(review): ChatMessage is constructed with a single positional
        # argument (the content) throughout; confirm this matches the installed
        # llama_index ChatMessage signature.

        def chat(self, messages: list[Any] | None = None, **_kwargs: Any) -> ChatResponse:
            """Synchronously return the next canned chat response."""
            chooser.sync_sleep()
            return ChatResponse(message=ChatMessage(chooser.next_response()))

        async def achat(self, messages: list[Any] | None = None, **_kwargs: Any) -> ChatResponse:
            """Asynchronously return the next canned chat response."""
            await chooser.async_sleep()
            return ChatResponse(message=ChatMessage(chooser.next_response()))

        def stream_chat(self, messages: list[Any] | None = None, **_kwargs: Any) -> Iterator[ChatResponse]:
            """Stream a single canned chat response."""
            chooser.sync_sleep()
            yield ChatResponse(message=ChatMessage(chooser.next_response()))

        async def astream_chat(self,
                               messages: list[Any] | None = None,
                               **_kwargs: Any) -> AsyncGenerator[ChatResponse, None]:
            """Asynchronously stream a single canned chat response."""
            await chooser.async_sleep()
            yield ChatResponse(message=ChatMessage(chooser.next_response()))

    yield LITestLLM()
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
@register_llm_client(config_type=MockLLMConfig, wrapper_type=LLMFrameworkEnum.CREWAI)
async def test_llm_crewai(config: MockLLMConfig, builder: Builder):
    """LLM client for CrewAI."""

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class CrewAITestLLM:
        """Duck-typed CrewAI LLM returning canned responses."""

        def call(self, messages: list[dict[str, str]] | None = None, **kwargs: Any) -> str:
            """Sleep for the configured delay, then return the next canned reply."""
            chooser.sync_sleep()
            return chooser.next_response()

    yield CrewAITestLLM()
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
@register_llm_client(config_type=MockLLMConfig, wrapper_type=LLMFrameworkEnum.SEMANTIC_KERNEL)
async def test_llm_semantic_kernel(config: MockLLMConfig, builder: Builder):
    """LLM client for SemanticKernel."""

    try:
        from semantic_kernel.contents.chat_message_content import ChatMessageContent
        from semantic_kernel.contents.utils.author_role import AuthorRole
    except ImportError as exc:
        raise ImportError("Semantic Kernel is required for using the test_llm with semantic_kernel. "
                          "Please install the `nvidia-nat-semantic-kernel` package. ") from exc

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class SKTestLLM:
        """Duck-typed chat-completion service returning canned assistant messages."""

        async def get_chat_message_contents(self, chat_history: Any, **_kwargs: Any) -> list[ChatMessageContent]:
            """Return a single canned assistant message."""
            await chooser.async_sleep()
            text = chooser.next_response()
            return [ChatMessageContent(role=AuthorRole.ASSISTANT, content=text)]

        async def get_streaming_chat_message_contents(self, chat_history: Any,
                                                      **_kwargs: Any) -> AsyncGenerator[ChatMessageContent, None]:
            """Stream a single canned assistant message."""
            await chooser.async_sleep()
            text = chooser.next_response()
            yield ChatMessageContent(role=AuthorRole.ASSISTANT, content=text)

    yield SKTestLLM()
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
@register_llm_client(config_type=MockLLMConfig, wrapper_type=LLMFrameworkEnum.AGNO)
async def test_llm_agno(config: MockLLMConfig, builder: Builder):
    """LLM client for agno."""

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class AgnoTestLLM:
        """Duck-typed agno model returning canned responses."""

        def invoke(self, messages: Any | None = None, **_kwargs: Any) -> str:
            """Synchronously return the next canned response."""
            chooser.sync_sleep()
            return chooser.next_response()

        async def ainvoke(self, messages: Any | None = None, **_kwargs: Any) -> str:
            """Asynchronously return the next canned response."""
            await chooser.async_sleep()
            return chooser.next_response()

        def invoke_stream(self, messages: Any | None = None, **_kwargs: Any) -> Iterator[str]:
            """Stream a single canned-response chunk."""
            chooser.sync_sleep()
            yield chooser.next_response()

        async def ainvoke_stream(self, messages: Any | None = None, **_kwargs: Any) -> AsyncGenerator[str, None]:
            """Asynchronously stream a single canned-response chunk."""
            await chooser.async_sleep()
            yield chooser.next_response()

    yield AgnoTestLLM()
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
@register_llm_client(config_type=MockLLMConfig, wrapper_type=LLMFrameworkEnum.ADK)
async def test_llm_adk(config: MockLLMConfig, builder: Builder):
    """LLM client for Google ADK."""

    try:
        from google.adk.models.base_llm import BaseLlm
        from google.adk.models.llm_request import LlmRequest
        from google.adk.models.llm_response import LlmResponse
        from google.genai import types
    except ImportError as exc:
        raise ImportError("Google ADK is required for using the test_llm with ADK. "
                          "Please install the `nvidia-nat-adk` package. ") from exc

    chooser = _ResponseChooser(response_seq=config.response_seq, delay_ms=config.delay_ms)

    class ADKTestLLM(BaseLlm):
        """BaseLlm implementation replying with canned model responses."""

        async def generate_content_async(self,
                                         llm_request: LlmRequest,
                                         stream: bool = False) -> AsyncGenerator[LlmResponse, None]:
            """Yield a single canned LlmResponse (streaming and non-streaming alike)."""
            # Mirror real ADK models: ensure the request ends with user content.
            self._maybe_append_user_content(llm_request)
            await chooser.async_sleep()
            text = chooser.next_response()
            yield LlmResponse(content=types.Content(role="model", parts=[types.Part.from_text(text=text)]))

        def connect(self, *_args: Any, **_kwargs: Any) -> None:
            # Live-connection API is not supported by this mock.
            return None

    yield ADKTestLLM(model="nat_test_llm")
|
nat/test/memory.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
from nat.builder.builder import Builder
|
|
17
|
+
from nat.cli.register_workflow import register_memory
|
|
18
|
+
from nat.data_models.memory import MemoryBaseConfig
|
|
19
|
+
from nat.memory.interfaces import MemoryEditor
|
|
20
|
+
from nat.memory.models import MemoryItem
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class DummyMemoryConfig(MemoryBaseConfig, name="test_dummy"):
    """Configuration for the ``test_dummy`` no-op memory backend."""
    pass
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@register_memory(config_type=DummyMemoryConfig)
async def echo_function(config: DummyMemoryConfig, builder: Builder):
    """Register the ``test_dummy`` memory: a no-op editor that stores nothing.

    NOTE(review): the name ``echo_function`` appears copy-pasted from
    ``nat.test.functions``; something like ``dummy_memory`` would read better,
    but renaming would change this module's public surface.
    """

    class DummyMemoryEditor(MemoryEditor):
        """MemoryEditor that ignores writes and never returns results."""

        async def add_items(self, items: list[MemoryItem]) -> None:
            # Intentionally a no-op.
            pass

        async def search(self, query: str, top_k: int = 5, **kwargs) -> list[MemoryItem]:
            # Nothing is ever stored, so nothing can match.
            return []

        async def remove_items(self, **kwargs) -> None:
            # Intentionally a no-op.
            pass

    yield DummyMemoryEditor()
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import uuid
|
|
17
|
+
from abc import abstractmethod
|
|
18
|
+
from contextlib import asynccontextmanager
|
|
19
|
+
|
|
20
|
+
import pytest
|
|
21
|
+
import pytest_asyncio
|
|
22
|
+
|
|
23
|
+
from nat.data_models.object_store import KeyAlreadyExistsError
|
|
24
|
+
from nat.data_models.object_store import NoSuchKeyError
|
|
25
|
+
from nat.object_store.interfaces import ObjectStore
|
|
26
|
+
from nat.object_store.models import ObjectStoreItem
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@pytest.mark.asyncio(loop_scope="class")
class ObjectStoreTests:
    """Reusable contract tests for ObjectStore implementations.

    Subclass and override ``_get_store`` to yield a concrete store; the
    inherited tests then exercise put/upsert/get/delete semantics.
    """

    @abstractmethod
    @asynccontextmanager
    async def _get_store(self):
        # Subclasses must yield a ready-to-use ObjectStore instance.
        # NOTE(review): @abstractmethod is unenforced here because the class
        # does not use ABCMeta; it serves as documentation only.
        yield

    @pytest_asyncio.fixture(loop_scope="class", scope="class")
    async def store(self):
        # One store instance is shared by all tests in the class.
        async with self._get_store() as store:
            yield store

    async def test_create_object_store(self, store: ObjectStore):
        """The fixture must yield a genuine ObjectStore."""
        assert isinstance(store, ObjectStore)

    async def test_put_object(self, store: ObjectStore):
        """put_object stores a new key and rejects duplicates."""

        # Use a random key to avoid conflicts with other tests
        key = f"test_key_{uuid.uuid4()}"

        initial_item = ObjectStoreItem(data=b"test_value")
        await store.put_object(key, initial_item)

        # Try to put the same object again
        with pytest.raises(KeyAlreadyExistsError):
            await store.put_object(key, initial_item)

    async def test_upsert_object(self, store: ObjectStore):
        """upsert_object inserts when missing and fully replaces when present."""
        key = f"test_key_{uuid.uuid4()}"

        initial_item = ObjectStoreItem(data=b"test_value", content_type="text/plain", metadata={"key": "value"})

        await store.upsert_object(key, initial_item)

        # Check that the object exists
        retrieved_item = await store.get_object(key)
        assert retrieved_item.data == initial_item.data
        assert retrieved_item.content_type == initial_item.content_type
        assert retrieved_item.metadata == initial_item.metadata

        # Upsert the object with a new value
        new_item = ObjectStoreItem(data=b"new_value", content_type="application/json", metadata={"key": "new_value"})
        await store.upsert_object(key, new_item)

        # Check that the object was updated
        retrieved_item = await store.get_object(key)
        assert retrieved_item.data == new_item.data
        assert retrieved_item.content_type == new_item.content_type
        assert retrieved_item.metadata == new_item.metadata

    async def test_get_object(self, store: ObjectStore):
        """get_object round-trips data/content_type/metadata and raises on misses."""

        key = f"test_key_{uuid.uuid4()}"

        initial_item = ObjectStoreItem(data=b"test_value", content_type="text/plain", metadata={"key": "value"})
        await store.put_object(key, initial_item)

        retrieved_item = await store.get_object(key)
        assert retrieved_item.data == initial_item.data
        assert retrieved_item.content_type == initial_item.content_type
        assert retrieved_item.metadata == initial_item.metadata

        # Try to get an object that doesn't exist
        with pytest.raises(NoSuchKeyError):
            await store.get_object(f"test_key_{uuid.uuid4()}")

    async def test_delete_object(self, store: ObjectStore):
        """delete_object removes a key and raises when the key is absent."""

        key = f"test_key_{uuid.uuid4()}"

        initial_item = ObjectStoreItem(data=b"test_value")
        await store.put_object(key, initial_item)

        # Check that the object exists
        retrieved_item = await store.get_object(key)
        assert retrieved_item.data == initial_item.data

        # Delete the object
        await store.delete_object(key)

        # Try to get the object again
        with pytest.raises(NoSuchKeyError):
            await store.get_object(key)

        # Try to delete the object again
        with pytest.raises(NoSuchKeyError):
            await store.delete_object(key)
|