nvidia-nat-agno 1.3.0.dev2__py3-none-any.whl → 1.3.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nat/plugins/agno/llm.py CHANGED
@@ -13,75 +13,89 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- import os
+ from typing import TypeVar

  from nat.builder.builder import Builder
  from nat.builder.framework_enum import LLMFrameworkEnum
  from nat.cli.register_workflow import register_llm_client
+ from nat.data_models.llm import LLMBaseConfig
  from nat.data_models.retry_mixin import RetryMixin
+ from nat.data_models.thinking_mixin import ThinkingMixin
  from nat.llm.nim_llm import NIMModelConfig
  from nat.llm.openai_llm import OpenAIModelConfig
+ from nat.llm.utils.thinking import BaseThinkingInjector
+ from nat.llm.utils.thinking import FunctionArgumentWrapper
+ from nat.llm.utils.thinking import patch_with_thinking
  from nat.utils.exception_handlers.automatic_retries import patch_with_retry
+ from nat.utils.type_utils import override

+ ModelType = TypeVar("ModelType")

- @register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.AGNO)
- async def nim_agno(llm_config: NIMModelConfig, builder: Builder):
-
-     from agno.models.nvidia import Nvidia
-
-     config_obj = {
-         **llm_config.model_dump(exclude={"type", "model_name"}, by_alias=True),
-         "id": f"{llm_config.model_name}",
-     }
-
-     # Because Agno uses a different environment variable for the API key, we need to set it here manually
-     if ("api_key" not in config_obj or config_obj["api_key"] is None):

-         if ("NVIDIA_API_KEY" in os.environ):
-             # Dont need to do anything. User has already set the correct key
-             pass
-         else:
-             nvidai_api_key = os.getenv("NVIDIA_API_KEY")
+ def _patch_llm_based_on_config(client: ModelType, llm_config: LLMBaseConfig) -> ModelType:

-             if (nvidai_api_key is not None):
-                 # Transfer the key to the correct environment variable
-                 os.environ["NVIDIA_API_KEY"] = nvidai_api_key
+     from agno.models.message import Message

-     # Create Nvidia instance with conditional base_url
-     kwargs = {"id": config_obj.get("id")}
-     if "base_url" in config_obj and config_obj.get("base_url") is not None:
-         kwargs["base_url"] = config_obj.get("base_url")
+     class AgnoThinkingInjector(BaseThinkingInjector):

-     client = Nvidia(**kwargs) # type: ignore[arg-type]
+         from agno.models.message import Message

-     if isinstance(client, RetryMixin):
+         @override
+         def inject(self, messages: list[Message], *args, **kwargs) -> FunctionArgumentWrapper:
+             new_messages = [Message(role="system", content=self.system_prompt)] + messages
+             return FunctionArgumentWrapper(new_messages, *args, **kwargs)

+     if isinstance(llm_config, RetryMixin):
          client = patch_with_retry(client,
                                    retries=llm_config.num_retries,
                                    retry_codes=llm_config.retry_on_status_codes,
                                    retry_on_messages=llm_config.retry_on_errors)

-     yield client
+     if isinstance(llm_config, ThinkingMixin) and llm_config.thinking_system_prompt is not None:
+         client = patch_with_thinking(
+             client,
+             AgnoThinkingInjector(system_prompt=llm_config.thinking_system_prompt,
+                                  function_names=[
+                                      "invoke_stream",
+                                      "invoke",
+                                      "ainvoke",
+                                      "ainvoke_stream",
+                                  ]))

+     return client

- @register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.AGNO)
- async def openai_agno(llm_config: OpenAIModelConfig, builder: Builder):

-     from agno.models.openai import OpenAIChat
+ @register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.AGNO)
+ async def nim_agno(llm_config: NIMModelConfig, _builder: Builder):
+
+     from agno.models.nvidia import Nvidia
+
+     config_obj = {
+         **llm_config.model_dump(
+             exclude={"type", "model_name", "thinking"},
+             by_alias=True,
+             exclude_none=True,
+         ),
+     }

-     # Use model_dump to get the proper field values with correct types
-     kwargs = llm_config.model_dump(exclude={"type"}, by_alias=True)
+     client = Nvidia(**config_obj, id=llm_config.model_name)

-     # AGNO uses 'id' instead of 'model' for the model name
-     if "model" in kwargs:
-         kwargs["id"] = kwargs.pop("model")
+     yield _patch_llm_based_on_config(client, llm_config)

-     client = OpenAIChat(**kwargs)

-     if isinstance(llm_config, RetryMixin):
-         client = patch_with_retry(client,
-                                   retries=llm_config.num_retries,
-                                   retry_codes=llm_config.retry_on_status_codes,
-                                   retry_on_messages=llm_config.retry_on_errors)
+ @register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.AGNO)
+ async def openai_agno(llm_config: OpenAIModelConfig, _builder: Builder):
+
+     from agno.models.openai import OpenAIChat
+
+     config_obj = {
+         **llm_config.model_dump(
+             exclude={"type", "model_name", "thinking"},
+             by_alias=True,
+             exclude_none=True,
+         ),
+     }
+
+     client = OpenAIChat(**config_obj, id=llm_config.model_name)

-     yield client
+     yield _patch_llm_based_on_config(client, llm_config)
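To make the new thinking hook concrete, here is a minimal sketch (not shipped in either wheel) of what an injector does to a patched call's arguments. It reuses only names visible in the diff above; the ExampleThinkingInjector class, the prompt text, and the comment about the wrapper's contents are illustrative assumptions, not documented API.

    # Minimal sketch, assuming only that the names imported below behave as the diff suggests.
    from agno.models.message import Message

    from nat.llm.utils.thinking import BaseThinkingInjector
    from nat.llm.utils.thinking import FunctionArgumentWrapper
    from nat.utils.type_utils import override


    class ExampleThinkingInjector(BaseThinkingInjector):
        """Prepends the configured system prompt to the outgoing message list."""

        @override
        def inject(self, messages: list[Message], *args, **kwargs) -> FunctionArgumentWrapper:
            new_messages = [Message(role="system", content=self.system_prompt)] + messages
            return FunctionArgumentWrapper(new_messages, *args, **kwargs)


    # Hypothetical values: patch_with_thinking(client, injector) applies the injector
    # to the listed Agno model methods before they run.
    injector = ExampleThinkingInjector(
        system_prompt="Think through the problem before answering.",
        function_names=["invoke", "ainvoke", "invoke_stream", "ainvoke_stream"],
    )
    wrapped = injector.inject([Message(role="user", content="What is 2 + 2?")])
    # `wrapped` now carries the rewritten arguments (system message first) that the
    # patched method would receive.

In the released code, nim_agno and openai_agno do this wiring automatically through _patch_llm_based_on_config whenever the LLM config supplies a thinking_system_prompt, and add retry patching whenever the config mixes in RetryMixin.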
@@ -13,7 +13,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- # pylint: disable=unused-import
  # flake8: noqa
  # isort:skip_file

@@ -17,11 +17,9 @@ import asyncio
  import json
  import logging
  import textwrap
- import traceback
+ from collections.abc import Awaitable
+ from collections.abc import Callable
  from typing import Any
- from typing import Awaitable
- from typing import Callable
- from typing import List

  from agno.tools import tool

@@ -134,7 +132,7 @@ async def process_result(result: Any, name: str) -> str:

  def execute_agno_tool(name: str,
                        coroutine_fn: Callable[..., Awaitable[Any]],
-                       required_fields: List[str],
+                       required_fields: list[str],
                        loop: asyncio.AbstractEventLoop,
                        **kwargs: Any) -> Any:
      """
@@ -146,7 +144,7 @@ def execute_agno_tool(name: str,
          The name of the tool
      coroutine_fn : Callable
          The async function to invoke
-     required_fields : List[str]
+     required_fields : list[str]
          List of required fields for validation
      loop : asyncio.AbstractEventLoop
          The event loop to use for async execution
@@ -157,7 +155,6 @@ def execute_agno_tool(name: str,
      -------
      The result of the function execution as a string
      """
-     global _tool_call_counters, _tool_initialization_done

      try:
          logger.debug(f"Running {name} with kwargs: {kwargs}")
@@ -288,9 +285,7 @@ def execute_agno_tool(name: str,
          return process_future.result(timeout=30) # 30-second timeout for processing

      except Exception as e:
-         logger.exception(f"Error executing Agno tool {name}: {e}")
-         error_traceback = traceback.format_exc()
-         logger.error(f"Exception traceback: {error_traceback}")
+         logger.error("Error executing Agno tool %s: %s", name, e)
          raise


@@ -335,7 +330,7 @@ def agno_tool_wrapper(name: str, fn: Function, builder: Builder):
      if description:
          description = textwrap.dedent(description).strip()

-     # Input schema handling from LangChain-style
+     # Input schema handling from LangChain/LangGraph-style
      required_fields = []
      if fn.input_schema is not None:
          try:
@@ -13,7 +13,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- # pylint: disable=unused-import
  # flake8: noqa
  # isort:skip_file

@@ -1,14 +1,16 @@
  Metadata-Version: 2.4
  Name: nvidia-nat-agno
- Version: 1.3.0.dev2
+ Version: 1.3.0rc1
  Summary: Subpackage for Agno integration in NeMo Agent toolkit
  Keywords: ai,rag,agents
  Classifier: Programming Language :: Python
- Requires-Python: <3.13,>=3.11
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: <3.14,>=3.11
  Description-Content-Type: text/markdown
- Requires-Dist: nvidia-nat==v1.3.0-dev2
+ Requires-Dist: nvidia-nat[openai]==v1.3.0-rc1
  Requires-Dist: agno~=1.2.3
- Requires-Dist: openai~=1.66
  Requires-Dist: google-search-results~=2.4.2

  <!--
@@ -0,0 +1,13 @@
+ nat/meta/pypi.md,sha256=tZD7hiOSYWgiAdddD1eIJ8T5ipZwEIjnd8ilgmasdmw,1198
+ nat/plugins/agno/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nat/plugins/agno/llm.py,sha256=op02DUgf_DwYFxOwmUmd7NhFIf6TTXj313sFV6GOZZM,3828
+ nat/plugins/agno/register.py,sha256=q-es1KVb_PTHOFszltym7e7Pj2jmyieih_Ve-cguHI8,788
+ nat/plugins/agno/tool_wrapper.py,sha256=Iq8v0uO6HfSVQvcRwZOk4TMRpRyWUMM4s-XAtZx_Hj8,15729
+ nat/plugins/agno/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nat/plugins/agno/tools/register.py,sha256=mpNgD1r51EoYlQiEAqPX15wCtx2alDcMIQiR8F_ub-M,743
+ nat/plugins/agno/tools/serp_api_tool.py,sha256=AJQH6-1iEUUrk_nzfZ3zZqutEKhJ_LMOUJi_iol65Sc,4442
+ nvidia_nat_agno-1.3.0rc1.dist-info/METADATA,sha256=8EKugYRK44pr-1SWNsOJGiobrJwL86I_iqcVD02yBAY,1731
+ nvidia_nat_agno-1.3.0rc1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ nvidia_nat_agno-1.3.0rc1.dist-info/entry_points.txt,sha256=qRhuHKj2WmdJkLpbVXpYkdtc2cZdG4LPlBsABG2ImVI,103
+ nvidia_nat_agno-1.3.0rc1.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
+ nvidia_nat_agno-1.3.0rc1.dist-info/RECORD,,
@@ -1,13 +0,0 @@
- nat/meta/pypi.md,sha256=tZD7hiOSYWgiAdddD1eIJ8T5ipZwEIjnd8ilgmasdmw,1198
- nat/plugins/agno/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- nat/plugins/agno/llm.py,sha256=hvPu0-gkRasnWZY5kwHOiA6rZ1ZIGlnaH3nPPfqrFJ8,3434
- nat/plugins/agno/register.py,sha256=6vC1TjMxo3igqTnEtVFgLEf_jgLYkBfBZxjwqxGng6w,820
- nat/plugins/agno/tool_wrapper.py,sha256=uClYG-LvRj1OBTMwzMuE40cibLhqVRXSCiqniVQND2Y,15914
- nat/plugins/agno/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- nat/plugins/agno/tools/register.py,sha256=OCmzR03CHmQHm34ZEascM1dRVh-ALMs2mafDcqLDz6s,775
- nat/plugins/agno/tools/serp_api_tool.py,sha256=AJQH6-1iEUUrk_nzfZ3zZqutEKhJ_LMOUJi_iol65Sc,4442
- nvidia_nat_agno-1.3.0.dev2.dist-info/METADATA,sha256=lY_BM12mvdm83POL94boSvGvVqTMPFxA36rzGfQkp9k,1601
- nvidia_nat_agno-1.3.0.dev2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- nvidia_nat_agno-1.3.0.dev2.dist-info/entry_points.txt,sha256=qRhuHKj2WmdJkLpbVXpYkdtc2cZdG4LPlBsABG2ImVI,103
- nvidia_nat_agno-1.3.0.dev2.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
- nvidia_nat_agno-1.3.0.dev2.dist-info/RECORD,,