nvidia-nat-llama-index 1.1.0a20251020 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nat/meta/pypi.md +23 -0
- nat/plugins/llama_index/__init__.py +0 -0
- nat/plugins/llama_index/embedder.py +74 -0
- nat/plugins/llama_index/llm.py +128 -0
- nat/plugins/llama_index/register.py +23 -0
- nat/plugins/llama_index/tool_wrapper.py +32 -0
- nvidia_nat_llama_index-1.1.0a20251020.dist-info/METADATA +55 -0
- nvidia_nat_llama_index-1.1.0a20251020.dist-info/RECORD +13 -0
- nvidia_nat_llama_index-1.1.0a20251020.dist-info/WHEEL +5 -0
- nvidia_nat_llama_index-1.1.0a20251020.dist-info/entry_points.txt +2 -0
- nvidia_nat_llama_index-1.1.0a20251020.dist-info/licenses/LICENSE-3rd-party.txt +5478 -0
- nvidia_nat_llama_index-1.1.0a20251020.dist-info/licenses/LICENSE.md +201 -0
- nvidia_nat_llama_index-1.1.0a20251020.dist-info/top_level.txt +1 -0
nat/meta/pypi.md
ADDED
@@ -0,0 +1,23 @@
+<!--
+SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+SPDX-License-Identifier: Apache-2.0
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+
+
+# NVIDIA NeMo Agent Toolkit Subpackage
+This is a subpackage for Llama-Index integration in NeMo Agent toolkit.
+
+For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).

nat/plugins/llama_index/__init__.py
ADDED
File without changes
nat/plugins/llama_index/embedder.py
ADDED
@@ -0,0 +1,74 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nat.builder.builder import Builder
+from nat.builder.framework_enum import LLMFrameworkEnum
+from nat.cli.register_workflow import register_embedder_client
+from nat.data_models.retry_mixin import RetryMixin
+from nat.embedder.azure_openai_embedder import AzureOpenAIEmbedderModelConfig
+from nat.embedder.nim_embedder import NIMEmbedderModelConfig
+from nat.embedder.openai_embedder import OpenAIEmbedderModelConfig
+from nat.utils.exception_handlers.automatic_retries import patch_with_retry
+
+
+@register_embedder_client(config_type=AzureOpenAIEmbedderModelConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+async def azure_openai_llama_index(embedder_config: AzureOpenAIEmbedderModelConfig, _builder: Builder):
+
+    from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
+
+    client = AzureOpenAIEmbedding(**embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))
+
+    if isinstance(embedder_config, RetryMixin):
+        client = patch_with_retry(client,
+                                  retries=embedder_config.num_retries,
+                                  retry_codes=embedder_config.retry_on_status_codes,
+                                  retry_on_messages=embedder_config.retry_on_errors)
+
+    yield client
+
+
+@register_embedder_client(config_type=NIMEmbedderModelConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+async def nim_llama_index(embedder_config: NIMEmbedderModelConfig, _builder: Builder):
+
+    from llama_index.embeddings.nvidia import NVIDIAEmbedding  # pylint: disable=no-name-in-module
+
+    client = NVIDIAEmbedding(
+        **embedder_config.model_dump(exclude={"type", "model_name"}, by_alias=True, exclude_none=True),
+        model=embedder_config.model_name,
+    )
+
+    if isinstance(embedder_config, RetryMixin):
+        client = patch_with_retry(client,
+                                  retries=embedder_config.num_retries,
+                                  retry_codes=embedder_config.retry_on_status_codes,
+                                  retry_on_messages=embedder_config.retry_on_errors)
+
+    yield client
+
+
+@register_embedder_client(config_type=OpenAIEmbedderModelConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+async def openai_llama_index(embedder_config: OpenAIEmbedderModelConfig, _builder: Builder):
+
+    from llama_index.embeddings.openai import OpenAIEmbedding
+
+    client = OpenAIEmbedding(**embedder_config.model_dump(exclude={"type"}, by_alias=True, exclude_none=True))
+
+    if isinstance(embedder_config, RetryMixin):
+        client = patch_with_retry(client,
+                                  retries=embedder_config.num_retries,
+                                  retry_codes=embedder_config.retry_on_status_codes,
+                                  retry_on_messages=embedder_config.retry_on_errors)
+
+    yield client
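Each of the three registrations above has the same shape: build the native LlamaIndex embedding client from the validated config, then wrap it with `patch_with_retry` when the config mixes in `RetryMixin`. As a rough illustration of what that wrapping amounts to, here is a minimal sketch applying the same helper to a plain client by hand; the model name, API key, retry count, status codes, and message fragment are all placeholder example values, not defaults taken from this package.

```python
from llama_index.embeddings.openai import OpenAIEmbedding

from nat.utils.exception_handlers.automatic_retries import patch_with_retry

# A plain LlamaIndex embedding client (placeholder model/key values).
client = OpenAIEmbedding(model="text-embedding-3-small", api_key="sk-...")

# Mirror what the registered clients do when the config carries RetryMixin:
# transparently retry calls on the given status codes / error-message fragments.
client = patch_with_retry(client,
                          retries=3,
                          retry_codes=[429, 500, 503],
                          retry_on_messages=["rate limit"])
```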
nat/plugins/llama_index/llm.py
ADDED
@@ -0,0 +1,128 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections.abc import Sequence
+from typing import TypeVar
+
+from nat.builder.builder import Builder
+from nat.builder.framework_enum import LLMFrameworkEnum
+from nat.cli.register_workflow import register_llm_client
+from nat.data_models.llm import LLMBaseConfig
+from nat.data_models.retry_mixin import RetryMixin
+from nat.data_models.thinking_mixin import ThinkingMixin
+from nat.llm.aws_bedrock_llm import AWSBedrockModelConfig
+from nat.llm.azure_openai_llm import AzureOpenAIModelConfig
+from nat.llm.litellm_llm import LiteLlmModelConfig
+from nat.llm.nim_llm import NIMModelConfig
+from nat.llm.openai_llm import OpenAIModelConfig
+from nat.llm.utils.thinking import BaseThinkingInjector
+from nat.llm.utils.thinking import FunctionArgumentWrapper
+from nat.llm.utils.thinking import patch_with_thinking
+from nat.utils.exception_handlers.automatic_retries import patch_with_retry
+from nat.utils.type_utils import override
+
+ModelType = TypeVar("ModelType")
+
+
+def _patch_llm_based_on_config(client: ModelType, llm_config: LLMBaseConfig) -> ModelType:
+
+    from llama_index.core.base.llms.types import ChatMessage
+
+    class LlamaIndexThinkingInjector(BaseThinkingInjector):
+
+        @override
+        def inject(self, messages: Sequence[ChatMessage], *args, **kwargs) -> FunctionArgumentWrapper:
+            for i, message in enumerate(messages):
+                if message.role == "system":
+                    if self.system_prompt not in str(message.content):
+                        messages = list(messages)
+                        messages[i] = ChatMessage(role="system", content=f"{message.content}\n{self.system_prompt}")
+                    break
+            else:
+                messages = list(messages)
+                messages.insert(0, ChatMessage(role="system", content=self.system_prompt))
+            return FunctionArgumentWrapper(messages, *args, **kwargs)
+
+    if isinstance(llm_config, RetryMixin):
+        client = patch_with_retry(client,
+                                  retries=llm_config.num_retries,
+                                  retry_codes=llm_config.retry_on_status_codes,
+                                  retry_on_messages=llm_config.retry_on_errors)
+
+    if isinstance(llm_config, ThinkingMixin) and llm_config.thinking_system_prompt is not None:
+        client = patch_with_thinking(
+            client,
+            LlamaIndexThinkingInjector(
+                system_prompt=llm_config.thinking_system_prompt,
+                function_names=[
+                    "chat",
+                    "stream_chat",
+                    "achat",
+                    "astream_chat",
+                ],
+            ))
+
+    return client
+
+
+@register_llm_client(config_type=AWSBedrockModelConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+async def aws_bedrock_llama_index(llm_config: AWSBedrockModelConfig, _builder: Builder):
+
+    from llama_index.llms.bedrock import Bedrock
+
+    # LlamaIndex uses context_size instead of max_tokens
+    llm = Bedrock(**llm_config.model_dump(exclude={"type", "top_p", "thinking"}, by_alias=True))
+
+    yield _patch_llm_based_on_config(llm, llm_config)
+
+
+@register_llm_client(config_type=AzureOpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+async def azure_openai_llama_index(llm_config: AzureOpenAIModelConfig, _builder: Builder):
+
+    from llama_index.llms.azure_openai import AzureOpenAI
+
+    llm = AzureOpenAI(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True))
+
+    yield _patch_llm_based_on_config(llm, llm_config)
+
+
+@register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+async def nim_llama_index(llm_config: NIMModelConfig, _builder: Builder):
+
+    from llama_index.llms.nvidia import NVIDIA
+
+    llm = NVIDIA(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True, exclude_none=True))
+
+    yield _patch_llm_based_on_config(llm, llm_config)
+
+
+@register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+async def openai_llama_index(llm_config: OpenAIModelConfig, _builder: Builder):
+
+    from llama_index.llms.openai import OpenAI
+
+    llm = OpenAI(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True, exclude_none=True))
+
+    yield _patch_llm_based_on_config(llm, llm_config)
+
+
+@register_llm_client(config_type=LiteLlmModelConfig, wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+async def litellm_llama_index(llm_config: LiteLlmModelConfig, _builder: Builder):
+
+    from llama_index.llms.litellm import LiteLLM
+
+    llm = LiteLLM(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True, exclude_none=True))
+
+    yield _patch_llm_based_on_config(llm, llm_config)
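The `LlamaIndexThinkingInjector` above is the interesting part: before any of the patched chat entry points (`chat`, `stream_chat`, `achat`, `astream_chat`) run, it appends the configured thinking prompt to an existing system message, or prepends a new system message when none exists. A standalone sketch of that same injection logic, with an arbitrary example prompt:

```python
from llama_index.core.base.llms.types import ChatMessage

THINKING_PROMPT = "Think step by step before answering."  # example value


def inject_thinking(messages: list[ChatMessage]) -> list[ChatMessage]:
    messages = list(messages)
    for i, message in enumerate(messages):
        if message.role == "system":
            # Extend the existing system message, avoiding double-injection.
            if THINKING_PROMPT not in str(message.content):
                messages[i] = ChatMessage(role="system", content=f"{message.content}\n{THINKING_PROMPT}")
            return messages
    # No system message present: prepend one.
    messages.insert(0, ChatMessage(role="system", content=THINKING_PROMPT))
    return messages


msgs = inject_thinking([ChatMessage(role="user", content="What is 2 + 2?")])
assert msgs[0].role == "system" and msgs[1].role == "user"
```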
nat/plugins/llama_index/register.py
ADDED
@@ -0,0 +1,23 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# flake8: noqa
+# isort:skip_file
+
+# Import any providers which need to be automatically registered here
+
+from . import embedder
+from . import llm
+from . import tool_wrapper
nat/plugins/llama_index/tool_wrapper.py
ADDED
@@ -0,0 +1,32 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nat.builder.builder import Builder
+from nat.builder.framework_enum import LLMFrameworkEnum
+from nat.builder.function import Function
+from nat.cli.register_workflow import register_tool_wrapper
+
+
+@register_tool_wrapper(wrapper_type=LLMFrameworkEnum.LLAMA_INDEX)
+def llama_index_tool_wrapper(name: str, fn: Function, builder: Builder):
+
+    from llama_index.core.tools import FunctionTool
+
+    assert fn.input_schema is not None, "Tool must have input schema"
+
+    return FunctionTool.from_defaults(async_fn=fn.acall_invoke,
+                                      name=name,
+                                      description=fn.description,
+                                      fn_schema=fn.input_schema)
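The wrapper turns any NAT `Function` into a LlamaIndex `FunctionTool`, exposing the function's async entry point and its pydantic input schema. A minimal sketch of the resulting object, built directly from an illustrative async function and schema (the `add`/`AddInput` names are made up for the example):

```python
from pydantic import BaseModel

from llama_index.core.tools import FunctionTool


class AddInput(BaseModel):
    a: int
    b: int


async def add(a: int, b: int) -> int:
    return a + b


# Equivalent in shape to what the wrapper above returns for a NAT Function.
tool = FunctionTool.from_defaults(async_fn=add,
                                  name="add",
                                  description="Add two integers.",
                                  fn_schema=AddInput)
```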
nvidia_nat_llama_index-1.1.0a20251020.dist-info/METADATA
ADDED
@@ -0,0 +1,55 @@
+Metadata-Version: 2.4
+Name: nvidia-nat-llama-index
+Version: 1.1.0a20251020
+Summary: Subpackage for Llama-Index integration in NeMo Agent toolkit
+Author: NVIDIA Corporation
+Maintainer: NVIDIA Corporation
+License: Apache-2.0
+Project-URL: documentation, https://docs.nvidia.com/nemo/agent-toolkit/latest/
+Project-URL: source, https://github.com/NVIDIA/NeMo-Agent-Toolkit
+Keywords: ai,rag,agents
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: <3.14,>=3.11
+Description-Content-Type: text/markdown
+License-File: LICENSE-3rd-party.txt
+License-File: LICENSE.md
+Requires-Dist: nvidia-nat==v1.1.0a20251020
+Requires-Dist: llama-index-core~=0.12.21
+Requires-Dist: llama-index-embeddings-azure-openai~=0.3.9
+Requires-Dist: llama-index-embeddings-nvidia~=0.3.1
+Requires-Dist: llama-index-embeddings-openai~=0.3.1
+Requires-Dist: llama-index-llms-azure-openai~=0.3.2
+Requires-Dist: llama-index-llms-bedrock~=0.3.8
+Requires-Dist: llama-index-llms-litellm~=0.5.1
+Requires-Dist: llama-index-llms-nvidia~=0.3.1
+Requires-Dist: llama-index-llms-openai~=0.3.42
+Requires-Dist: llama-index-readers-file~=0.4.4
+Requires-Dist: llama-index~=0.12.21
+Dynamic: license-file
+
+<!--
+SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+SPDX-License-Identifier: Apache-2.0
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+
+
+# NVIDIA NeMo Agent Toolkit Subpackage
+This is a subpackage for Llama-Index integration in NeMo Agent toolkit.
+
+For more information about the NVIDIA NeMo Agent toolkit, please visit the [NeMo Agent toolkit GitHub Repo](https://github.com/NVIDIA/NeMo-Agent-Toolkit).
nvidia_nat_llama_index-1.1.0a20251020.dist-info/RECORD
ADDED
@@ -0,0 +1,13 @@
+nat/meta/pypi.md,sha256=s9C3pgWB0HLIXTx5QPryNOWN0O2fIRIap0p9_zCHlTs,1112
+nat/plugins/llama_index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nat/plugins/llama_index/embedder.py,sha256=4c4uAa19UArqA_Npe5mwEnUhCaguTbyWcAntBzcPEeQ,3493
+nat/plugins/llama_index/llm.py,sha256=7fneFsjXjZJ-ReLAy1aUslkGhj6_fG_gFbyy4ME-QH4,5350
+nat/plugins/llama_index/register.py,sha256=1x_b8u6cuQwh4Iz_7TcIFWXvLIL9IIKUPE-zR9d6ug8,859
+nat/plugins/llama_index/tool_wrapper.py,sha256=VFKMIIeLdWqHwW2Ax11E2w-_9w3ow6Iuhra1Hk78RYM,1387
+nvidia_nat_llama_index-1.1.0a20251020.dist-info/licenses/LICENSE-3rd-party.txt,sha256=fOk5jMmCX9YoKWyYzTtfgl-SUy477audFC5hNY4oP7Q,284609
+nvidia_nat_llama_index-1.1.0a20251020.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+nvidia_nat_llama_index-1.1.0a20251020.dist-info/METADATA,sha256=Nd0eKWPPvyiacUcqYPWndvRt9uM8-TohPBvhmGwjujY,2422
+nvidia_nat_llama_index-1.1.0a20251020.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nvidia_nat_llama_index-1.1.0a20251020.dist-info/entry_points.txt,sha256=2LqRRju5448P2v8B3y6TSPnk-nOd5T3AmV5JibCnoQc,68
+nvidia_nat_llama_index-1.1.0a20251020.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
+nvidia_nat_llama_index-1.1.0a20251020.dist-info/RECORD,,