nvidia-nat-semantic-kernel 1.3.dev0-py3-none-any.whl → 1.3.0rc1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of nvidia-nat-semantic-kernel might be problematic.
- nat/plugins/semantic_kernel/llm.py +82 -12
- nat/plugins/semantic_kernel/register.py +0 -1
- nat/plugins/semantic_kernel/tool_wrapper.py +0 -2
- {nvidia_nat_semantic_kernel-1.3.dev0.dist-info → nvidia_nat_semantic_kernel-1.3.0rc1.dist-info}/METADATA +7 -4
- nvidia_nat_semantic_kernel-1.3.0rc1.dist-info/RECORD +10 -0
- nvidia_nat_semantic_kernel-1.3.dev0.dist-info/RECORD +0 -10
- {nvidia_nat_semantic_kernel-1.3.dev0.dist-info → nvidia_nat_semantic_kernel-1.3.0rc1.dist-info}/WHEEL +0 -0
- {nvidia_nat_semantic_kernel-1.3.dev0.dist-info → nvidia_nat_semantic_kernel-1.3.0rc1.dist-info}/entry_points.txt +0 -0
- {nvidia_nat_semantic_kernel-1.3.dev0.dist-info → nvidia_nat_semantic_kernel-1.3.0rc1.dist-info}/top_level.txt +0 -0
nat/plugins/semantic_kernel/llm.py

@@ -13,29 +13,99 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from typing import TypeVar
+
 from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.cli.register_workflow import register_llm_client
+from nat.data_models.llm import LLMBaseConfig
 from nat.data_models.retry_mixin import RetryMixin
+from nat.data_models.thinking_mixin import ThinkingMixin
+from nat.llm.azure_openai_llm import AzureOpenAIModelConfig
 from nat.llm.openai_llm import OpenAIModelConfig
+from nat.llm.utils.thinking import BaseThinkingInjector
+from nat.llm.utils.thinking import FunctionArgumentWrapper
+from nat.llm.utils.thinking import patch_with_thinking
 from nat.utils.exception_handlers.automatic_retries import patch_with_retry
+from nat.utils.type_utils import override

+ModelType = TypeVar("ModelType")

-@register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.SEMANTIC_KERNEL)
-async def openai_semantic_kernel(llm_config: OpenAIModelConfig, builder: Builder):

-
+def _patch_llm_based_on_config(client: ModelType, llm_config: LLMBaseConfig) -> ModelType:
+
+    from semantic_kernel.contents.chat_history import ChatHistory
+    from semantic_kernel.contents.chat_message_content import ChatMessageContent
+    from semantic_kernel.contents.utils.author_role import AuthorRole
+
+    class SemanticKernelThinkingInjector(BaseThinkingInjector):

-
-
-
+        @override
+        def inject(self, chat_history: ChatHistory, *args, **kwargs) -> FunctionArgumentWrapper:
+            """
+            Inject a system prompt into the chat_history.

-
+            The chat_history is the first (non-object) argument to the function.
+            The rest of the arguments are passed through unchanged.
+
+            Args:
+                chat_history: The ChatHistory object to inject the system prompt into.
+                *args: The rest of the arguments to the function.
+                **kwargs: The rest of the keyword arguments to the function.
+
+            Returns:
+                FunctionArgumentWrapper: An object that contains the transformed args and kwargs.
+            """
+            if chat_history.system_message is None:
+                new_messages = ChatHistory(chat_history.messages, system_message=self.system_prompt)
+                return FunctionArgumentWrapper(new_messages, *args, **kwargs)
+            else:
+                new_messages = ChatHistory(
+                    [ChatMessageContent(role=AuthorRole.SYSTEM, content=self.system_prompt)] + chat_history.messages,
+                    system_message=chat_history.system_message,
+                )
+                return FunctionArgumentWrapper(new_messages, *args, **kwargs)

     if isinstance(llm_config, RetryMixin):
-
-
-
-
+        client = patch_with_retry(client,
+                                  retries=llm_config.num_retries,
+                                  retry_codes=llm_config.retry_on_status_codes,
+                                  retry_on_messages=llm_config.retry_on_errors)
+
+    if isinstance(llm_config, ThinkingMixin) and llm_config.thinking_system_prompt is not None:
+        client = patch_with_thinking(
+            client,
+            SemanticKernelThinkingInjector(
+                system_prompt=llm_config.thinking_system_prompt,
+                function_names=[
+                    "get_chat_message_contents",
+                    "get_streaming_chat_message_contents",
+                ],
+            ))
+
+    return client
+
+
+@register_llm_client(config_type=AzureOpenAIModelConfig, wrapper_type=LLMFrameworkEnum.SEMANTIC_KERNEL)
+async def azure_openai_semantic_kernel(llm_config: AzureOpenAIModelConfig, _builder: Builder):
+
+    from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
+
+    llm = AzureChatCompletion(
+        api_key=llm_config.api_key,
+        api_version=llm_config.api_version,
+        endpoint=llm_config.azure_endpoint,
+        deployment_name=llm_config.azure_deployment,
+    )
+
+    yield _patch_llm_based_on_config(llm, llm_config)
+
+
+@register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.SEMANTIC_KERNEL)
+async def openai_semantic_kernel(llm_config: OpenAIModelConfig, _builder: Builder):
+
+    from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
+
+    llm = OpenAIChatCompletion(ai_model_id=llm_config.model_name)

-    yield llm
+    yield _patch_llm_based_on_config(llm, llm_config)
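For context, the sketch below (not part of the wheel) mirrors what the new SemanticKernelThinkingInjector.inject does to a Semantic Kernel ChatHistory before get_chat_message_contents / get_streaming_chat_message_contents run: if the history has no system message, the thinking prompt becomes the system message; otherwise the thinking prompt is prepended as an extra SYSTEM-role message and the existing system message is kept. The helper name and prompt string are illustrative.

```python
# Illustrative sketch only -- not shipped in this wheel. It mirrors the
# injection logic added in llm.py, using plain semantic_kernel types;
# the function name and prompt text are made up for the example.
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.utils.author_role import AuthorRole

THINKING_PROMPT = "Think step by step before answering."  # hypothetical prompt


def inject_system_prompt(chat_history: ChatHistory, system_prompt: str) -> ChatHistory:
    """Return a new ChatHistory with the prompt applied, as the patched client receives it."""
    if chat_history.system_message is None:
        # No system message yet: the thinking prompt becomes the system message.
        return ChatHistory(chat_history.messages, system_message=system_prompt)
    # A system message already exists: keep it, and prepend the thinking prompt
    # as an additional SYSTEM-role message at the front of the history.
    return ChatHistory(
        [ChatMessageContent(role=AuthorRole.SYSTEM, content=system_prompt)] + chat_history.messages,
        system_message=chat_history.system_message,
    )


history = ChatHistory()
history.add_user_message("What is 2 + 2?")
patched = inject_system_prompt(history, THINKING_PROMPT)
```

Per the diff, the injector is only applied when the LLM config is a ThinkingMixin with a non-empty thinking_system_prompt, and it targets both the streaming and non-streaming chat completion entry points.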
{nvidia_nat_semantic_kernel-1.3.dev0.dist-info → nvidia_nat_semantic_kernel-1.3.0rc1.dist-info}/METADATA

@@ -1,13 +1,16 @@
 Metadata-Version: 2.4
 Name: nvidia-nat-semantic-kernel
-Version: 1.3.dev0
+Version: 1.3.0rc1
 Summary: Subpackage for Semantic-Kernel integration in NeMo Agent toolkit
 Keywords: ai,rag,agents
 Classifier: Programming Language :: Python
-
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: <3.14,>=3.11
 Description-Content-Type: text/markdown
-Requires-Dist: nvidia-nat==v1.3-
-Requires-Dist: semantic-kernel~=1.
+Requires-Dist: nvidia-nat==v1.3.0-rc1
+Requires-Dist: semantic-kernel~=1.35

 <!--
 SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
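As a side note (not from the package), the meaning of the new pins can be checked with the packaging library: the Requires-Python range accepts Python 3.11 through 3.13, the compatible-release pin on semantic-kernel allows any 1.x release at or above 1.35, and the v1.3.0-rc1 spelling of the nvidia-nat pin normalizes to 1.3.0rc1 under PEP 440.

```python
# Illustrative only: verify what the new metadata pins mean using `packaging`.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# Requires-Python: <3.14,>=3.11 -> Python 3.11 through 3.13 are accepted, 3.14 is not.
assert Version("3.13.1") in SpecifierSet("<3.14,>=3.11")
assert Version("3.14.0") not in SpecifierSet("<3.14,>=3.11")

# semantic-kernel~=1.35 is a compatible-release pin: >=1.35 but <2.0.
assert Version("1.35.2") in SpecifierSet("~=1.35")
assert Version("2.0.0") not in SpecifierSet("~=1.35")

# PEP 440 normalizes the "v" prefix and "-rc" separator used in the nvidia-nat pin.
assert Version("v1.3.0-rc1") == Version("1.3.0rc1")
```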
nvidia_nat_semantic_kernel-1.3.0rc1.dist-info/RECORD

@@ -0,0 +1,10 @@
+nat/meta/pypi.md,sha256=rFmwVds3akmoz0TE1SOjCjCUbB6SWfaRex_Vi5OfUAk,1116
+nat/plugins/semantic_kernel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nat/plugins/semantic_kernel/llm.py,sha256=FbvPa1kmmijn4DspeVrHNnS8Az69j3syam2EIDwIeuE,4819
+nat/plugins/semantic_kernel/register.py,sha256=_R3bhGmz___696_NwyIcpw3koMBiWqIFoWEFJ0VAgXs,831
+nat/plugins/semantic_kernel/tool_wrapper.py,sha256=N6WGEdveLYFKtOKjxEMMNT5vG8QJUoSddtswQ1fPEzQ,7121
+nvidia_nat_semantic_kernel-1.3.0rc1.dist-info/METADATA,sha256=IXAzzmWvfVjfU1iGrfp8ApfF4INlNQqJt6nyAJE-s-c,1629
+nvidia_nat_semantic_kernel-1.3.0rc1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nvidia_nat_semantic_kernel-1.3.0rc1.dist-info/entry_points.txt,sha256=0jCtQBAn5Ohs9XoVCF34WvNCV33OwAsH8bjFzgw_ByM,76
+nvidia_nat_semantic_kernel-1.3.0rc1.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
+nvidia_nat_semantic_kernel-1.3.0rc1.dist-info/RECORD,,
@@ -1,10 +0,0 @@
|
|
|
1
|
-
nat/meta/pypi.md,sha256=rFmwVds3akmoz0TE1SOjCjCUbB6SWfaRex_Vi5OfUAk,1116
|
|
2
|
-
nat/plugins/semantic_kernel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
3
|
-
nat/plugins/semantic_kernel/llm.py,sha256=C2ISvuCkz_YN0yOgyf1QJPtbJmX_1kgLpQumwpKfcOI,1744
|
|
4
|
-
nat/plugins/semantic_kernel/register.py,sha256=RKXyuaXy4ftA5IL2RrCofIBjpie_-2lP9YZoHAiyPU0,863
|
|
5
|
-
nat/plugins/semantic_kernel/tool_wrapper.py,sha256=PEtzcYhZ73ftsg80Jz9yWTWQtks1y7PDZdS89seFb3I,7175
|
|
6
|
-
nvidia_nat_semantic_kernel-1.3.dev0.dist-info/METADATA,sha256=TfDloI28VQzcLq8zQZZKzc0m7T-x2PjnsBtGC89K97g,1476
|
|
7
|
-
nvidia_nat_semantic_kernel-1.3.dev0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
8
|
-
nvidia_nat_semantic_kernel-1.3.dev0.dist-info/entry_points.txt,sha256=0jCtQBAn5Ohs9XoVCF34WvNCV33OwAsH8bjFzgw_ByM,76
|
|
9
|
-
nvidia_nat_semantic_kernel-1.3.dev0.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
|
|
10
|
-
nvidia_nat_semantic_kernel-1.3.dev0.dist-info/RECORD,,
|
|
{nvidia_nat_semantic_kernel-1.3.dev0.dist-info → nvidia_nat_semantic_kernel-1.3.0rc1.dist-info}/WHEEL: file without changes
{nvidia_nat_semantic_kernel-1.3.dev0.dist-info → nvidia_nat_semantic_kernel-1.3.0rc1.dist-info}/entry_points.txt: file without changes
{nvidia_nat_semantic_kernel-1.3.dev0.dist-info → nvidia_nat_semantic_kernel-1.3.0rc1.dist-info}/top_level.txt: file without changes