nvidia-nat-crewai 1.2.1rc1__py3-none-any.whl → 1.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nat/plugins/crewai/crewai_callback_handler.py +15 -8
- nat/plugins/crewai/llm.py +102 -34
- nat/plugins/crewai/register.py +0 -1
- {nvidia_nat_crewai-1.2.1rc1.dist-info → nvidia_nat_crewai-1.3.0.dist-info}/METADATA +15 -4
- nvidia_nat_crewai-1.3.0.dist-info/RECORD +13 -0
- nvidia_nat_crewai-1.3.0.dist-info/licenses/LICENSE-3rd-party.txt +5478 -0
- nvidia_nat_crewai-1.3.0.dist-info/licenses/LICENSE.md +201 -0
- nvidia_nat_crewai-1.2.1rc1.dist-info/RECORD +0 -11
- {nvidia_nat_crewai-1.2.1rc1.dist-info → nvidia_nat_crewai-1.3.0.dist-info}/WHEEL +0 -0
- {nvidia_nat_crewai-1.2.1rc1.dist-info → nvidia_nat_crewai-1.3.0.dist-info}/entry_points.txt +0 -0
- {nvidia_nat_crewai-1.2.1rc1.dist-info → nvidia_nat_crewai-1.3.0.dist-info}/top_level.txt +0 -0
nat/plugins/crewai/crewai_callback_handler.py CHANGED

@@ -41,6 +41,7 @@ class CrewAIProfilerHandler(BaseProfilerCallback):
     A callback manager/handler for CrewAI that intercepts calls to:
     - ToolUsage._use
     - LLM Calls
+
     to collect usage statistics (tokens, inputs, outputs, time intervals, etc.)
     and store them in NAT's usage_stats queue for subsequent analysis.
     """
@@ -94,7 +95,7 @@ class CrewAIProfilerHandler(BaseProfilerCallback):
                 if tool_info:
                     tool_name = tool_info.name
             except Exception as e:
-                logger.exception("Error getting tool name: %s", e
+                logger.exception("Error getting tool name: %s", e)

             try:
                 # Pre-call usage event
@@ -132,7 +133,7 @@ class CrewAIProfilerHandler(BaseProfilerCallback):
                 return result

             except Exception as e:
-                logger.
+                logger.error("ToolUsage._use error: %s", e)
                 raise

         return wrapped_tool_use
@@ -153,12 +154,15 @@ class CrewAIProfilerHandler(BaseProfilerCallback):
             seconds_between_calls = int(now - self.last_call_ts)
             model_name = kwargs.get('model', "")

-            model_input =
+            model_input = []
             try:
                 for message in kwargs.get('messages', []):
-
+                    content = message.get('content', "")
+                    model_input.append("" if content is None else str(content))
             except Exception as e:
-                logger.exception("Error getting model input: %s", e
+                logger.exception("Error getting model input: %s", e)
+
+            model_input = "".join(model_input)

             # Record the start event
             input_stats = IntermediateStepPayload(
@@ -176,13 +180,16 @@ class CrewAIProfilerHandler(BaseProfilerCallback):
             # Call the original litellm.completion(...)
             output = original_func(*args, **kwargs)

-            model_output =
+            model_output = []
             try:
                 for choice in output.choices:
                     msg = choice.model_extra["message"]
-
+                    content = msg.get('content', "")
+                    model_output.append("" if content is None else str(content))
             except Exception as e:
-                logger.exception("Error getting model output: %s", e
+                logger.exception("Error getting model output: %s", e)
+
+            model_output = "".join(model_output)

             now = time.time()
             # Record the end event
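Besides the logging fixes, the model input/output capture now accumulates per-message strings in a list and joins them once, coercing content=None to an empty string rather than the literal "None". A minimal standalone sketch of that extraction pattern (extract_contents is a hypothetical name for illustration; in the wheel this logic runs inline inside the wrapped litellm call):

def extract_contents(messages: list[dict]) -> str:
    # Accumulate per-message content in a list, then join once at the end.
    parts: list[str] = []
    for message in messages:
        content = message.get('content', "")
        # Chat messages can carry content=None (e.g. tool-call turns);
        # coerce those to "" instead of the string "None".
        parts.append("" if content is None else str(content))
    return "".join(parts)

messages = [{"role": "system", "content": "You are helpful."},
            {"role": "assistant", "content": None}]
assert extract_contents(messages) == "You are helpful."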
nat/plugins/crewai/llm.py CHANGED

@@ -14,67 +14,135 @@
 # limitations under the License.

 import os
+from typing import TypeVar

 from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.cli.register_workflow import register_llm_client
+from nat.data_models.llm import LLMBaseConfig
 from nat.data_models.retry_mixin import RetryMixin
+from nat.data_models.thinking_mixin import ThinkingMixin
+from nat.llm.azure_openai_llm import AzureOpenAIModelConfig
+from nat.llm.litellm_llm import LiteLlmModelConfig
 from nat.llm.nim_llm import NIMModelConfig
 from nat.llm.openai_llm import OpenAIModelConfig
+from nat.llm.utils.thinking import BaseThinkingInjector
+from nat.llm.utils.thinking import FunctionArgumentWrapper
+from nat.llm.utils.thinking import patch_with_thinking
 from nat.utils.exception_handlers.automatic_retries import patch_with_retry
+from nat.utils.type_utils import override

+ModelType = TypeVar("ModelType")

-@register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.CREWAI)
-async def nim_crewai(llm_config: NIMModelConfig, builder: Builder):
-
-    from crewai import LLM
-
-    config_obj = {
-        **llm_config.model_dump(exclude={"type"}, by_alias=True),
-        "model": f"nvidia_nim/{llm_config.model_name}",
-    }

-
-    if ("api_key" not in config_obj or config_obj["api_key"] is None):
-
-        if ("NVIDIA_NIM_API_KEY" in os.environ):
-            # Dont need to do anything. User has already set the correct key
-            pass
-        else:
-            nvidai_api_key = os.getenv("NVIDIA_API_KEY")
+def _patch_llm_based_on_config(client: ModelType, llm_config: LLMBaseConfig) -> ModelType:

-
-            # Transfer the key to the correct environment variable for LiteLLM
-            os.environ["NVIDIA_NIM_API_KEY"] = nvidai_api_key
+    class CrewAIThinkingInjector(BaseThinkingInjector):

-
+        @override
+        def inject(self, messages: list[dict[str, str]], *args, **kwargs) -> FunctionArgumentWrapper:
+            # Attempt to inject the system prompt into the first system message
+            for i, message in enumerate(messages):
+                if message["role"] == "system":
+                    if self.system_prompt not in message["content"]:
+                        messages = list(messages)
+                        messages[i] = {"role": "system", "content": f"{message['content']}\n{self.system_prompt}"}
+                    break
+            else:
+                messages = list(messages)
+                messages.insert(0, {"role": "system", "content": self.system_prompt})
+            return FunctionArgumentWrapper(messages, *args, **kwargs)

     if isinstance(llm_config, RetryMixin):
-
         client = patch_with_retry(client,
                                   retries=llm_config.num_retries,
                                   retry_codes=llm_config.retry_on_status_codes,
                                   retry_on_messages=llm_config.retry_on_errors)

-
+    if isinstance(llm_config, ThinkingMixin) and llm_config.thinking_system_prompt is not None:
+        client = patch_with_thinking(
+            client, CrewAIThinkingInjector(
+                system_prompt=llm_config.thinking_system_prompt,
+                function_names=["call"],
+            ))
+
+    return client
+
+
+@register_llm_client(config_type=AzureOpenAIModelConfig, wrapper_type=LLMFrameworkEnum.CREWAI)
+async def azure_openai_crewai(llm_config: AzureOpenAIModelConfig, _builder: Builder):
+
+    from crewai import LLM
+
+    # https://docs.crewai.com/en/concepts/llms#azure
+
+    api_key = llm_config.api_key or os.environ.get("AZURE_OPENAI_API_KEY") or os.environ.get("AZURE_API_KEY")
+    if api_key is None:
+        raise ValueError("Azure API key is not set")
+    os.environ["AZURE_API_KEY"] = api_key
+    api_base = (llm_config.azure_endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT")
+                or os.environ.get("AZURE_API_BASE"))
+    if api_base is None:
+        raise ValueError("Azure endpoint is not set")
+    os.environ["AZURE_API_BASE"] = api_base
+
+    os.environ["AZURE_API_VERSION"] = llm_config.api_version
+    model = llm_config.azure_deployment or os.environ.get("AZURE_MODEL_DEPLOYMENT")
+    if model is None:
+        raise ValueError("Azure model deployment is not set")
+
+    client = LLM(
+        **llm_config.model_dump(
+            exclude={
+                "type",
+                "api_key",
+                "azure_endpoint",
+                "azure_deployment",
+                "thinking",
+            },
+            by_alias=True,
+            exclude_none=True,
+        ),
+        model=model,
+    )
+
+    yield _patch_llm_based_on_config(client, llm_config)
+
+
+@register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.CREWAI)
+async def nim_crewai(llm_config: NIMModelConfig, _builder: Builder):
+
+    from crewai import LLM
+
+    # Because CrewAI uses a different environment variable for the API key, we need to set it here manually
+    if llm_config.api_key is None and "NVIDIA_NIM_API_KEY" not in os.environ:
+        nvidia_api_key = os.getenv("NVIDIA_API_KEY")
+        if nvidia_api_key is not None:
+            os.environ["NVIDIA_NIM_API_KEY"] = nvidia_api_key
+
+    client = LLM(
+        **llm_config.model_dump(exclude={"type", "model_name", "thinking"}, by_alias=True, exclude_none=True),
+        model=f"nvidia_nim/{llm_config.model_name}",
+    )
+
+    yield _patch_llm_based_on_config(client, llm_config)


 @register_llm_client(config_type=OpenAIModelConfig, wrapper_type=LLMFrameworkEnum.CREWAI)
-async def openai_crewai(llm_config: OpenAIModelConfig,
+async def openai_crewai(llm_config: OpenAIModelConfig, _builder: Builder):

     from crewai import LLM

-    config_obj = {
-        **llm_config.model_dump(exclude={"type"}, by_alias=True),
-    }
+    client = LLM(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True, exclude_none=True))

-    client
+    yield _patch_llm_based_on_config(client, llm_config)

-    if isinstance(llm_config, RetryMixin):

-        client = patch_with_retry(client,
-                                  retries=llm_config.num_retries,
-                                  retry_codes=llm_config.retry_on_status_codes,
-                                  retry_on_messages=llm_config.retry_on_errors)
+@register_llm_client(config_type=LiteLlmModelConfig, wrapper_type=LLMFrameworkEnum.CREWAI)
+async def litellm_crewai(llm_config: LiteLlmModelConfig, _builder: Builder):
+
+    from crewai import LLM
+
+    client = LLM(**llm_config.model_dump(exclude={"type", "thinking"}, by_alias=True, exclude_none=True))

-    yield client
+    yield _patch_llm_based_on_config(client, llm_config)
nat/plugins/crewai/register.py CHANGED

{nvidia_nat_crewai-1.2.1rc1.dist-info → nvidia_nat_crewai-1.3.0.dist-info}/METADATA CHANGED

@@ -1,13 +1,24 @@
 Metadata-Version: 2.4
 Name: nvidia-nat-crewai
-Version: 1.2.1rc1
+Version: 1.3.0
 Summary: Subpackage for CrewAI integration in NeMo Agent toolkit
+Author: NVIDIA Corporation
+Maintainer: NVIDIA Corporation
+License: Apache-2.0
+Project-URL: documentation, https://docs.nvidia.com/nemo/agent-toolkit/latest/
+Project-URL: source, https://github.com/NVIDIA/NeMo-Agent-Toolkit
 Keywords: ai,rag,agents
 Classifier: Programming Language :: Python
-
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: <3.14,>=3.11
 Description-Content-Type: text/markdown
-
-
+License-File: LICENSE-3rd-party.txt
+License-File: LICENSE.md
+Requires-Dist: nvidia-nat[litellm]==v1.3.0
+Requires-Dist: crewai~=0.193.2
+Dynamic: license-file

 <!--
 SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
nvidia_nat_crewai-1.3.0.dist-info/RECORD ADDED

@@ -0,0 +1,13 @@
+nat/meta/pypi.md,sha256=T68FnThRzDGFf1LR8u-okM-r11-skSnKqSyI6HOktQY,1107
+nat/plugins/crewai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nat/plugins/crewai/crewai_callback_handler.py,sha256=il537F5tD9pFL1P9Q38ReOZasD-GgcBrm8BX_w0-xdo,8582
+nat/plugins/crewai/llm.py,sha256=sfQsdSK6WGWHscald4tG4dyXEcn6zqdhkFjE-1zAtJE,5984
+nat/plugins/crewai/register.py,sha256=_R3bhGmz___696_NwyIcpw3koMBiWqIFoWEFJ0VAgXs,831
+nat/plugins/crewai/tool_wrapper.py,sha256=BNKEPQQCLKtXNzGDAKBLCdmGJXe9lBOVI1hObha8hoI,1569
+nvidia_nat_crewai-1.3.0.dist-info/licenses/LICENSE-3rd-party.txt,sha256=fOk5jMmCX9YoKWyYzTtfgl-SUy477audFC5hNY4oP7Q,284609
+nvidia_nat_crewai-1.3.0.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+nvidia_nat_crewai-1.3.0.dist-info/METADATA,sha256=73nP1c_6H0NHvZVzKlfZLNlPCysIQchATIPnFsXtH7w,1904
+nvidia_nat_crewai-1.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nvidia_nat_crewai-1.3.0.dist-info/entry_points.txt,sha256=YF5PUdQGr_OUDXB4TykElHJTsKT8yKkuE0bMX5n_RXs,58
+nvidia_nat_crewai-1.3.0.dist-info/top_level.txt,sha256=8-CJ2cP6-f0ZReXe5Hzqp-5pvzzHz-5Ds5H2bGqh1-U,4
+nvidia_nat_crewai-1.3.0.dist-info/RECORD,,
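RECORD lines follow the standard wheel format path,sha256=<digest>,<size>, where the digest is the urlsafe base64 of the raw SHA-256 with trailing '=' padding stripped. A small sketch to recompute an entry from an unpacked wheel, for spot-checking the hashes above:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    # Recompute a RECORD line for one file: urlsafe-base64 SHA-256 digest,
    # padding stripped, plus the file size in bytes.
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# e.g. record_entry("nat/plugins/crewai/llm.py") should reproduce the
# corresponding line above if the file matches the published wheel.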