nvidia-nat-mcp 1.4.0a20251103__py3-none-any.whl → 1.5.0a20260118__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- nat/meta/pypi.md +1 -1
- nat/plugins/mcp/__init__.py +1 -1
- nat/plugins/mcp/auth/__init__.py +1 -1
- nat/plugins/mcp/auth/auth_flow_handler.py +1 -1
- nat/plugins/mcp/auth/auth_provider.py +3 -2
- nat/plugins/mcp/auth/auth_provider_config.py +2 -1
- nat/plugins/mcp/auth/register.py +9 -1
- nat/plugins/mcp/auth/service_account/__init__.py +14 -0
- nat/plugins/mcp/auth/service_account/provider.py +136 -0
- nat/plugins/mcp/auth/service_account/provider_config.py +137 -0
- nat/plugins/mcp/auth/service_account/token_client.py +156 -0
- nat/plugins/mcp/auth/token_storage.py +2 -2
- nat/plugins/mcp/cli/__init__.py +15 -0
- nat/plugins/mcp/cli/commands.py +1094 -0
- nat/plugins/mcp/client/__init__.py +15 -0
- nat/plugins/mcp/{client_base.py → client/client_base.py} +18 -10
- nat/plugins/mcp/{client_config.py → client/client_config.py} +24 -9
- nat/plugins/mcp/{client_impl.py → client/client_impl.py} +253 -62
- nat/plugins/mcp/exception_handler.py +1 -1
- nat/plugins/mcp/exceptions.py +1 -1
- nat/plugins/mcp/register.py +5 -4
- nat/plugins/mcp/server/__init__.py +15 -0
- nat/plugins/mcp/server/front_end_config.py +109 -0
- nat/plugins/mcp/server/front_end_plugin.py +155 -0
- nat/plugins/mcp/server/front_end_plugin_worker.py +415 -0
- nat/plugins/mcp/server/introspection_token_verifier.py +72 -0
- nat/plugins/mcp/server/memory_profiler.py +320 -0
- nat/plugins/mcp/server/register_frontend.py +27 -0
- nat/plugins/mcp/server/tool_converter.py +290 -0
- nat/plugins/mcp/utils.py +1 -1
- {nvidia_nat_mcp-1.4.0a20251103.dist-info → nvidia_nat_mcp-1.5.0a20260118.dist-info}/METADATA +5 -5
- nvidia_nat_mcp-1.5.0a20260118.dist-info/RECORD +37 -0
- nvidia_nat_mcp-1.5.0a20260118.dist-info/entry_points.txt +9 -0
- nat/plugins/mcp/tool.py +0 -138
- nvidia_nat_mcp-1.4.0a20251103.dist-info/RECORD +0 -23
- nvidia_nat_mcp-1.4.0a20251103.dist-info/entry_points.txt +0 -3
- {nvidia_nat_mcp-1.4.0a20251103.dist-info → nvidia_nat_mcp-1.5.0a20260118.dist-info}/WHEEL +0 -0
- {nvidia_nat_mcp-1.4.0a20251103.dist-info → nvidia_nat_mcp-1.5.0a20260118.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
- {nvidia_nat_mcp-1.4.0a20251103.dist-info → nvidia_nat_mcp-1.5.0a20260118.dist-info}/licenses/LICENSE.md +0 -0
- {nvidia_nat_mcp-1.4.0a20251103.dist-info → nvidia_nat_mcp-1.5.0a20260118.dist-info}/top_level.txt +0 -0
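As the renames above show, the client modules moved from the package root into a client/ subpackage in 1.5.0, so code that imports them by module path needs updating. A minimal compatibility sketch (not part of the diff), assuming the old paths are simply gone in 1.5.0 and no re-exports are left behind; only the module paths come from the file list, the fallback logic is illustrative:

import importlib

# Prefer the 1.5.0a20260118 layout; fall back to the 1.4.0a20251103 layout if the
# client/ subpackage is not present. Specific class names inside these modules are
# not shown in this diff, so only module paths are referenced here.
try:
    client_base = importlib.import_module("nat.plugins.mcp.client.client_base")
except ModuleNotFoundError:
    client_base = importlib.import_module("nat.plugins.mcp.client_base")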
nat/plugins/mcp/server/introspection_token_verifier.py
@@ -0,0 +1,72 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""OAuth 2.0 Token Introspection verifier implementation for MCP servers."""
+
+import logging
+
+from mcp.server.auth.provider import AccessToken
+from mcp.server.auth.provider import TokenVerifier
+from nat.authentication.credential_validator.bearer_token_validator import BearerTokenValidator
+from nat.authentication.oauth2.oauth2_resource_server_config import OAuth2ResourceServerConfig
+
+logger = logging.getLogger(__name__)
+
+
+class IntrospectionTokenVerifier(TokenVerifier):
+    """Token verifier that delegates token verification to BearerTokenValidator."""
+
+    def __init__(self, config: OAuth2ResourceServerConfig):
+        """Create IntrospectionTokenVerifier from OAuth2ResourceServerConfig.
+
+        Args:
+            config: OAuth2ResourceServerConfig
+        """
+        issuer = config.issuer_url
+        scopes = config.scopes or []
+        audience = config.audience
+        jwks_uri = config.jwks_uri
+        introspection_endpoint = config.introspection_endpoint
+        discovery_url = config.discovery_url
+        client_id = config.client_id
+        client_secret = config.client_secret
+
+        self._bearer_token_validator = BearerTokenValidator(
+            issuer=issuer,
+            audience=audience,
+            scopes=scopes,
+            jwks_uri=jwks_uri,
+            introspection_endpoint=introspection_endpoint,
+            discovery_url=discovery_url,
+            client_id=client_id,
+            client_secret=client_secret,
+        )
+
+    async def verify_token(self, token: str) -> AccessToken | None:
+        """Verify token by delegating to BearerTokenValidator.
+
+        Args:
+            token: The Bearer token to verify
+
+        Returns:
+            AccessToken | None: AccessToken if valid, None if invalid
+        """
+        validation_result = await self._bearer_token_validator.verify(token)
+
+        if validation_result.active:
+            return AccessToken(token=token,
+                               expires_at=validation_result.expires_at,
+                               scopes=validation_result.scopes or [],
+                               client_id=validation_result.client_id or "")
+        return None
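The new verifier above wraps BearerTokenValidator behind the MCP SDK's TokenVerifier interface. A minimal usage sketch (not part of the diff), assuming OAuth2ResourceServerConfig accepts the fields read in __init__ as keyword arguments; the config class itself and which of its fields are required are not shown in this diff:

import asyncio

from nat.authentication.oauth2.oauth2_resource_server_config import OAuth2ResourceServerConfig
from nat.plugins.mcp.server.introspection_token_verifier import IntrospectionTokenVerifier


async def main() -> None:
    # Field names mirror the attributes read by IntrospectionTokenVerifier.__init__;
    # the exact constructor signature is an assumption.
    config = OAuth2ResourceServerConfig(
        issuer_url="https://auth.example.com",
        introspection_endpoint="https://auth.example.com/oauth2/introspect",
        client_id="mcp-server",
        client_secret="change-me",
        scopes=["mcp:read"],
    )
    verifier = IntrospectionTokenVerifier(config)

    # verify_token returns an mcp AccessToken when the token is active, else None.
    access_token = await verifier.verify_token("opaque-or-jwt-token")
    print(access_token is not None)


asyncio.run(main())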
nat/plugins/mcp/server/memory_profiler.py
@@ -0,0 +1,320 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Memory profiling utilities for MCP frontend."""
+
+import gc
+import logging
+import tracemalloc
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+
+class MemoryProfiler:
+    """Memory profiler for tracking memory usage and potential leaks."""
+
+    def __init__(self, enabled: bool = False, log_interval: int = 50, top_n: int = 10, log_level: str = "DEBUG"):
+        """Initialize the memory profiler.
+
+        Args:
+            enabled: Whether memory profiling is enabled
+            log_interval: Log stats every N requests
+            top_n: Number of top allocations to log
+            log_level: Log level for memory profiling output (e.g., "DEBUG", "INFO")
+        """
+        self.enabled = enabled
+        # normalize interval to avoid modulo-by-zero
+        self.log_interval = max(1, int(log_interval))
+        self.top_n = top_n
+        self.log_level = getattr(logging, log_level.upper(), logging.DEBUG)
+        self.request_count = 0
+        self.baseline_snapshot = None
+
+        # Track whether this instance started tracemalloc (to avoid resetting external tracing)
+        self._we_started_tracemalloc = False
+
+        if self.enabled:
+            logger.info("Memory profiling ENABLED (interval=%d, top_n=%d, log_level=%s)",
+                        self.log_interval,
+                        top_n,
+                        log_level)
+            try:
+                if not tracemalloc.is_tracing():
+                    tracemalloc.start()
+                    self._we_started_tracemalloc = True
+                # Take baseline snapshot
+                gc.collect()
+                self.baseline_snapshot = tracemalloc.take_snapshot()
+            except RuntimeError as e:
+                logger.warning("tracemalloc unavailable or not tracing: %s", e)
+        else:
+            logger.info("Memory profiling DISABLED")
+
+    def _log(self, message: str, *args: Any) -> None:
+        """Log a message at the configured log level.
+
+        Args:
+            message: Log message format string
+            args: Arguments for the format string
+        """
+        logger.log(self.log_level, message, *args)
+
+    def on_request_complete(self) -> None:
+        """Called after each request completes."""
+        if not self.enabled:
+            return
+        self.request_count += 1
+        if self.request_count % self.log_interval == 0:
+            self.log_memory_stats()
+
+    def _ensure_tracing(self) -> bool:
+        """Ensure tracemalloc is running if we started it originally.
+
+        Returns:
+            True if tracemalloc is active, False otherwise
+        """
+        if tracemalloc.is_tracing():
+            return True
+
+        # Only restart if we started it originally (respect external control)
+        if not self._we_started_tracemalloc:
+            return False
+
+        # Attempt to restart
+        try:
+            logger.warning("tracemalloc was stopped externally; restarting (we started it originally)")
+            tracemalloc.start()
+
+            # Reset baseline since old tracking data is lost
+            gc.collect()
+            self.baseline_snapshot = tracemalloc.take_snapshot()
+            logger.info("Baseline snapshot reset after tracemalloc restart")
+
+            return True
+        except RuntimeError as e:
+            logger.error("Failed to restart tracemalloc: %s", e)
+            return False
+
+    def _safe_traced_memory(self) -> tuple[float, float] | None:
+        """Return (current, peak usage in MB) if tracemalloc is active, else None."""
+        if not self._ensure_tracing():
+            return None
+
+        try:
+            current, peak = tracemalloc.get_traced_memory()
+            megabyte = (1 << 20)
+            return (current / megabyte, peak / megabyte)
+        except RuntimeError:
+            return None
+
+    def _safe_snapshot(self) -> tracemalloc.Snapshot | None:
+        """Return a tracemalloc Snapshot if available, else None."""
+        if not self._ensure_tracing():
+            return None
+
+        try:
+            return tracemalloc.take_snapshot()
+        except RuntimeError:
+            return None
+
+    def log_memory_stats(self) -> dict[str, Any]:
+        """Log current memory statistics and return them."""
+        if not self.enabled:
+            return {}
+
+        # Force garbage collection first
+        gc.collect()
+
+        # Get current memory usage
+        mem = self._safe_traced_memory()
+        if mem is None:
+            logger.info("tracemalloc is not active; cannot collect memory stats.")
+            # still return structural fields
+            stats = {
+                "request_count": self.request_count,
+                "current_memory_mb": None,
+                "peak_memory_mb": None,
+                "active_intermediate_managers": self._safe_intermediate_step_manager_count(),
+                "outstanding_steps": self._safe_outstanding_step_count(),
+                "active_exporters": self._safe_exporter_count(),
+                "isolated_exporters": self._safe_isolated_exporter_count(),
+                "subject_instances": self._count_instances_of_type("Subject"),
+            }
+            return stats
+
+        current_mb, peak_mb = mem
+
+        # Take snapshot and compare to baseline
+        snapshot = self._safe_snapshot()
+
+        # Track BaseExporter instances (observability layer)
+        exporter_count = self._safe_exporter_count()
+        isolated_exporter_count = self._safe_isolated_exporter_count()
+
+        # Track Subject instances (event streams)
+        subject_count = self._count_instances_of_type("Subject")
+
+        stats = {
+            "request_count": self.request_count,
+            "current_memory_mb": round(current_mb, 2),
+            "peak_memory_mb": round(peak_mb, 2),
+            "active_intermediate_managers": self._safe_intermediate_step_manager_count(),
+            "outstanding_steps": self._safe_outstanding_step_count(),
+            "active_exporters": exporter_count,
+            "isolated_exporters": isolated_exporter_count,
+            "subject_instances": subject_count,
+        }
+
+        self._log("=" * 80)
+        self._log("MEMORY PROFILE AFTER %d REQUESTS:", self.request_count)
+        self._log("  Current Memory: %.2f MB", current_mb)
+        self._log("  Peak Memory: %.2f MB", peak_mb)
+        self._log("")
+        self._log("NAT COMPONENT INSTANCES:")
+        self._log("  IntermediateStepManagers: %d active (%d outstanding steps)",
+                  stats["active_intermediate_managers"],
+                  stats["outstanding_steps"])
+        self._log("  BaseExporters: %d active (%d isolated)", stats["active_exporters"], stats["isolated_exporters"])
+        self._log("  Subject (event streams): %d instances", stats["subject_instances"])
+
+        # Show top allocations
+        if snapshot is None:
+            self._log("tracemalloc snapshot unavailable.")
+        else:
+            if self.baseline_snapshot:
+                self._log("TOP %d MEMORY GROWTH SINCE BASELINE:", self.top_n)
+                top_stats = snapshot.compare_to(self.baseline_snapshot, 'lineno')
+            else:
+                self._log("TOP %d MEMORY ALLOCATIONS:", self.top_n)
+                top_stats = snapshot.statistics('lineno')
+
+            for i, stat in enumerate(top_stats[:self.top_n], 1):
+                self._log("  #%d: %s", i, stat)
+
+        self._log("=" * 80)
+
+        return stats
+
+    def _count_instances_of_type(self, type_name: str) -> int:
+        """Count instances of a specific type in memory."""
+        count = 0
+        for obj in gc.get_objects():
+            try:
+                if type(obj).__name__ == type_name:
+                    count += 1
+            except Exception:
+                pass
+        return count
+
+    def _safe_exporter_count(self) -> int:
+        try:
+            from nat.observability.exporter.base_exporter import BaseExporter
+            return BaseExporter.get_active_instance_count()
+        except Exception as e:
+            logger.debug("Could not get BaseExporter stats: %s", e)
+            return 0
+
+    def _safe_isolated_exporter_count(self) -> int:
+        try:
+            from nat.observability.exporter.base_exporter import BaseExporter
+            return BaseExporter.get_isolated_instance_count()
+        except Exception:
+            return 0
+
+    def _safe_intermediate_step_manager_count(self) -> int:
+        try:
+            from nat.builder.intermediate_step_manager import IntermediateStepManager
+            # len() is atomic in CPython, but catch RuntimeError just in case
+            try:
+                return IntermediateStepManager.get_active_instance_count()
+            except RuntimeError:
+                # Set was modified during len() - very rare
+                logger.debug("Set changed during count, returning 0")
+                return 0
+        except Exception as e:
+            logger.debug("Could not get IntermediateStepManager stats: %s", e)
+            return 0
+
+    def _safe_outstanding_step_count(self) -> int:
+        """Get total outstanding steps across all active IntermediateStepManager instances."""
+        try:
+            from nat.builder.intermediate_step_manager import IntermediateStepManager
+
+            # Make a snapshot to avoid "Set changed size during iteration" if GC runs
+            try:
+                instances_snapshot = list(IntermediateStepManager._active_instances)
+            except RuntimeError:
+                # Set changed during list() call - rare but possible
+                logger.debug("Set changed during snapshot, returning 0 for outstanding steps")
+                return 0
+
+            total_outstanding = 0
+            # Iterate through snapshot safely
+            for ref in instances_snapshot:
+                try:
+                    manager = ref()
+                    if manager is not None:
+                        total_outstanding += manager.get_outstanding_step_count()
+                except (ReferenceError, AttributeError):
+                    # Manager was GC'd or in invalid state - skip it
+                    continue
+            return total_outstanding
+        except Exception as e:
+            logger.debug("Could not get outstanding step count: %s", e)
+            return 0
+
+    def get_stats(self) -> dict[str, Any]:
+        """Get current memory statistics without logging."""
+        if not self.enabled:
+            return {"enabled": False}
+
+        mem = self._safe_traced_memory()
+        if mem is None:
+            return {
+                "enabled": True,
+                "request_count": self.request_count,
+                "current_memory_mb": None,
+                "peak_memory_mb": None,
+                "active_intermediate_managers": self._safe_intermediate_step_manager_count(),
+                "outstanding_steps": self._safe_outstanding_step_count(),
+                "active_exporters": self._safe_exporter_count(),
+                "isolated_exporters": self._safe_isolated_exporter_count(),
+                "subject_instances": self._count_instances_of_type("Subject"),
+            }
+
+        current_mb, peak_mb = mem
+        return {
+            "enabled": True,
+            "request_count": self.request_count,
+            "current_memory_mb": round(current_mb, 2),
+            "peak_memory_mb": round(peak_mb, 2),
+            "active_intermediate_managers": self._safe_intermediate_step_manager_count(),
+            "outstanding_steps": self._safe_outstanding_step_count(),
+            "active_exporters": self._safe_exporter_count(),
+            "isolated_exporters": self._safe_isolated_exporter_count(),
+            "subject_instances": self._count_instances_of_type("Subject"),
+        }
+
+    def reset_baseline(self) -> None:
+        """Reset the baseline snapshot to current state."""
+        if not self.enabled:
+            return
+        gc.collect()
+        snap = self._safe_snapshot()
+        if snap is None:
+            logger.info("Cannot reset baseline: tracemalloc is not active.")
+            return
+        self.baseline_snapshot = snap
+        logger.info("Memory profiling baseline reset at request %d", self.request_count)
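MemoryProfiler depends only on the stdlib (gc, tracemalloc) plus optional NAT imports that are caught if unavailable, so it can be exercised outside the MCP worker. A minimal sketch (not part of the diff) of the intended call pattern, assuming the hosting worker calls on_request_complete() once per handled request (the actual wiring lives in front_end_plugin_worker.py, not shown here):

from nat.plugins.mcp.server.memory_profiler import MemoryProfiler

# Log at INFO every 10 requests instead of the default DEBUG/50.
profiler = MemoryProfiler(enabled=True, log_interval=10, top_n=5, log_level="INFO")

for _ in range(30):
    # ... handle one MCP request here ...
    profiler.on_request_complete()   # every 10th call triggers log_memory_stats()

stats = profiler.get_stats()         # same numbers as the log output, without logging
print(stats["request_count"], stats["current_memory_mb"])

profiler.reset_baseline()            # growth comparisons now start from this point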
nat/plugins/mcp/server/register_frontend.py
@@ -0,0 +1,27 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections.abc import AsyncIterator
+
+from nat.cli.register_workflow import register_front_end
+from nat.data_models.config import Config
+from nat.plugins.mcp.server.front_end_config import MCPFrontEndConfig
+
+
+@register_front_end(config_type=MCPFrontEndConfig)
+async def register_mcp_front_end(config: MCPFrontEndConfig, full_config: Config) -> AsyncIterator:
+    from nat.plugins.mcp.server.front_end_plugin import MCPFrontEndPlugin
+
+    yield MCPFrontEndPlugin(full_config=full_config)