datarobot-genai 0.2.23__py3-none-any.whl → 0.2.25__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- datarobot_genai/drmcp/core/dr_mcp_server.py +0 -3
- datarobot_genai/drmcp/core/mcp_instance.py +3 -88
- datarobot_genai/drmcp/core/tool_filter.py +10 -1
- datarobot_genai/drmcp/tools/predictive/project.py +45 -27
- datarobot_genai/drmcp/tools/predictive/training.py +160 -151
- {datarobot_genai-0.2.23.dist-info → datarobot_genai-0.2.25.dist-info}/METADATA +1 -1
- {datarobot_genai-0.2.23.dist-info → datarobot_genai-0.2.25.dist-info}/RECORD +11 -12
- datarobot_genai/drmcp/core/mcp_server_tools.py +0 -129
- {datarobot_genai-0.2.23.dist-info → datarobot_genai-0.2.25.dist-info}/WHEEL +0 -0
- {datarobot_genai-0.2.23.dist-info → datarobot_genai-0.2.25.dist-info}/entry_points.txt +0 -0
- {datarobot_genai-0.2.23.dist-info → datarobot_genai-0.2.25.dist-info}/licenses/AUTHORS +0 -0
- {datarobot_genai-0.2.23.dist-info → datarobot_genai-0.2.25.dist-info}/licenses/LICENSE +0 -0
```diff
--- a/datarobot_genai/drmcp/core/dr_mcp_server.py
+++ b/datarobot_genai/drmcp/core/dr_mcp_server.py
@@ -31,9 +31,6 @@ from .dynamic_prompts.register import register_prompts_from_datarobot_prompt_man
 from .dynamic_tools.deployment.register import register_tools_of_datarobot_deployments
 from .logging import MCPLogging
 from .mcp_instance import mcp
-from .mcp_server_tools import get_all_available_tags  # noqa # pylint: disable=unused-import
-from .mcp_server_tools import get_tool_info_by_name  # noqa # pylint: disable=unused-import
-from .mcp_server_tools import list_tools_by_tags  # noqa # pylint: disable=unused-import
 from .memory_management.manager import MemoryManager
 from .routes import register_routes
 from .routes_utils import prefix_mount_path
```
```diff
--- a/datarobot_genai/drmcp/core/mcp_instance.py
+++ b/datarobot_genai/drmcp/core/mcp_instance.py
@@ -16,17 +16,13 @@ import logging
 from collections.abc import Callable
 from functools import wraps
 from typing import Any
-from typing import overload
 
 from fastmcp import Context
 from fastmcp import FastMCP
 from fastmcp.exceptions import NotFoundError
 from fastmcp.prompts.prompt import Prompt
 from fastmcp.server.dependencies import get_context
-from fastmcp.tools import FunctionTool
 from fastmcp.tools import Tool
-from fastmcp.utilities.types import NotSet
-from fastmcp.utilities.types import NotSetT
 from mcp.types import AnyFunction
 from mcp.types import Tool as MCPTool
 from mcp.types import ToolAnnotations
```
```diff
--- a/datarobot_genai/drmcp/core/mcp_instance.py
+++ b/datarobot_genai/drmcp/core/mcp_instance.py
@@ -120,86 +116,6 @@ class TaggedFastMCP(FastMCP):
                 "In stateless mode, clients will see changes on next request."
             )
 
-    @overload
-    def tool(
-        self,
-        name_or_fn: AnyFunction,
-        *,
-        name: str | None = None,
-        title: str | None = None,
-        description: str | None = None,
-        tags: set[str] | None = None,
-        output_schema: dict[str, Any] | None | NotSetT = NotSet,
-        annotations: ToolAnnotations | dict[str, Any] | None = None,
-        exclude_args: list[str] | None = None,
-        meta: dict[str, Any] | None = None,
-        enabled: bool | None = None,
-    ) -> FunctionTool: ...
-
-    @overload
-    def tool(
-        self,
-        name_or_fn: str | None = None,
-        *,
-        name: str | None = None,
-        title: str | None = None,
-        description: str | None = None,
-        tags: set[str] | None = None,
-        output_schema: dict[str, Any] | None | NotSetT = NotSet,
-        annotations: ToolAnnotations | dict[str, Any] | None = None,
-        exclude_args: list[str] | None = None,
-        meta: dict[str, Any] | None = None,
-        enabled: bool | None = None,
-    ) -> Callable[[AnyFunction], FunctionTool]: ...
-
-    def tool(
-        self,
-        name_or_fn: str | Callable[..., Any] | None = None,
-        *,
-        name: str | None = None,
-        title: str | None = None,
-        description: str | None = None,
-        tags: set[str] | None = None,
-        output_schema: dict[str, Any] | None | NotSetT = NotSet,
-        annotations: ToolAnnotations | dict[str, Any] | None = None,
-        exclude_args: list[str] | None = None,
-        meta: dict[str, Any] | None = None,
-        enabled: bool | None = None,
-        **kwargs: Any,
-    ) -> Callable[[AnyFunction], FunctionTool] | FunctionTool:
-        """
-        Extend tool decorator that supports tags and other annotations, while remaining
-        signature-compatible with FastMCP.tool to avoid recursion issues with partials.
-        """
-        if isinstance(annotations, dict):
-            annotations = ToolAnnotations(**annotations)
-
-        # Ensure tags are available both via native fastmcp `tags` and inside annotations
-        if tags is not None:
-            tags_ = sorted(tags)
-            if annotations is None:
-                annotations = ToolAnnotations()  # type: ignore[call-arg]
-                annotations.tags = tags_  # type: ignore[attr-defined, union-attr]
-            else:
-                # At this point, annotations is ToolAnnotations (not dict)
-                assert isinstance(annotations, ToolAnnotations)
-                annotations.tags = tags_  # type: ignore[attr-defined]
-
-        return super().tool(
-            name_or_fn,
-            name=name,
-            title=title,
-            description=description,
-            tags=tags,
-            output_schema=output_schema
-            if output_schema is not None
-            else kwargs.get("output_schema"),
-            annotations=annotations,
-            exclude_args=exclude_args,
-            meta=meta,
-            enabled=enabled,
-        )
-
     async def list_tools(
         self, tags: list[str] | None = None, match_all: bool = False
     ) -> list[MCPTool]:
```
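The removed `tool()` override existed only to mirror `tags` into `ToolAnnotations`; the updated `get_tool_tags` further down suggests FastMCP now keeps tags in its own metadata, so the plain decorator suffices. A minimal sketch, assuming a current FastMCP release (the server name and toy tool below are invented for illustration):

```python
from fastmcp import FastMCP

mcp = FastMCP("example-server")  # hypothetical server name


# Tags go straight to FastMCP's native decorator; no ToolAnnotations bookkeeping needed.
@mcp.tool(tags={"predictive", "project", "read"})
async def ping() -> str:
    """Toy tool used only to show native tag registration."""
    return "pong"
```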
```diff
--- a/datarobot_genai/drmcp/core/mcp_instance.py
+++ b/datarobot_genai/drmcp/core/mcp_instance.py
@@ -488,11 +404,10 @@ async def register_tools(
     # Apply dr_mcp_extras to the memory-aware function
     wrapped_fn = dr_mcp_extras()(memory_aware_fn)
 
-    # Create annotations
-    annotations =
-    if tags is not None:
-        annotations.tags = tags  # type: ignore[attr-defined]
+    # Create annotations only when additional metadata is required
+    annotations: ToolAnnotations | None = None  # type: ignore[assignment]
     if deployment_id is not None:
+        annotations = ToolAnnotations()  # type: ignore[call-arg]
         annotations.deployment_id = deployment_id  # type: ignore[attr-defined]
 
     tool = Tool.from_function(
```
```diff
--- a/datarobot_genai/drmcp/core/tool_filter.py
+++ b/datarobot_genai/drmcp/core/tool_filter.py
@@ -41,7 +41,7 @@ def filter_tools_by_tags(
     filtered_tools = []
 
     for tool in tools:
-        tool_tags =
+        tool_tags = get_tool_tags(tool)
 
         if not tool_tags:
             continue
```
```diff
--- a/datarobot_genai/drmcp/core/tool_filter.py
+++ b/datarobot_genai/drmcp/core/tool_filter.py
@@ -68,9 +68,18 @@ def get_tool_tags(tool: Tool | MCPTool) -> list[str]:
     -------
     List of tags for the tool
     """
+    # Primary: native FastMCP meta location
+    if hasattr(tool, "meta") and getattr(tool, "meta"):
+        fastmcp_meta = tool.meta.get("_fastmcp", {})
+        meta_tags = fastmcp_meta.get("tags", [])
+        if isinstance(meta_tags, list):
+            return meta_tags
+
+    # Fallback: annotations.tags (for compatibility during transition)
     if tool.annotations and hasattr(tool.annotations, "tags"):
         tags = getattr(tool.annotations, "tags", [])
         return tags if isinstance(tags, list) else []
+
     return []
 
 
```
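The updated `get_tool_tags` now reads FastMCP's native location first (`meta["_fastmcp"]["tags"]`) and only falls back to `annotations.tags` during the transition. A small sketch of that lookup order on a plain dict, with invented tag values:

```python
# Illustration only: a dict standing in for a Tool's meta attribute.
# "_fastmcp" is the key the new code reads; the tag values are made up.
meta = {"_fastmcp": {"tags": ["predictive", "project", "read"]}}

fastmcp_meta = meta.get("_fastmcp", {})
tags = fastmcp_meta.get("tags", [])
if not isinstance(tags, list):
    tags = []

print(tags)  # ['predictive', 'project', 'read']
```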
```diff
--- a/datarobot_genai/drmcp/tools/predictive/project.py
+++ b/datarobot_genai/drmcp/tools/predictive/project.py
@@ -14,6 +14,10 @@
 
 import json
 import logging
+from typing import Annotated
+
+from fastmcp.exceptions import ToolError
+from fastmcp.tools.tool import ToolResult
 
 from datarobot_genai.drmcp.core.clients import get_sdk_client
 from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/project.py
+++ b/datarobot_genai/drmcp/tools/predictive/project.py
@@ -21,35 +25,39 @@ from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
 logger = logging.getLogger(__name__)
 
 
-@dr_mcp_tool(tags={"project", "management", "list"})
-async def list_projects() ->
-    """
-    List all DataRobot projects for the authenticated user.
-
-    Returns
-    -------
-    A string summary of the user's DataRobot projects.
-    """
+@dr_mcp_tool(tags={"predictive", "project", "read", "management", "list"})
+async def list_projects() -> ToolResult:
+    """List all DataRobot projects for the authenticated user."""
     client = get_sdk_client()
     projects = client.Project.list()
-
-        return "No projects found."
-    return "\n".join(f"{p.id}: {p.project_name}" for p in projects)
+    projects = {p.id: p.project_name for p in projects}
 
+    return ToolResult(
+        content=(
+            json.dumps(projects, indent=2)
+            if projects
+            else json.dumps({"message": "No projects found."}, indent=2)
+        ),
+        structured_content=projects,
+    )
 
-@dr_mcp_tool(tags={"project", "data", "info"})
-async def get_project_dataset_by_name(project_id: str, dataset_name: str) -> str:
-    """
-    Get a dataset ID by name for a given project.
 
-
-
-
+@dr_mcp_tool(tags={"predictive", "project", "read", "data", "info"})
+async def get_project_dataset_by_name(
+    *,
+    project_id: Annotated[str, "The ID of the DataRobot project."] | None = None,
+    dataset_name: Annotated[str, "The name of the dataset to find (e.g., 'training', 'holdout')."]
+    | None = None,
+) -> ToolError | ToolResult:
+    """Get a dataset ID by name for a given project.
 
-
-    -------
-    The dataset ID and the dataset type (source or prediction) as a string, or an error message.
+    The dataset ID and the dataset type (source or prediction) as a string, or an error message.
     """
+    if not project_id:
+        return ToolError("Project ID is required.")
+    if not dataset_name:
+        return ToolError("Dataset name is required.")
+
     client = get_sdk_client()
     project = client.Project.get(project_id)
     all_datasets = []
```
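The tools now return a `ToolResult` carrying both a human-readable JSON string (`content`) and a machine-readable payload (`structured_content`) instead of a bare string. A standalone sketch of the same pattern, using made-up project data:

```python
import json

from fastmcp.tools.tool import ToolResult

# Hypothetical payload; list_projects builds the same {id: name} mapping from the SDK.
projects = {"proj-1": "Churn", "proj-2": "Pricing"}

result = ToolResult(
    content=json.dumps(projects, indent=2),  # text shown to the model/user
    structured_content=projects,             # structured data for programmatic clients
)
```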
```diff
--- a/datarobot_genai/drmcp/tools/predictive/project.py
+++ b/datarobot_genai/drmcp/tools/predictive/project.py
@@ -61,12 +69,22 @@ async def get_project_dataset_by_name(project_id: str, dataset_name: str) -> str
     all_datasets.extend([{"type": "prediction", "dataset": ds} for ds in prediction_datasets])
     for ds in all_datasets:
         if dataset_name.lower() in ds["dataset"].name.lower():
-            return
-
+            return ToolResult(
+                content=(
+                    json.dumps(
+                        {
+                            "dataset_id": ds["dataset"].id,
+                            "dataset_type": ds["type"],
+                        },
+                        indent=2,
+                    )
+                ),
+                structured_content={
                     "dataset_id": ds["dataset"].id,
                     "dataset_type": ds["type"],
-                    "ui_panel": ["dataset"],
                 },
-                indent=2,
             )
-    return
+    return ToolResult(
+        content=f"Dataset with name containing '{dataset_name}' not found in project {project_id}.",
+        structured_content={},
+    )
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -18,8 +18,11 @@ import json
 import logging
 from dataclasses import asdict
 from dataclasses import dataclass
+from typing import Annotated
 
 import pandas as pd
+from fastmcp.exceptions import ToolError
+from fastmcp.tools.tool import ToolResult
 
 from datarobot_genai.drmcp.core.clients import get_sdk_client
 from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -53,22 +56,15 @@ class DatasetInsight:
     missing_data_summary: dict[str, float]
 
 
-@dr_mcp_tool(tags={"training", "analysis", "dataset"})
-async def analyze_dataset(
-
-
+@dr_mcp_tool(tags={"predictive", "training", "read", "analysis", "dataset"})
+async def analyze_dataset(
+    *,
+    dataset_id: Annotated[str, "The ID of the DataRobot dataset to analyze"] | None = None,
+) -> ToolError | ToolResult:
+    """Analyze a dataset to understand its structure and potential use cases."""
+    if not dataset_id:
+        return ToolError("Dataset ID must be provided")
 
-    Args:
-        dataset_id: The ID of the DataRobot dataset to analyze
-
-    Returns
-    -------
-    JSON string containing dataset insights including:
-    - Basic statistics (rows, columns)
-    - Column types (numerical, categorical, datetime, text)
-    - Potential target columns
-    - Missing data summary
-    """
     client = get_sdk_client()
     dataset = client.Dataset.get(dataset_id)
     df = dataset.get_as_dataframe()
```
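The rewritten signatures make every argument keyword-only, describe it with `typing.Annotated`, and default it to `None` so the tool can hand back a `ToolError` with a clear message instead of failing on a missing argument. A toy tool mirroring that pattern (the tool name and messages are invented):

```python
from typing import Annotated

from fastmcp.exceptions import ToolError
from fastmcp.tools.tool import ToolResult


async def describe_dataset(  # hypothetical tool, only to show the signature style
    *,
    dataset_id: Annotated[str, "The ID of the DataRobot dataset to analyze"] | None = None,
) -> ToolError | ToolResult:
    """Toy example of the keyword-only, Annotated, None-defaulted parameter pattern."""
    if not dataset_id:
        return ToolError("Dataset ID must be provided")
    return ToolResult(content=f"dataset {dataset_id}", structured_content={"id": dataset_id})
```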
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -105,27 +101,23 @@ async def analyze_dataset(dataset_id: str) -> str:
         potential_targets=potential_targets,
         missing_data_summary=missing_data,
     )
+    insights_dict = asdict(insights)
 
-    return
-
+    return ToolResult(
+        content=json.dumps(insights_dict, indent=2),
+        structured_content=insights_dict,
+    )
 
-@dr_mcp_tool(tags={"training", "analysis", "usecase"})
-async def suggest_use_cases(dataset_id: str) -> str:
-    """
-    Analyze a dataset and suggest potential machine learning use cases.
 
-
-
+@dr_mcp_tool(tags={"predictive", "training", "read", "analysis", "usecase"})
+async def suggest_use_cases(
+    *,
+    dataset_id: Annotated[str, "The ID of the DataRobot dataset to analyze"] | None = None,
+) -> ToolError | ToolResult:
+    """Analyze a dataset and suggest potential machine learning use cases."""
+    if not dataset_id:
+        return ToolError("Dataset ID must be provided")
 
-    Returns
-    -------
-    JSON string containing suggested use cases with:
-    - Use case name and description
-    - Suggested target column
-    - Problem type
-    - Confidence score
-    - Reasoning for the suggestion
-    """
     client = get_sdk_client()
     dataset = client.Dataset.get(dataset_id)
     df = dataset.get_as_dataframe()
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -141,27 +133,23 @@ async def suggest_use_cases(dataset_id: str) -> str:
 
     # Sort by confidence score
     suggestions.sort(key=lambda x: x["confidence"], reverse=True)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    - Feature correlations with target
-    - Missing data analysis
-    - Data type distribution
-    """
+
+    return ToolResult(
+        content=json.dumps(suggestions, indent=2),
+        structured_content={"use_case_suggestions": suggestions},
+    )
+
+
+@dr_mcp_tool(tags={"predictive", "training", "read", "analysis", "eda"})
+async def get_exploratory_insights(
+    *,
+    dataset_id: Annotated[str, "The ID of the DataRobot dataset to analyze"] | None = None,
+    target_col: Annotated[str, "Optional target column to focus EDA insights on"] | None = None,
+) -> ToolError | ToolResult:
+    """Generate exploratory data insights for a dataset."""
+    if not dataset_id:
+        return ToolError("Dataset ID must be provided")
+
     client = get_sdk_client()
     dataset = client.Dataset.get(dataset_id)
     df = dataset.get_as_dataframe()
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -238,8 +226,10 @@ async def get_exploratory_insights(dataset_id: str, target_col: str | None = Non
         sorted(correlations.items(), key=lambda x: abs(x[1]), reverse=True)
     )
 
-
-
+    return ToolResult(
+        content=json.dumps(eda_insights, indent=2),
+        structured_content=eda_insights,
+    )
 
 
 def _identify_potential_targets(
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -450,47 +440,50 @@ def _analyze_target_for_use_cases(df: pd.DataFrame, target_col: str) -> list[Use
     return suggestions
 
 
-@dr_mcp_tool(tags={"training", "autopilot", "model"})
+@dr_mcp_tool(tags={"predictive", "training", "write", "autopilot", "model"})
 async def start_autopilot(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    *,
+    target: Annotated[str, "Name of the target column for modeling"] | None = None,
+    project_id: Annotated[
+        str, "Optional, the ID of the DataRobot project or a new project if no id is provided"
+    ]
+    | None = None,
+    mode: Annotated[str, "Optional, Autopilot mode ('quick', 'comprehensive', or 'manual')"]
+    | None = "quick",
+    dataset_url: Annotated[
+        str,
+        """
+        Optional, The URL to the dataset to upload
+        (optional if dataset_id is provided) for a new project.
+        """,
+    ]
+    | None = None,
+    dataset_id: Annotated[
+        str,
+        """
+        Optional, The ID of an existing dataset in AI Catalog
+        (optional if dataset_url is provided) for a new project.
+        """,
+    ]
+    | None = None,
+    project_name: Annotated[
+        str, "Optional, name for the project if no id is provided, creates a new project"
+    ]
+    | None = "MCP Project",
+    use_case_id: Annotated[
+        str,
+        "Optional, ID of the use case to associate this project (required for next-gen platform)",
+    ]
+    | None = None,
+) -> ToolError | ToolResult:
+    """Start automated model training (Autopilot) for a project."""
     client = get_sdk_client()
 
     if not project_id:
         if not dataset_url and not dataset_id:
-            return "
+            return ToolError("Either dataset_url or dataset_id must be provided")
         if dataset_url and dataset_id:
-            return "
+            return ToolError("Please provide either dataset_url or dataset_id, not both")
 
         if dataset_url:
             dataset = client.Dataset.create_from_url(dataset_url)
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -504,7 +497,7 @@ async def start_autopilot(
         project = client.Project.get(project_id)
 
     if not target:
-        return "
+        return ToolError("Target variable must be specified")
 
     try:
         # Start modeling
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -515,40 +508,48 @@ async def start_autopilot(
             "target": target,
             "mode": mode,
             "status": project.get_status(),
-            "ui_panel": ["eda", "model-training", "leaderboard"],
             "use_case_id": project.use_case_id,
         }
 
-        return
-
-
-            {
-                "error": f"Failed to start Autopilot: {str(e)}",
-                "project_id": project.id,
-                "target": target,
-                "mode": mode,
-            },
-            indent=2,
+        return ToolResult(
+            content=json.dumps(result, indent=2),
+            structured_content=result,
         )
 
+    except Exception as e:
+        return ToolError(
+            content=json.dumps(
+                {
+                    "error": f"Failed to start Autopilot: {str(e)}",
+                    "project_id": project.id if project else None,
+                    "target": target,
+                    "mode": mode,
+                },
+                indent=2,
+            )
+        )
 
-@dr_mcp_tool(tags={"training", "model", "evaluation"})
-async def get_model_roc_curve(project_id: str, model_id: str, source: str = "validation") -> str:
-    """
-    Get detailed ROC curve for a specific model.
 
-
-
-
-
-
+@dr_mcp_tool(tags={"prediction", "training", "read", "model", "evaluation"})
+async def get_model_roc_curve(
+    *,
+    project_id: Annotated[str, "The ID of the DataRobot project"] | None = None,
+    model_id: Annotated[str, "The ID of the model to analyze"] | None = None,
+    source: Annotated[
+        str,
+        """
+        The source of the data to use for the ROC curve
+        ('validation' or 'holdout' or 'crossValidation')
+        """,
+    ]
+    | str = "validation",
+) -> ToolError | ToolResult:
+    """Get detailed ROC curve for a specific model."""
+    if not project_id:
+        return ToolError("Project ID must be provided")
+    if not model_id:
+        return ToolError("Model ID must be provided")
 
-    Returns
-    -------
-    JSON string containing:
-    - roc_curve: ROC curve data
-    - ui_panel: List of recommended UI panels for visualization
-    """
     client = get_sdk_client()
     project = client.Project.get(project_id)
     model = client.Model.get(project=project, model_id=model_id)
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -581,26 +582,26 @@ async def get_model_roc_curve(project_id: str, model_id: str, source: str = "val
             "source": source,
         }
 
-        return
+        return ToolResult(
+            content=json.dumps({"data": roc_data}, indent=2),
+            structured_content={"data": roc_data},
+        )
     except Exception as e:
-        return
+        return ToolError(f"Failed to get ROC curve: {str(e)}")
 
 
-@dr_mcp_tool(tags={"training", "model", "evaluation"})
-async def get_model_feature_impact(
-
-
-
-
-
-
+@dr_mcp_tool(tags={"predictive", "training", "read", "model", "evaluation"})
+async def get_model_feature_impact(
+    *,
+    project_id: Annotated[str, "The ID of the DataRobot project"] | None = None,
+    model_id: Annotated[str, "The ID of the model to analyze"] | None = None,
+) -> ToolError | ToolResult:
+    """Get detailed feature impact for a specific model."""
+    if not project_id:
+        return ToolError("Project ID must be provided")
+    if not model_id:
+        return ToolError("Model ID must be provided")
 
-    Returns
-    -------
-    JSON string containing:
-    - feature_impact: Feature importance scores
-    - ui_panel: List of recommended UI panels for visualization
-    """
     client = get_sdk_client()
     project = client.Project.get(project_id)
     model = client.Model.get(project=project, model_id=model_id)
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -608,26 +609,31 @@ async def get_model_feature_impact(project_id: str, model_id: str) -> str:
     model.request_feature_impact()
     feature_impact = model.get_or_request_feature_impact()
 
-    return
-
+    return ToolResult(
+        content=json.dumps({"data": feature_impact}, indent=2),
+        structured_content={"data": feature_impact},
+    )
 
-@dr_mcp_tool(tags={"training", "model", "evaluation"})
-async def get_model_lift_chart(project_id: str, model_id: str, source: str = "validation") -> str:
-    """
-    Get detailed lift chart for a specific model.
 
-
-
-
-
-
+@dr_mcp_tool(tags={"predictive", "training", "read", "model", "evaluation"})
+async def get_model_lift_chart(
+    project_id: Annotated[str, "The ID of the DataRobot project"] | None = None,
+    model_id: Annotated[str, "The ID of the model to analyze"] | None = None,
+    source: Annotated[
+        str,
+        """
+        The source of the data to use for the lift chart
+        ('validation' or 'holdout' or 'crossValidation')
+        """,
+    ]
+    | str = "validation",
+) -> ToolError | ToolResult:
+    """Get detailed lift chart for a specific model."""
+    if not project_id:
+        return ToolError("Project ID must be provided")
+    if not model_id:
+        return ToolError("Model ID must be provided")
 
-    Returns
-    -------
-    JSON string containing:
-    - lift_chart: Lift chart data
-    - ui_panel: List of recommended UI panels for visualization
-    """
     client = get_sdk_client()
     project = client.Project.get(project_id)
     model = client.Model.get(project=project, model_id=model_id)
```
```diff
--- a/datarobot_genai/drmcp/tools/predictive/training.py
+++ b/datarobot_genai/drmcp/tools/predictive/training.py
@@ -648,4 +654,7 @@ async def get_model_lift_chart(project_id: str, model_id: str, source: str = "va
         "target_class": lift_chart.target_class,
     }
 
-    return
+    return ToolResult(
+        content=json.dumps({"data": lift_chart_data}, indent=2),
+        structured_content={"data": lift_chart_data},
+    )
```
```diff
--- a/datarobot_genai-0.2.23.dist-info/RECORD
+++ b/datarobot_genai-0.2.25.dist-info/RECORD
@@ -31,18 +31,17 @@ datarobot_genai/drmcp/core/config.py,sha256=69QDsVVSvjzv1uIHOjtQGzdg7_Ic4sA3vLA6
 datarobot_genai/drmcp/core/config_utils.py,sha256=U-aieWw7MyP03cGDFIp97JH99ZUfr3vD9uuTzBzxn7w,6428
 datarobot_genai/drmcp/core/constants.py,sha256=lUwoW_PTrbaBGqRJifKqCn3EoFacoEgdO-CpoFVrUoU,739
 datarobot_genai/drmcp/core/credentials.py,sha256=PYEUDNMVw1BoMzZKLkPVTypNkVevEPtmk3scKnE-zYg,6706
-datarobot_genai/drmcp/core/dr_mcp_server.py,sha256=
+datarobot_genai/drmcp/core/dr_mcp_server.py,sha256=czcjbwhZAeW9EtG_Bys0GARPOuQulstkiU7FG48Q9bg,14118
 datarobot_genai/drmcp/core/dr_mcp_server_logo.py,sha256=hib-nfR1SNTW6CnpFsFCkL9H_OMwa4YYyinV7VNOuLk,4708
 datarobot_genai/drmcp/core/exceptions.py,sha256=eqsGI-lxybgvWL5w4BFhbm3XzH1eU5tetwjnhJxelpc,905
 datarobot_genai/drmcp/core/logging.py,sha256=Y_hig4eBWiXGaVV7B_3wBcaYVRNH4ydptbEQhrP9-mY,3414
-datarobot_genai/drmcp/core/mcp_instance.py,sha256=
-datarobot_genai/drmcp/core/mcp_server_tools.py,sha256=odNZKozfx0VV38SLZHw9lY0C0JM_JnRI06W3BBXnyE4,4278
+datarobot_genai/drmcp/core/mcp_instance.py,sha256=cbhy9vSw7LMZWPAYkpCyT0V55wfxyRLkzOlgwKh1T2o,17794
 datarobot_genai/drmcp/core/routes.py,sha256=dqE2M0UzAyyN9vQjlyTjYW4rpju3LT039po5weuO__I,17936
 datarobot_genai/drmcp/core/routes_utils.py,sha256=vSseXWlplMSnRgoJgtP_rHxWSAVYcx_tpTv4lyTpQoc,944
 datarobot_genai/drmcp/core/server_life_cycle.py,sha256=WKGJWGxalvqxupzJ2y67Kklc_9PgpZT0uyjlv_sr5wc,3419
 datarobot_genai/drmcp/core/telemetry.py,sha256=NEkSTC1w6uQgtukLHI-sWvR4EMgInysgATcvfQ5CplM,15378
 datarobot_genai/drmcp/core/tool_config.py,sha256=5JCWO70ZH-K-34yS7vYJG2nl4i9UO_q_W9NCoWSXXno,3271
-datarobot_genai/drmcp/core/tool_filter.py,sha256=
+datarobot_genai/drmcp/core/tool_filter.py,sha256=yKQlEtzyIeXGxZJkHbK36QI19vmgQkvqmfx5cTo2pp4,3156
 datarobot_genai/drmcp/core/utils.py,sha256=EvfpqKZ3tECMoxpIQ_tA_3rOgy6KJEYKC0lWZo_Daag,4517
 datarobot_genai/drmcp/core/dynamic_prompts/__init__.py,sha256=y4yapzp3KnFMzSR6HlNDS4uSuyNT7I1iPBvaCLsS0sU,577
 datarobot_genai/drmcp/core/dynamic_prompts/controllers.py,sha256=AGJlKqgHRO0Kd7Gl-Ulw9KYBgzjTTFXWBvOUF-SuKUI,5454
```
```diff
--- a/datarobot_genai-0.2.23.dist-info/RECORD
+++ b/datarobot_genai-0.2.25.dist-info/RECORD
@@ -95,8 +94,8 @@ datarobot_genai/drmcp/tools/predictive/deployment_info.py,sha256=BGEF_dmbxOBJR0n
 datarobot_genai/drmcp/tools/predictive/model.py,sha256=Yih5-KedJ-1yupPLXCJsCXOdyWWi9pRvgapXDlgXWJA,4891
 datarobot_genai/drmcp/tools/predictive/predict.py,sha256=Qoob2_t2crfWtyPzkXMRz2ITZumnczU6Dq4C7q9RBMI,9370
 datarobot_genai/drmcp/tools/predictive/predict_realtime.py,sha256=urq6rPyZFsAP-bPyclSNzrkvb6FTamdlFau8q0IWWJ0,13472
-datarobot_genai/drmcp/tools/predictive/project.py,sha256=
-datarobot_genai/drmcp/tools/predictive/training.py,sha256=
+datarobot_genai/drmcp/tools/predictive/project.py,sha256=xC52UdYvuFeNZC7Y5MfXcvzTL70WwAacQXESr6rqN6s,3255
+datarobot_genai/drmcp/tools/predictive/training.py,sha256=S9V7AlO6mAgIAJNww0g5agFOw4YqRiCsIGaRDJcOe4A,23991
 datarobot_genai/langgraph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 datarobot_genai/langgraph/agent.py,sha256=DRnywmS9KDywyChtuIZZwNKbJs8BpC259EG_kxYbiQ8,15828
 datarobot_genai/langgraph/mcp.py,sha256=iA2_j46mZAaNaL7ntXT-LW6C-NMJkzr3VfKDDfe7mh8,2851
```
```diff
--- a/datarobot_genai-0.2.23.dist-info/RECORD
+++ b/datarobot_genai-0.2.25.dist-info/RECORD
@@ -111,9 +110,9 @@ datarobot_genai/nat/datarobot_llm_clients.py,sha256=Yu208Ed_p_4P3HdpuM7fYnKcXtim
 datarobot_genai/nat/datarobot_llm_providers.py,sha256=aDoQcTeGI-odqydPXEX9OGGNFbzAtpqzTvHHEkmJuEQ,4963
 datarobot_genai/nat/datarobot_mcp_client.py,sha256=35FzilxNp4VqwBYI0NsOc91-xZm1C-AzWqrOdDy962A,9612
 datarobot_genai/nat/helpers.py,sha256=Q7E3ADZdtFfS8E6OQPyw2wgA6laQ58N3bhLj5CBWwJs,3265
-datarobot_genai-0.2.
-datarobot_genai-0.2.
-datarobot_genai-0.2.
-datarobot_genai-0.2.
-datarobot_genai-0.2.
-datarobot_genai-0.2.
+datarobot_genai-0.2.25.dist-info/METADATA,sha256=C2_0ev_hz7YV8hiWuMVjoS5O9fz62WZ_oN3DuYDj6rA,6301
+datarobot_genai-0.2.25.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+datarobot_genai-0.2.25.dist-info/entry_points.txt,sha256=jEW3WxDZ8XIK9-ISmTyt5DbmBb047rFlzQuhY09rGrM,284
+datarobot_genai-0.2.25.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
+datarobot_genai-0.2.25.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
+datarobot_genai-0.2.25.dist-info/RECORD,,
```
```diff
--- a/datarobot_genai/drmcp/core/mcp_server_tools.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2025 DataRobot, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from .mcp_instance import dr_core_mcp_tool
-from .mcp_instance import mcp
-
-logger = logging.getLogger(__name__)
-
-
-@dr_core_mcp_tool(tags={"mcp_server_tools", "metadata"})
-async def get_all_available_tags() -> str:
-    """
-    List all unique tags from all registered tools.
-
-    Returns
-    -------
-    A string containing all available tags, one per line.
-    """
-    tags = await mcp.get_all_tags()
-    if not tags:
-        return "No tags found in any tools."
-
-    return "\n".join(sorted(tags))
-
-
-@dr_core_mcp_tool(tags={"mcp_server_tools", "metadata", "discovery"})
-async def list_tools_by_tags(tags: list[str] | None = None, match_all: bool = False) -> str:
-    """
-    List tools filtered by tags.
-
-    Args:
-        tags: Optional list of tags to filter by. If None, returns all tools.
-        match_all: If True, tool must have all specified tags (AND logic).
-            If False, tool must have at least one tag (OR logic).
-            Only used when tags is provided.
-
-    Returns
-    -------
-    A formatted string listing tools that match the tag criteria.
-    """
-    tools = await mcp.list_tools(tags=tags, match_all=match_all)
-
-    if not tools:
-        if tags:
-            logic = "all" if match_all else "any"
-            return f"No tools found with {logic} of the tags: {', '.join(tags)}"
-        else:
-            return "No tools found."
-
-    result = []
-    if tags:
-        logic = "all" if match_all else "any"
-        result.append(f"Tools with {logic} of the tags: {', '.join(tags)}")
-    else:
-        result.append("All available tools:")
-
-    result.append("")
-
-    for i, tool in enumerate(tools, 1):
-        tool_tags = []
-        if tool.annotations and hasattr(tool.annotations, "extra") and tool.annotations.extra:
-            tool_tags = tool.annotations.extra.get("tags", [])
-
-        result.append(f"{i}. {tool.name}")
-        result.append(f" Description: {tool.description}")
-        if tool_tags:
-            result.append(f" Tags: {', '.join(tool_tags)}")
-        result.append("")
-
-    return "\n".join(result)
-
-
-@dr_core_mcp_tool(tags={"mcp_server_tools", "metadata", "discovery"})
-async def get_tool_info_by_name(tool_name: str) -> str:
-    """
-    Get detailed information about a specific tool by name.
-
-    Args:
-        tool_name: The name of the tool to get information about.
-
-    Returns
-    -------
-    A formatted string with detailed information about the tool.
-    """
-    all_tools = await mcp.list_tools()
-
-    for tool in all_tools:
-        if tool.name == tool_name:
-            result = [f"Tool: {tool.name}"]
-            result.append(f"Description: {tool.description}")
-
-            # Get tags
-            tool_tags = []
-            if tool.annotations and hasattr(tool.annotations, "extra") and tool.annotations.extra:
-                tool_tags = tool.annotations.extra.get("tags", [])
-
-            if tool_tags:
-                result.append(f"Tags: {', '.join(tool_tags)}")
-            else:
-                result.append("Tags: None")
-
-            # Get input schema info
-            if (
-                tool.inputSchema
-                and hasattr(tool.inputSchema, "properties")
-                and tool.inputSchema.properties
-            ):
-                result.append("Parameters:")
-                for param_name, param_info in tool.inputSchema.properties.items():
-                    param_type = param_info.get("type", "unknown")
-                    param_desc = param_info.get("description", "No description")
-                    result.append(f" - {param_name} ({param_type}): {param_desc}")
-
-            return "\n".join(result)
-
-    return f"Tool '{tool_name}' not found."
```
Files without changes: WHEEL, entry_points.txt, licenses/AUTHORS, licenses/LICENSE (dist-info rename only).