fiftyone-mcp-server 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fiftyone_mcp/__init__.py +11 -0
- fiftyone_mcp/config/settings.json +15 -0
- fiftyone_mcp/server.py +127 -0
- fiftyone_mcp/tools/__init__.py +7 -0
- fiftyone_mcp/tools/datasets.py +210 -0
- fiftyone_mcp/tools/operators.py +575 -0
- fiftyone_mcp/tools/plugins.py +289 -0
- fiftyone_mcp/tools/session.py +352 -0
- fiftyone_mcp/tools/utils.py +112 -0
- fiftyone_mcp_server-0.1.0.dist-info/METADATA +174 -0
- fiftyone_mcp_server-0.1.0.dist-info/RECORD +13 -0
- fiftyone_mcp_server-0.1.0.dist-info/WHEEL +4 -0
- fiftyone_mcp_server-0.1.0.dist-info/entry_points.txt +3 -0
fiftyone_mcp/__init__.py
ADDED

fiftyone_mcp/config/settings.json
ADDED

@@ -0,0 +1,15 @@
+{
+  "server": {
+    "name": "fiftyone-mcp",
+    "version": "0.1.0",
+    "default_port": 5149
+  },
+  "fiftyone": {
+    "database_dir": null,
+    "default_dataset": null
+  },
+  "logging": {
+    "level": "INFO",
+    "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+  }
+}
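The bundled settings.json ships inside the wheel, so its defaults can be read directly from the installed package. A minimal sketch (not part of the package) of locating and parsing it, mirroring the load_config() helper in fiftyone_mcp/server.py below; it assumes the wheel is installed:

    import json
    from pathlib import Path

    import fiftyone_mcp

    # settings.json is packaged under fiftyone_mcp/config/
    settings_path = Path(fiftyone_mcp.__file__).parent / "config" / "settings.json"
    settings = json.loads(settings_path.read_text())

    print(settings["server"]["name"])          # "fiftyone-mcp"
    print(settings["server"]["default_port"])  # 5149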
fiftyone_mcp/server.py
ADDED

@@ -0,0 +1,127 @@
+"""
+FiftyOne MCP Server.
+
+Main entrypoint for the FiftyOne Model Context Protocol server.
+
+| Copyright 2017-2025, Voxel51, Inc.
+| `voxel51.com <https://voxel51.com/>`_
+
+"""
+
+import asyncio
+import json
+import logging
+from pathlib import Path
+
+from mcp.server import Server
+from mcp.server.stdio import stdio_server
+from mcp.types import TextContent
+
+from .tools.datasets import get_dataset_tools
+from .tools.operators import get_operator_tools
+from .tools.plugins import get_plugin_tools
+from .tools.session import get_session_tools
+from .tools import datasets, operators, plugins, session
+from .tools.utils import format_response
+
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+)
+logger = logging.getLogger(__name__)
+
+
+def load_config():
+    """Loads configuration from settings.json.
+
+    Returns:
+        a config dict
+    """
+    config_path = Path(__file__).parent / "config" / "settings.json"
+    try:
+        with open(config_path, "r") as f:
+            return json.load(f)
+    except Exception as e:
+        logger.warning(f"Could not load config from {config_path}: {e}")
+        return {}
+
+
+async def main():
+    """Main server function."""
+    config = load_config()
+    server_config = config.get("server", {})
+    server_name = server_config.get("name", "fiftyone-mcp")
+
+    logger.info(f"Starting {server_name} server...")
+
+    server = Server(server_name)
+
+    all_tools = (
+        get_dataset_tools()
+        + get_operator_tools()
+        + get_plugin_tools()
+        + get_session_tools()
+    )
+
+    @server.list_tools()
+    async def list_tools_handler():
+        return all_tools
+
+    @server.call_tool()
+    async def call_tool_handler(name, arguments):
+        if name in ["list_datasets", "load_dataset", "dataset_summary"]:
+            return await datasets.handle_tool_call(name, arguments)
+        elif name in [
+            "set_context",
+            "get_context",
+            "list_operators",
+            "get_operator_schema",
+            "execute_operator",
+        ]:
+            return await operators.handle_tool_call(name, arguments)
+        elif name in [
+            "list_plugins",
+            "get_plugin_info",
+            "download_plugin",
+            "enable_plugin",
+            "disable_plugin",
+        ]:
+            return await plugins.handle_plugin_tool(name, arguments)
+        elif name in [
+            "launch_app",
+            "close_app",
+            "get_session_info",
+            "set_view",
+            "clear_view",
+        ]:
+            return await session.handle_session_tool(name, arguments)
+        else:
+            result = format_response(
+                None, success=False, error=f"Unknown tool: {name}"
+            )
+            return [
+                TextContent(type="text", text=json.dumps(result, indent=2))
+            ]
+
+    logger.info(f"{server_name} server initialized successfully")
+
+    async with stdio_server() as (read_stream, write_stream):
+        await server.run(
+            read_stream, write_stream, server.create_initialization_options()
+        )
+
+
+def run():
+    """Entry point for the server."""
+    try:
+        asyncio.run(main())
+    except KeyboardInterrupt:
+        logger.info("Server stopped by user")
+    except Exception as e:
+        logger.error(f"Server error: {e}", exc_info=True)
+        raise
+
+
+if __name__ == "__main__":
+    run()
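server.py exposes run() as the synchronous entry point: it wraps main() in asyncio.run() and serves MCP over stdio. A minimal sketch of launching it programmatically; the wheel also registers a console script in entry_points.txt, whose exact command name is not shown in this diff:

    # Hypothetical direct invocation; assumes the wheel is installed
    # along with its fiftyone and mcp dependencies
    from fiftyone_mcp.server import run

    run()  # blocks, speaking MCP over stdin/stdout until interrupted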
fiftyone_mcp/tools/datasets.py
ADDED

@@ -0,0 +1,210 @@
+"""
+Dataset management tools for FiftyOne MCP server.
+
+| Copyright 2017-2025, Voxel51, Inc.
+| `voxel51.com <https://voxel51.com/>`_
+
+"""
+
+import json
+import logging
+
+import fiftyone as fo
+from mcp.types import Tool, TextContent
+
+from .utils import format_response, safe_serialize, dataset_to_summary
+
+
+logger = logging.getLogger(__name__)
+
+
+def list_datasets():
+    """Lists all available FiftyOne datasets.
+
+    Returns:
+        a dict containing list of dataset names and metadata
+    """
+    try:
+        datasets = fo.list_datasets()
+        dataset_info = []
+
+        for name in datasets:
+            try:
+                dataset = fo.load_dataset(name)
+                dataset_info.append(
+                    {
+                        "name": name,
+                        "media_type": dataset.media_type,
+                        "num_samples": len(dataset),
+                        "persistent": dataset.persistent,
+                        "tags": dataset.tags,
+                    }
+                )
+            except Exception as e:
+                logger.warning(f"Could not load dataset '{name}': {e}")
+                dataset_info.append({"name": name, "error": str(e)})
+
+        return format_response(
+            {"count": len(datasets), "datasets": dataset_info}
+        )
+
+    except Exception as e:
+        logger.error(f"Failed to list datasets: {e}")
+        return format_response(None, success=False, error=str(e))
+
+
+def load_dataset(name):
+    """Loads a FiftyOne dataset by name and returns basic info.
+
+    Args:
+        name: the name of the dataset to load
+
+    Returns:
+        a dict containing dataset information
+    """
+    try:
+        dataset = fo.load_dataset(name)
+
+        info = {
+            "name": dataset.name,
+            "media_type": dataset.media_type,
+            "num_samples": len(dataset),
+            "persistent": dataset.persistent,
+            "tags": dataset.tags,
+            "info": safe_serialize(dataset.info),
+            "fields": list(dataset.get_field_schema().keys()),
+        }
+
+        return format_response(info)
+
+    except Exception as e:
+        logger.error(f"Failed to load dataset '{name}': {e}")
+        return format_response(None, success=False, error=str(e))
+
+
+def dataset_summary(name):
+    """Gets detailed summary statistics for a dataset.
+
+    Args:
+        name: the name of the dataset
+
+    Returns:
+        a dict containing detailed dataset statistics
+    """
+    try:
+        dataset = fo.load_dataset(name)
+        summary = dataset_to_summary(dataset)
+
+        summary["stats"] = {"total_samples": len(dataset), "tags": {}}
+
+        for tag in dataset.tags:
+            tagged_view = dataset.match_tags(tag)
+            summary["stats"]["tags"][tag] = len(tagged_view)
+
+        schema = dataset.get_field_schema()
+        summary["value_counts"] = {}
+
+        for field_name in schema.keys():
+            if field_name in ["id", "filepath", "metadata"]:
+                continue
+
+            try:
+                if hasattr(dataset, "count_values"):
+                    counts = dataset.count_values(field_name)
+                    if counts and len(counts) < 100:
+                        summary["value_counts"][field_name] = dict(counts)
+            except Exception:
+                pass
+
+        return format_response(summary)
+
+    except Exception as e:
+        logger.error(f"Failed to get summary for dataset '{name}': {e}")
+        return format_response(None, success=False, error=str(e))
+
+
+def get_dataset_tools():
+    """Gets dataset tool definitions.
+
+    Returns:
+        a list of :class:`mcp.types.Tool` instances
+    """
+    return [
+        Tool(
+            name="list_datasets",
+            description="List all available FiftyOne datasets with metadata",
+            inputSchema={"type": "object", "properties": {}, "required": []},
+        ),
+        Tool(
+            name="load_dataset",
+            description="Load a FiftyOne dataset by name and return basic information",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "name": {
+                        "type": "string",
+                        "description": "Name of the dataset to load",
+                    }
+                },
+                "required": ["name"],
+            },
+        ),
+        Tool(
+            name="dataset_summary",
+            description="Get detailed summary statistics and metadata for a dataset",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "name": {
+                        "type": "string",
+                        "description": "Name of the dataset",
+                    }
+                },
+                "required": ["name"],
+            },
+        ),
+    ]
+
+
+async def handle_tool_call(name, arguments):
+    """Handles tool calls for dataset operations.
+
+    Args:
+        name: the name of the tool
+        arguments: a dict of arguments for the tool
+
+    Returns:
+        a list of :class:`mcp.types.TextContent` instances
+    """
+    try:
+        if name == "list_datasets":
+            result = list_datasets()
+        elif name == "load_dataset":
+            dataset_name = arguments.get("name")
+            if not dataset_name:
+                result = format_response(
+                    None, success=False, error="Dataset name is required"
+                )
+            else:
+                result = load_dataset(dataset_name)
+        elif name == "dataset_summary":
+            dataset_name = arguments.get("name")
+            if not dataset_name:
+                result = format_response(
+                    None, success=False, error="Dataset name is required"
+                )
+            else:
+                result = dataset_summary(dataset_name)
+        else:
+            result = format_response(
+                None, success=False, error=f"Unknown tool: {name}"
+            )
+
+        return [TextContent(type="text", text=json.dumps(result, indent=2))]
+
+    except Exception as e:
+        logger.error(f"Error handling tool call '{name}': {e}")
+        error_result = format_response(None, success=False, error=str(e))
+        return [
+            TextContent(type="text", text=json.dumps(error_result, indent=2))
+        ]
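Because handle_tool_call() is a plain async function that returns mcp.types.TextContent objects, the dataset tools can be exercised without a running MCP client. A minimal smoke-test sketch, assuming FiftyOne is installed and configured locally; the demo() name is illustrative only, and the printed JSON is whatever format_response() in fiftyone_mcp/tools/utils.py produces:

    import asyncio

    from fiftyone_mcp.tools import datasets

    async def demo():
        # Same code path the server routes "list_datasets" calls to
        contents = await datasets.handle_tool_call("list_datasets", {})
        print(contents[0].text)  # JSON payload built by format_response()

    asyncio.run(demo())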