fs-mcp 1.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fs_mcp/__init__.py +0 -0
- fs_mcp/__main__.py +122 -0
- fs_mcp/http_runner.py +28 -0
- fs_mcp/server.py +495 -0
- fs_mcp/web_ui.py +395 -0
- fs_mcp-1.3.2.dist-info/METADATA +180 -0
- fs_mcp-1.3.2.dist-info/RECORD +9 -0
- fs_mcp-1.3.2.dist-info/WHEEL +4 -0
- fs_mcp-1.3.2.dist-info/entry_points.txt +2 -0
fs_mcp/__init__.py
ADDED
|
File without changes
|
fs_mcp/__main__.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
# src/fs_mcp/__main__.py
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import sys
|
|
5
|
+
import subprocess
|
|
6
|
+
import time
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from importlib import metadata
|
|
9
|
+
import toml
|
|
10
|
+
from pyfiglet import Figlet
|
|
11
|
+
|
|
12
|
+
try:
    # Preferred path: ask importlib.metadata for the installed package version.
    __version__ = metadata.version("fs-mcp")
except metadata.PackageNotFoundError:
    # Fallback for when the package is not installed, e.g., running from a
    # source checkout during development.
    try:
        # Walk up from src/fs_mcp/__main__.py to the repository root.
        pyproject_path = Path(__file__).parent.parent.parent / 'pyproject.toml'
        with open(pyproject_path, 'r') as f:
            data = toml.load(f)
        __version__ = data['project']['version']
    except (ImportError, FileNotFoundError, KeyError):
        # NOTE(review): a malformed pyproject.toml would raise a toml decode
        # error that is not in this tuple and would propagate — confirm
        # whether that is intended.
        __version__ = "unknown"
|
|
24
|
+
|
|
25
|
+
def main():
    """CLI entry point for fs-mcp.

    Default behavior: launch a background HTTP MCP server subprocess AND a
    foreground Streamlit UI. `--no-ui` / `--no-http` disable each part; with
    both disabled, falls back to running the stdio MCP server in-process.
    The background HTTP process is always cleaned up in the `finally` block.
    """
    # Render a startup banner plus the resolved package version.
    f = Figlet(font='slant')
    banner = f.renderText('fs-mcp')
    print(banner)
    print(f"version {__version__}")
    parser = argparse.ArgumentParser(
        description="fs-mcp server. By default, runs both UI and HTTP servers.",
        formatter_class=argparse.RawTextHelpFormatter
    )
    # UI flags - inverted (store_false) so the UI runs unless --no-ui is given.
    ui_group = parser.add_argument_group('UI Options')
    ui_group.add_argument("--no-ui", action="store_false", dest="run_ui", help="Do not launch the Streamlit Web UI.")
    ui_group.add_argument("--host", default="0.0.0.0", help="Host for the Streamlit UI.")
    ui_group.add_argument("--port", default="8123", type=int, help="Port for the Streamlit UI.")

    # Background HTTP server flags - likewise inverted.
    http_group = parser.add_argument_group('HTTP Server Options')
    http_group.add_argument("--no-http", action="store_false", dest="run_http", help="Do not run a background HTTP MCP server.")
    http_group.add_argument("--http-host", default="0.0.0.0", help="Host for the background HTTP server.")
    http_group.add_argument("--http-port", type=int, default=8124, help="Port for the background HTTP server.")

    # Common args: allowed directories shared by every server mode.
    parser.add_argument("dirs", nargs="*", help="Allowed directories (applies to all server modes).")

    # parse_known_args so extra flags (e.g. Streamlit's own) don't abort startup.
    args, unknown = parser.parse_known_args()
    dirs = args.dirs or [str(Path.cwd())]

    http_process = None
    try:
        # --- Start Background HTTP Server if requested ---
        if args.run_http:
            # Command to run our dedicated HTTP runner script
            http_cmd = [
                sys.executable, "-m", "fs_mcp.http_runner",
                "--host", args.http_host,
                "--port", str(args.http_port),
                *dirs
            ]
            print(f"🚀 Launching background HTTP MCP server process on http://{args.http_host}:{args.http_port}", file=sys.stderr)

            # Use Popen to start the process without blocking.
            # Both streams are routed to this process's stderr so the child's
            # output never mixes with stdout (which stdio MCP clients read).
            http_process = subprocess.Popen(http_cmd, stdout=sys.stderr, stderr=sys.stderr)

            # Give it a moment to start up and check for instant failure
            # (poll() returns an exit code once the child has died).
            time.sleep(2)
            if http_process.poll() is not None:
                print("❌ Background HTTP server failed to start. Check logs.", file=sys.stderr)
                sys.exit(1)

        # --- Start Foreground Application (UI or wait) ---
        if args.run_ui:
            # Locate web_ui.py next to this module so Streamlit can run it.
            current_dir = Path(__file__).parent
            ui_path = (current_dir / "web_ui.py").resolve()
            if not ui_path.exists():
                raise FileNotFoundError(f"Could not find web_ui.py at {ui_path}")

            # Everything after "--" is forwarded to web_ui.py as its argv.
            ui_cmd = [
                sys.executable, "-m", "streamlit", "run", str(ui_path),
                "--server.address", args.host,
                "--server.port", str(args.port),
                "--", *[str(Path(d).resolve()) for d in dirs]
            ]
            print(f"🚀 Launching UI on http://{args.host}:{args.port}", file=sys.stderr)
            # This is a blocking call. The script waits here until Streamlit exits.
            subprocess.run(ui_cmd)

        elif args.run_http and http_process:
            # If ONLY the http server is running, we just need to wait.
            print("Background HTTP server is running. Press Ctrl+C to stop.", file=sys.stderr)
            http_process.wait()

        if not args.run_ui and not args.run_http:
            # Default: run the original stdio server. This should be a direct import.
            from fs_mcp import server
            print("🚀 Launching Stdio MCP server", file=sys.stderr)
            server.initialize(dirs)
            server.mcp.run()

    except KeyboardInterrupt:
        print("\nCaught interrupt, shutting down...", file=sys.stderr)

    finally:
        # This block is GUARANTEED to run, ensuring the background process is cleaned up.
        if http_process:
            print("Terminating background HTTP server...", file=sys.stderr)
            http_process.terminate() # Sends SIGTERM for a graceful shutdown
            try:
                # Wait up to 5 seconds for it to shut down
                http_process.wait(timeout=5)
                print("Background server stopped gracefully.", file=sys.stderr)
            except subprocess.TimeoutExpired:
                # If it's stuck, force kill it.
                print("Server did not terminate gracefully, killing.", file=sys.stderr)
                http_process.kill()

if __name__ == "__main__":
    main()
|
fs_mcp/http_runner.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
from fs_mcp import server
|
|
3
|
+
|
|
4
|
+
def main():
    """
    This is a dedicated entry point for running the FastMCP server in HTTP mode.
    It's designed to be called as a subprocess from the main CLI.

    On failure it writes to stderr and exits nonzero so the parent process
    (fs_mcp.__main__) can detect the crash via poll().
    """
    import sys  # local import keeps the module's top-level imports untouched

    parser = argparse.ArgumentParser()
    parser.add_argument("--host", required=True)
    parser.add_argument("--port", type=int, required=True)
    parser.add_argument("dirs", nargs="*")
    args = parser.parse_args()

    try:
        server.initialize(args.dirs)
        server.mcp.run(
            transport="streamable-http",
            host=args.host,
            port=args.port
        )
    except KeyboardInterrupt:
        pass  # The main process will handle termination.
    except Exception as e:
        # Previously this printed to stdout and fell through, exiting with
        # status 0 — the parent then had no reliable failure signal.
        print(f"HTTP runner failed: {e}", file=sys.stderr)
        sys.exit(1)

if __name__ == "__main__":
    main()
|
fs_mcp/server.py
ADDED
|
@@ -0,0 +1,495 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from pydantic import BaseModel
|
|
3
|
+
from typing import Optional
|
|
4
|
+
|
|
5
|
+
class FileReadRequest(BaseModel):
    """One file-read request as accepted by the `read_files` tool."""
    # File path; relative paths are resolved against the primary allowed dir.
    path: str
    # If set, return only the first `head` lines of the file.
    head: Optional[int] = None
    # If set, return only the last `tail` lines (mutually exclusive with head).
    tail: Optional[int] = None
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
import base64
|
|
13
|
+
import mimetypes
|
|
14
|
+
import fnmatch
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
from typing import List, Optional, Literal, Dict
|
|
17
|
+
from datetime import datetime
|
|
18
|
+
from fastmcp import FastMCP
|
|
19
|
+
import tempfile
|
|
20
|
+
import time
|
|
21
|
+
import shutil
|
|
22
|
+
import subprocess
|
|
23
|
+
|
|
24
|
+
from dataclasses import dataclass
|
|
25
|
+
import difflib
|
|
26
|
+
|
|
27
|
+
# The new structure for returning detailed results from the edit tool.
@dataclass
class EditResult:
    """Outcome of preparing an edit in RooStyleEditTool._prepare_edit."""
    success: bool                           # whether the edit can be applied
    message: str                            # human-readable status/explanation
    diff: Optional[str] = None              # optional unified diff of the change
    error_type: Optional[str] = None        # machine-readable failure category
    original_content: Optional[str] = None  # file content before the edit
    new_content: Optional[str] = None       # file content after applying the edit
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# --- Global Configuration ---
# Directories the user explicitly granted access to (reported to clients).
USER_ACCESSIBLE_DIRS: List[Path] = []
# Superset of the above: initialize() also appends the system temp dir so
# interactive review sessions can live there.
ALLOWED_DIRS: List[Path] = []
# The FastMCP server instance every tool below is registered on.
mcp = FastMCP("filesystem")
# True when the `code` CLI is on PATH (set by initialize()).
IS_VSCODE_CLI_AVAILABLE = False
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def initialize(directories: List[str]):
    """Initialize the allowed directories and check for VS Code CLI.

    Resets the module-level directory lists, records whether the `code`
    CLI exists, validates each requested directory, and always whitelists
    the system temp dir for internal review sessions. Returns the list of
    user-accessible directories.
    """
    global ALLOWED_DIRS, USER_ACCESSIBLE_DIRS, IS_VSCODE_CLI_AVAILABLE
    ALLOWED_DIRS.clear()
    USER_ACCESSIBLE_DIRS.clear()

    IS_VSCODE_CLI_AVAILABLE = shutil.which('code') is not None

    candidates = directories or [str(Path.cwd())]

    # Validate and resolve each user-specified directory.
    for raw in candidates:
        try:
            resolved = Path(raw).expanduser().resolve()
            if not resolved.is_dir():
                # Covers both missing paths and non-directory paths.
                print(f"Warning: Skipping invalid directory: {resolved}")
                continue
            USER_ACCESSIBLE_DIRS.append(resolved)
        except Exception as exc:
            print(f"Warning: Could not resolve {raw}: {exc}")

    # ALLOWED_DIRS = user dirs + the system temp directory, which hosts
    # internal review sessions.
    ALLOWED_DIRS.extend(USER_ACCESSIBLE_DIRS)
    ALLOWED_DIRS.append(Path(tempfile.gettempdir()).resolve())

    if not USER_ACCESSIBLE_DIRS:
        # Nothing valid was supplied; fall back to the current working dir.
        print("Warning: No valid user directories. Defaulting to CWD.")
        cwd = Path.cwd()
        USER_ACCESSIBLE_DIRS.append(cwd)
        if cwd not in ALLOWED_DIRS:
            ALLOWED_DIRS.append(cwd)

    return USER_ACCESSIBLE_DIRS
|
|
79
|
+
|
|
80
|
+
def _is_contained(child: Path, parent: Path) -> bool:
    """Return True if `child` equals `parent` or lies underneath it.

    Uses Path.relative_to rather than a raw string prefix so that a sibling
    such as /data-evil is NOT accepted for the allowed directory /data.
    """
    try:
        child.relative_to(parent)
        return True
    except ValueError:
        return False


def validate_path(requested_path: str) -> Path:
    """
    Security barrier: Ensures path is within ALLOWED_DIRS.
    Handles both absolute and relative paths. Relative paths are resolved
    against the first directory in ALLOWED_DIRS.

    Raises ValueError when the resolved path falls outside the allowed set.
    """

    # an 'empty' path should always resolve to the primary allowed directory
    if not requested_path or requested_path == ".":
        return ALLOWED_DIRS[0]

    p = Path(requested_path).expanduser()

    # If the path is relative, resolve it against the primary allowed directory.
    if not p.is_absolute():
        base_dir = ALLOWED_DIRS[0]
        p = base_dir / p

    # --- Security Check: Resolve the final path and verify it's within bounds ---
    try:
        # .resolve() is crucial for security as it canonicalizes the path,
        # removing any ".." components and resolving symlinks.
        path_obj = p.resolve()
    except Exception:
        # Fallback for paths that might not exist yet but are being created.
        path_obj = p.absolute()

    # Proper containment test (the previous str.startswith prefix check
    # wrongly admitted sibling directories sharing a name prefix).
    is_allowed = any(_is_contained(path_obj, allowed) for allowed in ALLOWED_DIRS)

    # If the path is in the temp directory, apply extra security checks:
    # only review-session files and pytest scratch dirs are permitted there.
    temp_dir = Path(tempfile.gettempdir()).resolve()
    if is_allowed and _is_contained(path_obj, temp_dir):
        # Allow access to the temp directory itself, but apply stricter checks for its contents.
        if path_obj != temp_dir:
            path_str = str(path_obj)
            is_review_dir = "mcp_review_" in path_str
            is_pytest_dir = "pytest-" in path_str

            if not (is_review_dir or is_pytest_dir):
                is_allowed = False
            # For review directories, only the staged current_/future_ files.
            elif is_review_dir and not (path_obj.name.startswith("current_") or path_obj.name.startswith("future_")):
                is_allowed = False

    if not is_allowed:
        raise ValueError(f"Access denied: {requested_path} is outside allowed directories: {ALLOWED_DIRS}")

    return path_obj
|
|
133
|
+
|
|
134
|
+
def format_size(size_bytes: float) -> str:
    """Render a byte count as a human-readable string, e.g. '1.50 KB'."""
    value = size_bytes
    for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
        if value < 1024.0:
            return f"{value:.2f} {unit}"
        value /= 1024.0
    # Anything >= 1024 TB is expressed in petabytes.
    return f"{value:.2f} PB"
|
|
140
|
+
|
|
141
|
+
# --- Tools ---
|
|
142
|
+
|
|
143
|
+
@mcp.tool()
def list_allowed_directories() -> str:
    """List the directories this server is allowed to access."""
    # Only user-granted directories are reported; the internal temp dir
    # whitelist is intentionally omitted.
    lines = [str(directory) for directory in USER_ACCESSIBLE_DIRS]
    return "\n".join(lines)
|
|
147
|
+
|
|
148
|
+
@mcp.tool()
def read_files(files: List[FileReadRequest]) -> str:
    """
    Read the contents of multiple files simultaneously.
    Returns path and content separated by dashes.
    Prefer relative paths.

    Per-file errors are reported inline rather than aborting the batch.
    """
    results = []
    for file_request_data in files:
        # Some MCP clients pass raw dicts instead of validated models.
        if isinstance(file_request_data, dict):
            file_request = FileReadRequest(**file_request_data)
        else:
            file_request = file_request_data

        try:
            path_obj = validate_path(file_request.path)
            if file_request.head is not None and file_request.tail is not None:
                raise ValueError("Cannot specify both head and tail for a single file.")

            if path_obj.is_dir():
                content = "Error: Is a directory"
            else:
                try:
                    with open(path_obj, 'r', encoding='utf-8') as f:
                        if file_request.head is not None:
                            # readline() returns "" at EOF, so requesting more
                            # lines than the file has yields the whole file
                            # instead of raising StopIteration (previous bug
                            # with next(f) turned such reads into errors).
                            head_lines = []
                            for _ in range(file_request.head):
                                line = f.readline()
                                if not line:
                                    break
                                head_lines.append(line)
                            content = "".join(head_lines)
                        elif file_request.tail is not None:
                            content = "".join(f.readlines()[-file_request.tail:])
                        else:
                            content = f.read()
                except UnicodeDecodeError:
                    content = "Error: Binary file. Use read_media_file."

            results.append(f"File: {file_request.path}\n{content}")
        except Exception as e:
            results.append(f"File: {file_request.path}\nError: {e}")

    return "\n\n---\n\n".join(results)
|
|
186
|
+
|
|
187
|
+
@mcp.tool()
def read_media_file(path: str) -> dict:
    """Read an image or audio file as base64. Prefer relative paths."""
    path_obj = validate_path(path)

    # Guess the MIME type from the extension; default to a generic blob type.
    mime_type, _ = mimetypes.guess_type(path_obj)
    if mime_type is None:
        mime_type = "application/octet-stream"

    try:
        raw_bytes = path_obj.read_bytes()
        data = base64.b64encode(raw_bytes).decode("utf-8")

        # Categorize by MIME family for the client.
        if mime_type.startswith("image/"):
            type_category = "image"
        elif mime_type.startswith("audio/"):
            type_category = "audio"
        else:
            type_category = "blob"
        return {"type": type_category, "data": data, "mimeType": mime_type}
    except Exception as e:
        # Errors (missing file, permissions) are returned, not raised.
        return {"error": str(e)}
|
|
202
|
+
|
|
203
|
+
@mcp.tool()
def write_file(path: str, content: str) -> str:
    """Create a new file or completely overwrite an existing file. Prefer relative paths."""
    target = validate_path(path)
    # write_text truncates/creates exactly like open(..., 'w').
    target.write_text(content, encoding='utf-8')
    return f"Successfully wrote to {path}"
|
|
210
|
+
|
|
211
|
+
@mcp.tool()
def create_directory(path: str) -> str:
    """Create a new directory or ensure it exists. Prefer relative paths."""
    target = validate_path(path)
    # parents=True + exist_ok=True matches os.makedirs(..., exist_ok=True).
    target.mkdir(parents=True, exist_ok=True)
    return f"Successfully created directory {path}"
|
|
217
|
+
|
|
218
|
+
@mcp.tool()
def list_directory(path: str) -> str:
    """Get a detailed listing of all files and directories. Prefer relative paths."""
    directory = validate_path(path)
    if not directory.is_dir():
        return f"Error: {path} is not a directory"

    # Tag each entry and return them in sorted order ([DIR] sorts before [FILE]).
    labelled = [
        f"{'[DIR]' if child.is_dir() else '[FILE]'} {child.name}"
        for child in directory.iterdir()
    ]
    return "\n".join(sorted(labelled))
|
|
229
|
+
|
|
230
|
+
@mcp.tool()
def list_directory_with_sizes(path: str) -> str:
    """Get listing with file sizes. Prefer relative paths.

    Entries that disappear or become unreadable mid-listing are skipped.
    """
    path_obj = validate_path(path)
    if not path_obj.is_dir():
        return "Error: Not a directory"

    output = []
    for entry in path_obj.iterdir():
        try:
            is_dir = entry.is_dir()
            prefix = "[DIR]" if is_dir else "[FILE]"
            size_str = "" if is_dir else format_size(entry.stat().st_size)
            output.append(f"{prefix} {entry.name.ljust(30)} {size_str}")
        except OSError:
            # Narrowed from a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit; only filesystem races (entry
            # deleted between iterdir and stat, permission errors) are skipped.
            continue
    return "\n".join(sorted(output))
|
|
245
|
+
|
|
246
|
+
@mcp.tool()
def move_file(source: str, destination: str) -> str:
    """Move or rename files. Prefer relative paths."""
    src_path = validate_path(source)
    dst_path = validate_path(destination)
    # Refuse to clobber an existing destination.
    if dst_path.exists():
        raise ValueError(f"Destination {destination} already exists")
    src_path.rename(dst_path)
    return f"Moved {source} to {destination}"
|
|
254
|
+
|
|
255
|
+
@mcp.tool()
def search_files(path: str, pattern: str) -> str:
    """Recursively search for files matching a glob pattern. Prefer relative paths."""
    root = validate_path(path)
    try:
        matches = []
        for candidate in root.rglob(pattern):
            # Only report files; directories matching the glob are skipped.
            if candidate.is_file():
                matches.append(str(candidate.relative_to(root)))
        return "\n".join(matches) or "No matches found."
    except Exception as e:
        return f"Error during search: {e}"
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
@mcp.tool()
def get_file_info(path: str) -> str:
    """Retrieve detailed metadata. Prefer relative paths."""
    target = validate_path(path)
    info = target.stat()
    kind = 'Dir' if target.is_dir() else 'File'
    modified = datetime.fromtimestamp(info.st_mtime)
    return f"Path: {target}\nType: {kind}\nSize: {format_size(info.st_size)}\nModified: {modified}"
|
|
272
|
+
|
|
273
|
+
@mcp.tool()
def directory_tree(path: str, max_depth: int = 4, exclude_dirs: Optional[List[str]] = None) -> str:
    """Get recursive JSON tree with depth limit and default excludes.

    max_depth: recursion cutoff (string values from lax clients are coerced).
    exclude_dirs: directory names to skip; defaults to common vendor/cache dirs.
    Returns a JSON string: {"name", "type", optional "children"/"error"}.
    """
    root = validate_path(path)

    # Use provided excludes or our smart defaults.
    default_excludes = ['.git', '.venv', '__pycache__', 'node_modules', '.pytest_cache']
    excluded = exclude_dirs if exclude_dirs is not None else default_excludes

    # Lax agent clients sometimes pass max_depth as a string; coerce it to an
    # int instead of silently substituting a different depth (the old code
    # replaced any string — even "5" — with 3).
    if isinstance(max_depth, str):
        try:
            max_depth = int(max_depth)
        except ValueError:
            max_depth = 3  # previous fallback for an unusable value

    def build(current: Path, depth: int) -> Optional[Dict]:
        # Prune at the depth limit and for excluded directory names.
        if depth > max_depth or current.name in excluded:
            return None

        node: Dict[str, object] = {"name": current.name, "type": "directory" if current.is_dir() else "file"}

        if current.is_dir():
            children: List[Dict] = []
            try:
                for entry in sorted(current.iterdir(), key=lambda x: x.name):
                    child = build(entry, depth + 1)
                    if child:
                        children.append(child)
                if children:
                    node["children"] = children
            except PermissionError:
                node["error"] = "Permission Denied"
        return node

    tree = build(root, 0)
    return json.dumps(tree, indent=2)
|
|
304
|
+
|
|
305
|
+
class RooStyleEditTool:
    """A robust, agent-friendly file editing tool.

    Validates a search/replace edit against a file's current content and
    produces an EditResult describing either the prepared new content or a
    categorized failure.
    """
    def count_occurrences(self, content: str, substr: str) -> int:
        # An empty search string is defined to occur zero times (str.count
        # would otherwise report len+1 matches).
        return content.count(substr) if substr else 0
    def normalize_line_endings(self, content: str) -> str:
        # Map CRLF and lone CR to LF so matching is line-ending agnostic.
        return content.replace('\r\n', '\n').replace('\r', '\n')

    def _prepare_edit(self, file_path: str, old_string: str, new_string: str, expected_replacements: int) -> EditResult:
        """Validate the edit and compute the resulting content without writing.

        An empty old_string on a missing file means "create a new file".
        Returns EditResult with success=False and an error_type on any
        validation failure; on success carries original and new content.
        """
        p = validate_path(file_path)
        file_exists = p.exists()
        is_new_file = not file_exists and old_string == ""
        if not file_exists and not is_new_file:
            return EditResult(success=False, message=f"File not found: {file_path}", error_type="file_not_found")
        # NOTE(review): is_new_file requires `not file_exists`, so this branch
        # looks unreachable as written — confirm the intended guard.
        if file_exists and is_new_file:
            return EditResult(success=False, message=f"File '{file_path}' already exists.", error_type="file_exists")
        original_content = p.read_text(encoding='utf-8') if file_exists else ""
        normalized_content = self.normalize_line_endings(original_content)
        normalized_old = self.normalize_line_endings(old_string)
        if not is_new_file:
            if old_string == new_string:
                return EditResult(success=False, message="No changes to apply.", error_type="validation_error")
            # The match count must equal exactly what the caller expected,
            # guarding against ambiguous or stale anchors.
            occurrences = self.count_occurrences(normalized_content, normalized_old)
            if occurrences == 0:
                return EditResult(success=False, message="No match found for 'old_string'.", error_type="validation_error")
            if occurrences != expected_replacements:
                return EditResult(success=False, message=f"Expected {expected_replacements} occurrences but found {occurrences}.", error_type="validation_error")
        new_content = new_string if is_new_file else normalized_content.replace(normalized_old, new_string)
        return EditResult(success=True, message="Edit prepared.", original_content=original_content, new_content=new_content)
|
|
333
|
+
|
|
334
|
+
# --- Interactive Human-in-the-Loop Tools ---
# NOTE(review): APPROVAL_KEYWORD is not referenced anywhere in this module;
# propose_and_review detects approval via a trailing double newline instead.
# Confirm whether this constant is still needed.
APPROVAL_KEYWORD = "##APPROVE##"
|
|
336
|
+
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
@mcp.tool()
def propose_and_review(path: str, new_string: str, old_string: str = "", expected_replacements: int = 1, session_path: Optional[str] = None) -> str:
    """
    Starts or continues an interactive review session using a VS Code diff view. This smart tool adapts its behavior based on the arguments provided.

    Intents:

    1. **Start New Review (Patch):** Provide `path`, `old_string`, `new_string`. Validates the patch against the original file.
    2. **Start New Review (Full Rewrite):** Provide `path`, `new_string`, and leave `old_string` empty.
    3. **Continue Review (Contextual Patch):** Provide `path`, `session_path`, `old_string`, and `new_string`.
        * **CRITICAL:** If the user modified the file in the previous turn (indicated by `user_feedback_diff`), `old_string` MUST match the text **as modified by the user**, not your previous `new_string`. You must apply the user's diff to your previous output to determine the correct `old_string` for this step.
    4. **Continue Review (Full Rewrite / Recovery):** Provide `path`, `session_path`, `new_string`, and the full content of the file as `old_string`.

    Note: `path` is always required to identify the file being edited, even when continuing a session.

    It blocks and waits for the user to save the file, then returns their action ('APPROVE' or 'REVIEW').
    """
    tool = RooStyleEditTool()
    original_path_obj = Path(path)
    active_proposal_content = ""

    # --- Step 1: Determine Intent and Prepare Session ---
    if session_path:
        # --- INTENT: CONTINUING AN EXISTING SESSION ---
        temp_dir = Path(session_path)
        if not temp_dir.is_dir():
            raise ValueError(f"Session path {session_path} does not exist.")

        # The session holds two staged files: the user's last accepted state
        # ("current_") and the agent's pending proposal ("future_").
        current_file_path = temp_dir / f"current_{original_path_obj.name}"
        future_file_path = temp_dir / f"future_{original_path_obj.name}"

        staged_content = current_file_path.read_text(encoding='utf-8')

        # The `old_string` is the "contextual anchor". We try to apply it as a patch.
        occurrences = tool.count_occurrences(staged_content, old_string)

        if occurrences != 1:
            # SAFETY VALVE: The patch is ambiguous or invalid. Fail gracefully.
            raise ValueError(f"Contextual patch failed. The provided 'old_string' anchor was found {occurrences} times in the user's last version, but expected exactly 1. Please provide the full file content as 'old_string' to recover.")

        # Patch successfully applied.
        active_proposal_content = staged_content.replace(old_string, new_string, 1)
        future_file_path.write_text(active_proposal_content, encoding='utf-8')

    else:
        # --- INTENT: STARTING A NEW SESSION ---
        # Session dirs are created under the system temp dir with a prefix
        # that validate_path() specifically whitelists.
        temp_dir = Path(tempfile.mkdtemp(prefix="mcp_review_"))
        current_file_path = temp_dir / f"current_{original_path_obj.name}"
        future_file_path = temp_dir / f"future_{original_path_obj.name}"

        prep_result = tool._prepare_edit(path, old_string, new_string, expected_replacements)
        if not prep_result.success:
            # Clean up the just-created session dir before surfacing the error.
            if temp_dir.exists(): shutil.rmtree(temp_dir)
            raise ValueError(f"Edit preparation failed: {prep_result.message} (Error type: {prep_result.error_type})")

        if prep_result.original_content is not None:
            current_file_path.write_text(prep_result.original_content, encoding='utf-8')
        active_proposal_content = prep_result.new_content
        if active_proposal_content is not None:
            future_file_path.write_text(active_proposal_content, encoding='utf-8')

    # --- Step 2: Display, Launch, and Wait for Human ---
    vscode_command = f'code --diff "{current_file_path}" "{future_file_path}"'

    print(f"\n--- WAITING FOR HUMAN REVIEW ---\nPlease review the proposed changes in VS Code:\n\n{vscode_command}\n")
    print(f'To approve, add a double newline to the end of the file before saving.')
    if IS_VSCODE_CLI_AVAILABLE:
        try:
            subprocess.Popen(vscode_command, shell=True)
            print("✅ Automatically launched VS Code diff view.")
        except Exception as e:
            print(f"⚠️ Failed to launch VS Code automatically: {e}")

    # Block until the user saves the proposal file: poll its mtime once per
    # second and proceed on the first change.
    initial_mod_time = future_file_path.stat().st_mtime
    while True:
        time.sleep(1)
        if future_file_path.stat().st_mtime > initial_mod_time: break

    # --- Step 3: Interpret User's Action ---
    user_edited_content = future_file_path.read_text(encoding='utf-8')
    response = {"session_path": str(temp_dir)}

    if user_edited_content.endswith("\n\n"):
        # A trailing blank line signals approval. Remove trailing newlines.
        clean_content = user_edited_content.rstrip('\n')
        # NOTE(review): any save that happens to end with a blank line is
        # treated as approval — confirm this only triggers when the user
        # deliberately appends two newlines; otherwise false positives occur.
        try:
            future_file_path.write_text(clean_content, encoding='utf-8')
            print("✅ Approval detected. You can safely close the diff view.")
        except Exception as e:
            print(f"⚠️ Could not auto-remove keyword from review file: {e}")
        response["user_action"] = "APPROVE"
        response["message"] = "User has approved the changes. Call 'commit_review' to finalize."
    else:
        # No approval marker: treat the save as feedback. Stage the user's
        # version as the new baseline and report a diff against the proposal.
        current_file_path.write_text(user_edited_content, encoding='utf-8')
        user_feedback_diff = "".join(difflib.unified_diff(
            active_proposal_content.splitlines(keepends=True) if active_proposal_content is not None else [],
            user_edited_content.splitlines(keepends=True),
            fromfile=f"a/{future_file_path.name} (agent proposal)",
            tofile=f"b/{future_file_path.name} (user feedback)"
        ))
        response["user_action"] = "REVIEW"
        response["message"] = "User provided feedback. A diff is included. Propose a new edit against the updated content."
        response["user_feedback_diff"] = user_feedback_diff

    return json.dumps(response, indent=2)
|
|
446
|
+
|
|
447
|
+
@mcp.tool()
def commit_review(session_path: str, original_path: str) -> str:
    """Finalizes an interactive review session by committing the approved changes."""
    session_dir = Path(session_path)
    original_file = validate_path(original_path)
    if not session_dir.is_dir():
        raise ValueError(f"Invalid session path: {session_path}")

    future_file = session_dir / f"future_{original_file.name}"
    if not future_file.exists():
        raise FileNotFoundError(f"Approved file not found in session: {future_file}")

    # Strip the trailing newlines left over from the approval marker.
    final_content = future_file.read_text(encoding='utf-8').rstrip('\n')

    try:
        original_file.write_text(final_content, encoding='utf-8')
    except Exception as e:
        raise IOError(f"Failed to write final content to {original_path}: {e}")

    # Best-effort cleanup: a failure here does not undo the commit.
    try:
        shutil.rmtree(session_dir)
    except Exception as e:
        return f"Successfully committed changes to {original_path}, but failed to clean up session dir {session_path}: {e}"
    return f"Successfully committed changes to '{original_path}' and cleaned up the review session."
|
|
468
|
+
@mcp.tool()
def grounding_search(query: str) -> str:
    """[NEW] A custom search tool. Accepts a natural language query and returns a grounded response."""
    # This is a placeholder for a future RAG or other search implementation.
    # It currently only logs the query and returns a fixed placeholder string.
    print(f"Received grounding search query: {query}")
    return "DEVELOPER PLEASE UPDATE THIS WITH ACTUAL CONTENT"
|
|
474
|
+
|
|
475
|
+
|
|
476
|
+
@mcp.tool()
def append_text(path: str, content: str) -> str:
    """
    Append text to the end of a file.
    Use this as a fallback if edit_file fails to find a match.
    Prefer relative paths.

    Raises FileNotFoundError when the target file does not exist.
    """
    p = validate_path(path)
    if not p.exists():
        raise FileNotFoundError(f"File not found: {path}. Cannot append to a non-existent file.")

    # Insert a separating newline only when the file is non-empty AND does not
    # already end with one, so the appended text never clashes with the
    # existing last line. (Previously a newline was inserted unconditionally,
    # creating a spurious blank line in files already ending with "\n".)
    needs_newline = False
    if p.stat().st_size > 0:
        with open(p, 'rb') as probe:
            probe.seek(-1, os.SEEK_END)
            needs_newline = probe.read(1) not in (b"\n", b"\r")

    with open(p, 'a', encoding='utf-8') as f:
        if needs_newline:
            f.write("\n")
        f.write(content)

    return f"Successfully appended content to '{path}'."
|
fs_mcp/web_ui.py
ADDED
|
@@ -0,0 +1,395 @@
|
|
|
1
|
+
import streamlit as st
|
|
2
|
+
import sys
|
|
3
|
+
import inspect
|
|
4
|
+
import json
|
|
5
|
+
import base64
|
|
6
|
+
import asyncio
|
|
7
|
+
import copy
|
|
8
|
+
from typing import Optional, Union, List, Dict, Any
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from dataclasses import asdict
|
|
11
|
+
from fastmcp.utilities.inspect import inspect_fastmcp
|
|
12
|
+
from streamlit_js_eval import streamlit_js_eval
|
|
13
|
+
|
|
14
|
+
# --- 1. SETUP & CONFIG ---
|
|
15
|
+
st.set_page_config(page_title="FS-MCP", layout="wide", page_icon="📂")
|
|
16
|
+
|
|
17
|
+
import tempfile
|
|
18
|
+
|
|
19
|
+
def get_workspace_description():
    """
    Calls list_allowed_directories and list_directory to generate
    a descriptive text of the workspace, excluding temporary directories.

    Returns an error string (never raises) if tools are missing or any
    call fails.
    """
    try:
        # Resolve the three core tools up front; bail out if any is missing.
        list_dirs_fn = tools.get('list_allowed_directories')
        list_dir_fn = tools.get('list_directory')
        directory_tree_fn = tools.get('directory_tree')
        if not (list_dirs_fn and list_dir_fn and directory_tree_fn):
            return "Error: Core directory tools not found."

        # Allowed directories, minus anything under the OS temp directory
        # (propose_and_review sessions live there and are noise here).
        temp_root = tempfile.gettempdir()
        allowed_dirs = [
            line.strip()
            for line in list_dirs_fn().split('\n')
            if line.strip() and not line.strip().startswith(temp_root)
        ]
        filtered_dirs_str = "\n".join(allowed_dirs)

        # Recursive tree per allowed directory.
        dir_tree_str = "\n".join(
            f"Directory: {d}\n---\n{directory_tree_fn(path=d)}\n"
            for d in allowed_dirs
        )

        # Flat listing per allowed directory.
        full_listing_str = "\n".join(
            f"Directory: {d}\n---\n{list_dir_fn(path=d)}\n"
            for d in allowed_dirs
        )

        return (
            "\n```\n"
            "This is the initial result for MCP calls :\n\n"
            "== list_allowed_directories ==\n"
            f"{filtered_dirs_str}\n\n"
            "== list_directory ==\n"
            f"{full_listing_str}\n\n"
            "== directory_tree ==\n"
            f"{dir_tree_str}"
            "\n```\n"
        )

    except Exception as e:
        return f"Error generating room description: {e}"
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# [NEW] Import Google GenAI transformers for schema standardization.
# NOTE(review): `_transformers` is a *private* module of google-genai — a
# library upgrade may move or rename it; verify when bumping the dependency.
try:
    from google.genai import _transformers
except ImportError:
    st.error("❌ 'google-genai' library not found. Please run: uv add google-genai")
    st.stop()

# The UI reuses the server module directly (same process) instead of
# talking to it over the MCP transport.
try:
    from fs_mcp import server
except ImportError:
    st.error("❌ Could not import 'fs_mcp.server'. Is the package installed?")
    st.stop()
|
|
88
|
+
|
|
89
|
+
# Initialize Config from CLI Args.
# When launched via `streamlit run ... -- <dirs>`, the script's own args
# follow a literal "--" separator; otherwise fall back to every non-flag
# argv entry so a direct `python web_ui.py <dirs>` invocation also works.
try:
    if "--" in sys.argv:
        raw_args = sys.argv[sys.argv.index("--") + 1:]
    else:
        raw_args = [a for a in sys.argv[1:] if not a.startswith("-")]
    server.initialize(raw_args)
except Exception as e:
    st.error(f"❌ Configuration Error: {e}")
    st.stop()
|
|
99
|
+
|
|
100
|
+
# --- 2. HEADER ---
st.title("📂 FS-MCP Explorer")
if not server.ALLOWED_DIRS:
    st.warning("⚠️ No directories configured! Defaulting to CWD.")

st.sidebar.header("Active Configuration")
st.sidebar.code("\n".join(str(d) for d in server.ALLOWED_DIRS))


# --- 3. TOOL DISCOVERY & SCHEMA EXPORT ---
# Populated by the discovery block below:
#   tools: name -> unwrapped callable (for direct execution from the UI)
#   tool_schemas: raw MCP ToolInfo dicts
#   gemini_schemas: schemas adapted for Gemini Function Declarations
tools = {}
tool_schemas = []
gemini_schemas = []
|
|
113
|
+
|
|
114
|
+
def prune_for_gemini_strictness(obj: Any) -> Any:
    """
    Recursively removes keys that are valid in JSON Schema/OpenAPI
    but strictly forbidden by the Gemini Function Calling API.

    Non-container values pass through untouched.
    """
    # Keys Gemini's strict validator rejects outright.
    forbidden = {"default", "title", "property_ordering", "propertyOrdering"}

    if isinstance(obj, list):
        return [prune_for_gemini_strictness(item) for item in obj]
    if not isinstance(obj, dict):
        return obj

    cleaned = {}
    for key, value in obj.items():
        if key in forbidden:
            continue
        cleaned[key] = prune_for_gemini_strictness(value)
    return cleaned
|
|
131
|
+
|
|
132
|
+
def convert_to_gemini_schema(tool_dict: Dict[str, Any]) -> Dict[str, Any]:
    """
    Uses official google-genai transformers + strict pruning to adapt
    schemas for Gemini Function Declarations.
    """
    # Work on a private copy so the caller's schema dict is never mutated.
    schema = copy.deepcopy(tool_dict.get("input_schema", {}))

    # Official library transform (in place) — handles the complex logic
    # such as anyOf -> nullable conversion.
    _transformers.process_schema(schema, client=None)

    # Strict pruning: Gemini rejects "default", "title",
    # and "property_ordering".
    schema = prune_for_gemini_strictness(schema)

    # Gemini requires an object root with a properties map present.
    schema.setdefault("type", "object")
    schema.setdefault("properties", {})

    return {
        "name": tool_dict["name"],
        "description": tool_dict.get("description", ""),
        "parameters": schema,
    }
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
try:
    # 1. Use the official inspect utility to get a structured server blueprint.
    #    inspect_fastmcp is async, so drive it to completion here.
    server_info = asyncio.run(inspect_fastmcp(server.mcp))

    # 2. Convert the ToolInfo dataclasses to plain dictionaries.
    tool_schemas = [asdict(tool) for tool in server_info.tools]

    # 3. [NEW] Generate Gemini-compatible schemas using the official library.
    gemini_schemas = [convert_to_gemini_schema(ts) for ts in tool_schemas]

    # 4. Map the functions for the UI to execute. Tool names are expected to
    #    match module attributes on fs_mcp.server.
    for tool_info in server_info.tools:
        name = tool_info.name
        if hasattr(server, name):
            wrapper = getattr(server, name)
            # Unwrap FastMCP decorators if needed: the decorated object
            # exposes the original callable as `.fn`.
            fn = wrapper.fn if hasattr(wrapper, 'fn') else wrapper
            tools[name] = fn
        else:
            st.warning(f"Tool '{name}' has a schema but no matching function found in server.py")

except Exception as e:
    # Discovery failure is fatal for the UI — nothing to render without tools.
    st.error(f"Failed to inspect MCP server: {e}")
    st.exception(e)
    st.stop()
|
|
186
|
+
|
|
187
|
+
# --- 3.5. ROOM DESCRIPTION ---
# Cache the (potentially slow) workspace scan in session state so it only
# runs once per browser session, not on every Streamlit rerun.
if 'workspace_description' not in st.session_state:
    st.session_state.workspace_description = get_workspace_description()

with st.sidebar.expander("📝 Workspace Description", expanded=False):
    st.caption("Copy this to quickly onboard the agent")
    st.code(st.session_state.workspace_description, language="md")

# --- SIDEBAR: EXPORT SECTION ---
# Ready-to-paste schema exports: Gemini-sanitized and raw MCP variants.
with st.sidebar.expander("🔌 Gemini API Schemas", expanded=False):
    st.caption("Copy this JSON for Gemini Function Declarations:")
    st.code(json.dumps(gemini_schemas, indent=2), language="json")

with st.sidebar.expander("⚙️ Raw OpenAI MCP Schemas", expanded=False):
    st.caption("Internal MCP representation:")
    st.code(json.dumps(tool_schemas, indent=2), language="json")
|
|
203
|
+
|
|
204
|
+
# --- 4. EXECUTION HANDLER ---
def execute_tool(func, args):
    """Executes tool and returns both raw result and protocol view.

    Returns a 4-tuple (raw_result, protocol_response, display_type, error),
    where display_type is one of "image" / "json" / "text" / "error" and
    error is None on success.
    """
    try:
        # Run the actual function.
        result = func(**args)

        # Decide how the result maps onto a simulated MCP content block
        # (the "agent view" of the response).
        if isinstance(result, dict) and result.get("type") == "image":
            # Image protocol format.
            block = {
                "type": "image",
                "data": result["data"],
                "mimeType": result.get("mimeType", "image/png"),
            }
            display_type = "image"
        elif isinstance(result, (dict, list)):
            # Structured data is embedded as pretty-printed JSON text.
            block = {"type": "text", "text": json.dumps(result, indent=2)}
            display_type = "json"
        else:
            # Everything else becomes plain text.
            block = {"type": "text", "text": str(result)}
            display_type = "text"

        protocol_response = {"content": [block], "isError": False}
        return result, protocol_response, display_type, None

    except Exception as e:
        # Mirror the MCP error envelope the agent would receive.
        failure = {
            "content": [{"type": "text", "text": str(e)}],
            "isError": True,
        }
        return None, failure, "error", str(e)
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
# --- 5. UI LOGIC ---
# Streamlit reruns this whole script on every interaction; `trigger_run`
# and `execution_args` are therefore only set within the rerun in which a
# form was submitted.
if not tools:
    st.error("No tools found.")
    st.stop()

selected = st.sidebar.radio("Available Tools", sorted(tools.keys()))
fn = tools[selected]
sig = inspect.signature(fn)

st.header(f"🔧 {selected}")
if inspect.getdoc(fn):
    st.info(inspect.getdoc(fn))

# INPUT TABS — three ways to supply the same arguments.
tab_raw, tab_compact, tab_form = st.tabs(["📄 Raw JSON", "⚡ Compact JSON", "📝 Interactive Form"])

execution_args = None
trigger_run = False

# --- TAB 1: INTERACTIVE FORM ---
with tab_form:
    with st.form("interactive_form"):
        form_inputs = {}
        for name, param in sig.parameters.items():
            # Skip MCP framework context parameters.
            if name in ['ctx', 'context']: continue

            # Type Checking: detect plain and Optional[...] numeric/bool
            # annotations via __origin__/__args__ (Union introspection).
            annotation = param.annotation
            is_number = (annotation in [int, float]) or (getattr(annotation, "__origin__", None) is Union and int in getattr(annotation, "__args__", []))
            is_bool = (annotation == bool) or (getattr(annotation, "__origin__", None) is Union and bool in getattr(annotation, "__args__", []))

            # Widget choice is keyed on well-known parameter names first,
            # falling back to the annotation-derived type.
            if name in ['path', 'source', 'destination']:
                def_val = str(server.ALLOWED_DIRS[0]) if server.ALLOWED_DIRS else ""
                form_inputs[name] = st.text_input(name, value=def_val)
            elif name == 'content':
                st.caption("Literal Content (WYSIWYG - Enter creates newlines)")
                form_inputs[name] = st.text_area(name, height=200)
            elif name == 'edits':
                st.write("Edits (JSON List)")
                val = st.text_area("JSON", value='[{"oldText": "foo", "newText": "bar"}]')
                form_inputs[name] = val  # Parse later
            elif name in ['exclude_patterns', 'paths']:
                val = st.text_area(f"{name} (one per line)")
                form_inputs[name] = val
            elif is_bool:
                form_inputs[name] = st.checkbox(name)
            elif is_number:
                form_inputs[name] = st.number_input(name, value=0)
            else:
                form_inputs[name] = st.text_input(name)

        if st.form_submit_button("Run Form"):
            # Process Form Inputs: convert widget strings back into the
            # structured values the tool expects.
            try:
                processed = {}
                for k, v in form_inputs.items():
                    # Handle lists (one entry per line).
                    if k in ['exclude_patterns', 'paths']:
                        processed[k] = [x.strip() for x in v.split('\n') if x.strip()]
                    # Handle JSON fields.
                    elif k == 'edits':
                        processed[k] = json.loads(v)
                    # Handle Optionals: 0 means "not set" for head/tail.
                    elif v == 0 and k in ['head', 'tail']:
                        processed[k] = None
                    else:
                        processed[k] = v
                execution_args = processed
                trigger_run = True
            except Exception as e:
                st.error(f"Form Error: {e}")

# --- TAB 2 & 3: JSON INPUTS ---
# Helper to generate a template dict mirroring the tool's signature.
default_args = {}
for name, param in sig.parameters.items():
    if name in ['ctx', 'context']: continue
    if name in ['path', 'source', 'destination']:
        default_args[name] = str(server.ALLOWED_DIRS[0]) if server.ALLOWED_DIRS else ""
    elif name == 'content': default_args[name] = "Line 1\nLine 2"
    elif name == 'paths': default_args[name] = [str(server.ALLOWED_DIRS[0])] if server.ALLOWED_DIRS else []
    else: default_args[name] = ""

json_template = json.dumps(default_args, indent=2)

with tab_raw:
    with st.form("json_raw_form"):
        raw_text = st.text_area("JSON Input", value=json_template, height=300)
        if st.form_submit_button("Run Raw JSON"):
            try:
                execution_args = json.loads(raw_text)
                trigger_run = True
            except Exception as e:
                st.error(f"Invalid JSON: {e}")

with tab_compact:
    with st.form("json_compact_form"):
        compact_text = st.text_input("One-line JSON", value=json.dumps(default_args))
        if st.form_submit_button("Run Compact JSON"):
            try:
                execution_args = json.loads(compact_text)
                trigger_run = True
            except Exception as e:
                st.error(f"Invalid JSON: {e}")

# --- OUTPUT DISPLAY ---
if trigger_run and execution_args is not None:
    st.divider()

    with st.spinner("Running tool..."):
        res_raw, res_proto, dtype, err = execute_tool(fn, execution_args)

    # Copy the protocol response to the clipboard via browser-side JS.
    # Double json.dumps: the inner produces the payload, the outer escapes
    # it into a valid JS string literal.
    json_response = json.dumps(res_proto, indent=None)
    escaped_json = json.dumps(json_response)
    streamlit_js_eval(js_expressions=f"navigator.clipboard.writeText({escaped_json})")
    if err:
        st.error("Tool Execution Failed")
        st.toast("Something went wrong - error copied to clipboard", icon="❌")
    else:
        st.success("Tool Execution Successful")
        st.toast("Tool response copied to clipboard!", icon="✅")

    # Side-by-side: human-friendly rendering vs. the exact agent payload.
    col_human, col_agent = st.columns(2)

    with col_human:
        st.subheader("👀 Human View")
        if err:
            st.error(err)
        elif dtype == "image":
            st.image(base64.b64decode(res_proto["content"][0]["data"]))
        elif dtype == "json":
            st.json(res_raw)
        else:
            text = str(res_raw)
            # Unified-diff output gets diff highlighting.
            if text.startswith("---") or text.startswith("+++"):
                st.code(text, language="diff")
            else:
                st.code(text)

    with col_agent:
        st.subheader("🤖 Agent Protocol View")
        st.caption("This is exactly what the LLM receives.")
        st.code(json.dumps(res_proto, indent=None), language="json")
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: fs-mcp
|
|
3
|
+
Version: 1.3.2
|
|
4
|
+
Summary: A secure MCP filesystem server with Stdio and Web UI modes.
|
|
5
|
+
Author-email: luutuankiet <luutuankiet.ftu2@gmail.com>
|
|
6
|
+
Requires-Python: >=3.10
|
|
7
|
+
Requires-Dist: fastmcp>=0.1.0
|
|
8
|
+
Requires-Dist: google-genai>=1.56.0
|
|
9
|
+
Requires-Dist: httpx>=0.28.1
|
|
10
|
+
Requires-Dist: pydantic>=2.0
|
|
11
|
+
Requires-Dist: pyfiglet
|
|
12
|
+
Requires-Dist: streamlit-js-eval>=0.1.5
|
|
13
|
+
Requires-Dist: streamlit>=1.30.0
|
|
14
|
+
Requires-Dist: toml
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
|
|
17
|
+
# fs-mcp 📂
|
|
18
|
+
|
|
19
|
+
**The "Human-in-the-Loop" Filesystem MCP Server**
|
|
20
|
+
|
|
21
|
+
---
|
|
22
|
+
|
|
23
|
+
https://github.com/user-attachments/assets/132acdd9-014c-4ba0-845a-7db74644e655
|
|
24
|
+
|
|
25
|
+
## 💡 Why This Exists
|
|
26
|
+
|
|
27
|
+
I built this because I was tired of jumping through hoops.
|
|
28
|
+
|
|
29
|
+
The promise of the Model Context Protocol (MCP) is incredible, but the reality of using the standard filesystem server hit a few walls for my workflow:
|
|
30
|
+
|
|
31
|
+
1. **The Container Gap:** I do most of my work in Docker. Connecting a local agent (like Claude Desktop) to a filesystem inside a container via Stdio is a networking nightmare.
|
|
32
|
+
2. **The Free Tier Lockout:** I wanted to use the free tier of [Google AI Studio](https://aistudio.google.com/) to edit code, but you can't easily plug MCP into a web interface.
|
|
33
|
+
3. **Schema Hell:** Even if you *do* copy-paste schemas into Gemini, they often break because Gemini's strict validation is only a [subset of the standard OpenAPI spec](https://ai.google.dev/gemini-api/docs/function-calling).
|
|
34
|
+
|
|
35
|
+
**fs-mcp solves this.** It is a Python-based server built on `fastmcp` that treats "Human-in-the-Loop" as a first-class citizen, enabling seamless and interactive collaboration between LLM agents and a developer's local environment.
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## 🚀 Key Features
|
|
40
|
+
|
|
41
|
+
### 1. HTTP by Default (Remote Ready)
|
|
42
|
+
|
|
43
|
+
It runs a background HTTP server alongside the CLI. You can finally connect agents to remote environments or containers without SSH tunneling wizardry.
|
|
44
|
+
|
|
45
|
+
### 2. Zero-Config Inspector
|
|
46
|
+
|
|
47
|
+
No `npm install inspector`. I baked a **Streamlit Web UI** directly into the package. Launch it, and you instantly have a visual form to test tools, view results, and generate configs.
|
|
48
|
+
|
|
49
|
+
### 3. Copy-Paste Gemini Schemas 📋
|
|
50
|
+
|
|
51
|
+
The UI automatically sanitizes and translates your tool schemas specifically for **Google GenAI**. It strips forbidden keys (`default`, `title`, etc.) so you can paste function definitions directly into AI Studio and start coding for free.
|
|
52
|
+
|
|
53
|
+
### 4. Human-in-the-Loop Diffing 🤝
|
|
54
|
+
|
|
55
|
+
The **`propose_and_review`** tool bridges the gap between agent proposals and human oversight. It opens a VS Code diff window for you to inspect changes.
|
|
56
|
+
|
|
57
|
+
**How it Works:**
|
|
58
|
+
1. The agent calls `propose_and_review` with a code change.
|
|
59
|
+
2. A VS Code window pops up showing the **Diff**.
|
|
60
|
+
3. **To Approve:** Add a double newline at the very end of the file and Save.
|
|
61
|
+
4. **To Review:** Just edit the code directly in the diff window and Save. The agent will receive your edits as feedback and try again!
|
|
62
|
+
|
|
63
|
+
```mermaid
|
|
64
|
+
sequenceDiagram
|
|
65
|
+
participant User
|
|
66
|
+
participant Agent
|
|
67
|
+
participant MCP_Server
|
|
68
|
+
|
|
69
|
+
User->>Agent: "Propose an edit to README.md"
|
|
70
|
+
activate Agent
|
|
71
|
+
Agent->>MCP_Server: call propose_and_review(path="README.md", old, new)
|
|
72
|
+
activate MCP_Server
|
|
73
|
+
Note right of MCP_Server: Creates temp files & prints vscode_command.<br/>Now enters "watch loop", waiting for user to save.
|
|
74
|
+
MCP_Server-->>User: (via console) `code --diff ...`
|
|
75
|
+
|
|
76
|
+
Note right of User: User opens VS Code, is happy with the change,<br/>adds a double newline to the end of the file, and saves.
|
|
77
|
+
|
|
78
|
+
Note right of MCP_Server: Save detected! Checks file content.
|
|
79
|
+
MCP_Server-->>Agent: return { user_action: "APPROVE", message: "User approved. Call commit_review." }
|
|
80
|
+
deactivate MCP_Server
|
|
81
|
+
|
|
82
|
+
Note right of Agent: Agent sees "APPROVE" and knows what to do next.
|
|
83
|
+
Agent->>MCP_Server: call commit_review(session_path, original_path="README.md")
|
|
84
|
+
activate MCP_Server
|
|
85
|
+
Note right of MCP_Server: Copies 'future' file to original path,<br/>removes the trailing newlines,<br/>and cleans up the temp directory.
|
|
86
|
+
MCP_Server-->>Agent: return "Successfully committed changes."
|
|
87
|
+
deactivate MCP_Server
|
|
88
|
+
|
|
89
|
+
Agent-->>User: "Changes have been committed!"
|
|
90
|
+
deactivate Agent
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
---
|
|
96
|
+
|
|
97
|
+
## ⚡ Quick Start
|
|
98
|
+
|
|
99
|
+
### Run Instantly
|
|
100
|
+
|
|
101
|
+
By default, this command launches the **Web UI (8123)** and a **Background HTTP Server (8124)**.
|
|
102
|
+
|
|
103
|
+
```bash
|
|
104
|
+
# Allow access to the current dir
|
|
105
|
+
uvx fs-mcp .
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
### Selective Launch
|
|
109
|
+
|
|
110
|
+
Want to disable a component? Use the flags:
|
|
111
|
+
|
|
112
|
+
```bash
|
|
113
|
+
# UI Only (No background HTTP)
|
|
114
|
+
fs-mcp --no-http .
|
|
115
|
+
|
|
116
|
+
# HTTP Only (Headless / Docker mode)
|
|
117
|
+
fs-mcp --no-ui .
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
---
|
|
121
|
+
|
|
122
|
+
## 🔌 Configuration
|
|
123
|
+
|
|
124
|
+
### Claude Desktop (Stdio Mode)
|
|
125
|
+
|
|
126
|
+
Add this to your `claude_desktop_config.json`:
|
|
127
|
+
|
|
128
|
+
```json
|
|
129
|
+
{
|
|
130
|
+
"mcpServers": {
|
|
131
|
+
"fs-mcp": {
|
|
132
|
+
"command": "uvx",
|
|
133
|
+
"args": [
|
|
134
|
+
"fs-mcp",
|
|
135
|
+
"/absolute/path/to/your/project"
|
|
136
|
+
]
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
### Docker (HTTP Mode)
|
|
143
|
+
|
|
144
|
+
To run inside a container and expose the filesystem to a local agent:
|
|
145
|
+
|
|
146
|
+
```bash
|
|
147
|
+
# In your entrypoint or CMD
|
|
148
|
+
uvx fs-mcp --no-ui --http-host 0.0.0.0 --http-port 8124 /app
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
---
|
|
152
|
+
|
|
153
|
+
## The Toolbox 🧰
|
|
154
|
+
|
|
155
|
+
| Tool | Description |
|
|
156
|
+
| -------------------------- | -------------------------------------------------------------------------- |
|
|
157
|
+
| `propose_and_review` | **Interactive Review:** Opens VS Code diff. Add a double newline to finalize. |
|
|
158
|
+
| `commit_review` | Finalizes the changes from an interactive review session. |
|
|
159
|
+
| `read_multiple_files` | Reads content of multiple files to save context window. |
|
|
160
|
+
| `directory_tree` | **Fast:** Returns recursive JSON tree. Skips `.venv`/`.git` automatically. |
|
|
161
|
+
| `search_files` | Recursive pattern discovery using `rglob`. |
|
|
162
|
+
| `grounding_search` | **New:** Natural language query for grounded search results. |
|
|
163
|
+
| `read_text_file` | Standard text reader (supports `head`/`tail` for large files). |
|
|
164
|
+
| `list_directory_with_sizes`| Detailed listing including formatted file sizes. |
|
|
165
|
+
| `list_allowed_directories` | List security-approved paths. |
|
|
166
|
+
| `get_file_info` | Metadata retrieval (size, modified time). |
|
|
167
|
+
| `read_media_file` | Returns base64 encoded images/audio. |
|
|
168
|
+
| `write_file` | Creates or overwrites files (atomic operations). |
|
|
169
|
+
| `create_directory` | Create a new directory. |
|
|
170
|
+
| `move_file` | Move or rename files. |
|
|
171
|
+
| `append_text` | Safe fallback for appending content to EOF. |
|
|
172
|
+
|
|
173
|
+
---
|
|
174
|
+
|
|
175
|
+
## License & Credits
|
|
176
|
+
|
|
177
|
+
Built with ❤️ for the MCP Community by **luutuankiet**.
|
|
178
|
+
Powered by **FastMCP** and **Streamlit**.
|
|
179
|
+
|
|
180
|
+
**Now go build some agents.** 🚀
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
fs_mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
fs_mcp/__main__.py,sha256=DDedxJgD1D-nIy5kyrG1S5Ow2gZESO_3pFf4bXsFBFY,5426
|
|
3
|
+
fs_mcp/http_runner.py,sha256=VFzP4DETV-cM4ShwAHiAJjKSf8nfVFGyl5NPK9v51Tg,814
|
|
4
|
+
fs_mcp/server.py,sha256=VxU3n2tvsnRG183bH777H1TLf0n6z9ddgB4LSLYaJ70,21428
|
|
5
|
+
fs_mcp/web_ui.py,sha256=5-D2yV4AqdX0BpoA7kfR7ccOIL9IASQli0Ol2bjoUyg,14119
|
|
6
|
+
fs_mcp-1.3.2.dist-info/METADATA,sha256=LJ_7htz8hkmxrZH7EmBhPd_xxFh1R22J_EXtiYW1XW4,7123
|
|
7
|
+
fs_mcp-1.3.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
8
|
+
fs_mcp-1.3.2.dist-info/entry_points.txt,sha256=CELnPTTv4c_WWstoX8CPBWUDh4Z8KK0enZQeSxG3wf0,48
|
|
9
|
+
fs_mcp-1.3.2.dist-info/RECORD,,
|