mle-kit-mcp 0.0.8__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mle_kit_mcp/llm_proxy_source.py +51 -0
- mle_kit_mcp/server.py +8 -0
- mle_kit_mcp/tools/__init__.py +6 -0
- mle_kit_mcp/tools/bash.py +40 -28
- mle_kit_mcp/tools/llm_proxy.py +148 -0
- mle_kit_mcp/tools/remote_gpu.py +36 -35
- mle_kit_mcp/utils.py +15 -0
- {mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/METADATA +1 -1
- mle_kit_mcp-0.1.0.dist-info/RECORD +18 -0
- mle_kit_mcp-0.0.8.dist-info/RECORD +0 -16
- {mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/WHEEL +0 -0
- {mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/entry_points.txt +0 -0
- {mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/licenses/LICENSE +0 -0
- {mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/top_level.txt +0 -0
mle_kit_mcp/llm_proxy_source.py ADDED
@@ -0,0 +1,51 @@
+import os
+
+import fire  # type: ignore
+import uvicorn
+from openai import AsyncOpenAI
+from fastapi import FastAPI, Request, HTTPException
+from fastapi.responses import JSONResponse
+
+ACCESS_TOKEN = os.getenv("ACCESS_TOKEN", "")
+OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
+OPENROUTER_BASE_URL = os.getenv("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1")
+
+app = FastAPI()
+client = AsyncOpenAI(base_url=OPENROUTER_BASE_URL, api_key=OPENROUTER_API_KEY)
+
+
+def _check_auth(request: Request) -> None:
+    auth = request.headers.get("authorization", "")
+    if not ACCESS_TOKEN or auth != f"Bearer {ACCESS_TOKEN}":
+        raise HTTPException(status_code=401, detail="Unauthorized")
+    if not OPENROUTER_API_KEY:
+        raise HTTPException(status_code=500, detail="OpenRouter key not configured")
+
+
+@app.post("/v1/chat/completions")
+async def chat_completions(request: Request) -> JSONResponse:
+    _check_auth(request)
+    payload = await request.json()
+    if isinstance(payload, dict) and payload.get("stream"):
+        payload.pop("stream", None)
+
+    try:
+        resp = await client.chat.completions.create(**payload)
+    except Exception as e:
+        raise HTTPException(status_code=502, detail=str(e))
+    return JSONResponse(resp.model_dump())
+
+
+@app.get("/health")
+async def health() -> JSONResponse:
+    if not OPENROUTER_API_KEY:
+        return JSONResponse({"error": "missing OPENROUTER_API_KEY"}, 500)
+    return JSONResponse({"result": "ok"})
+
+
+def main(host: str = "127.0.0.1", port: int = 8001) -> None:
+    uvicorn.run("llm_proxy:app", host=host, port=port)
+
+
+if __name__ == "__main__":
+    fire.Fire(main)
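For context, this new module is an OpenAI-compatible shim over OpenRouter: bearer-token auth, a single non-streaming /v1/chat/completions route, and a /health probe. A minimal sketch of a client call against it, assuming the proxy is already running on 127.0.0.1:8001 with ACCESS_TOKEN set to "secret" (both values are hypothetical; the real url and token come from the llm_proxy_* tools further down):

import openai  # the proxy speaks the OpenAI chat-completions protocol

# Hypothetical endpoint and token; the llm_proxy_* tools return the real values.
client = openai.OpenAI(base_url="http://127.0.0.1:8001/v1", api_key="secret")

# The proxy strips "stream" from the payload, so only non-streaming calls round-trip.
response = client.chat.completions.create(
    model="openai/gpt-5-mini",  # any OpenRouter model id
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)

Passing the access token as api_key works because the OpenAI client sends it as "Authorization: Bearer <key>", which is exactly what _check_auth expects.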
mle_kit_mcp/server.py CHANGED
@@ -1,3 +1,4 @@
+import os
 from pathlib import Path
 from typing import Optional
 
@@ -12,6 +13,10 @@ from .tools.remote_gpu import (
     create_remote_text_editor,
     remote_download,
 )
+from .tools.llm_proxy import (
+    llm_proxy_local,
+    llm_proxy_remote,
+)
 from .files import get_workspace_dir, WorkspaceDirectory
 
 
@@ -30,6 +35,9 @@ def run(host: str = "0.0.0.0", port: int = 5050, workspace: Optional[str] = None
     server.add_tool(remote_bash)
     server.add_tool(remote_text_editor)
     server.add_tool(remote_download)
+    if os.getenv("OPENROUTER_API_KEY"):
+        server.add_tool(llm_proxy_local)
+        server.add_tool(llm_proxy_remote)
 
     http_app = server.streamable_http_app()
 
mle_kit_mcp/tools/__init__.py CHANGED
@@ -4,6 +4,10 @@ from .remote_gpu import (
     remote_bash,
     remote_download,
 )
+from .llm_proxy import (
+    llm_proxy_local,
+    llm_proxy_remote,
+)
 
 
 __all__ = [
@@ -11,4 +15,6 @@ __all__ = [
     "text_editor",
     "remote_bash",
     "remote_download",
+    "llm_proxy_local",
+    "llm_proxy_remote",
 ]
mle_kit_mcp/tools/bash.py CHANGED
@@ -1,8 +1,11 @@
-import docker  # type: ignore
 import atexit
 import signal
 from typing import Optional, Any
 
+from docker import from_env as docker_from_env  # type: ignore
+from docker import DockerClient
+from docker.models.containers import Container  # type: ignore
+
 from mle_kit_mcp.files import get_workspace_dir
 
 
@@ -13,6 +16,40 @@ BASE_IMAGE = "python:3.12-slim"
 DOCKER_WORKSPACE_DIR_PATH = "/workdir"
 
 
+def get_docker_client() -> DockerClient:
+    global _client
+    if not _client:
+        _client = docker_from_env()
+    return _client
+
+
+def create_container() -> Container:
+    client = get_docker_client()
+    container = client.containers.run(
+        BASE_IMAGE,
+        "tail -f /dev/null",
+        detach=True,
+        remove=True,
+        tty=True,
+        stdin_open=True,
+        volumes={
+            get_workspace_dir(): {
+                "bind": DOCKER_WORKSPACE_DIR_PATH,
+                "mode": "rw",
+            }
+        },
+        working_dir=DOCKER_WORKSPACE_DIR_PATH,
+    )
+    return container
+
+
+def get_container() -> Container:
+    global _container
+    if not _container:
+        _container = create_container()
+    return _container
+
+
 def cleanup_container(signum: Optional[Any] = None, frame: Optional[Any] = None) -> None:
     global _container
     if _container:
@@ -45,33 +82,8 @@ def bash(command: str) -> str:
         command: The bash command to run.
     """
 
-
-    if not _client:
-        _client = docker.from_env()
-
-    if not _container:
-        try:
-            _container = _client.containers.get("bash_runner")
-        except docker.errors.NotFound:
-            _container = _client.containers.run(
-                BASE_IMAGE,
-                "tail -f /dev/null",
-                detach=True,
-                remove=True,
-                name="bash_runner",
-                tty=True,
-                stdin_open=True,
-                volumes={
-                    get_workspace_dir(): {
-                        "bind": DOCKER_WORKSPACE_DIR_PATH,
-                        "mode": "rw",
-                    }
-                },
-                working_dir=DOCKER_WORKSPACE_DIR_PATH,
-            )
-
-    result = _container.exec_run(
+    container = get_container()
+    result = container.exec_run(
         ["bash", "-c", command],
         workdir=DOCKER_WORKSPACE_DIR_PATH,
         stdout=True,
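The refactor lifts the old inline lazy-init out of bash() into get_docker_client/create_container/get_container singletons (and drops the fixed "bash_runner" container name). A rough usage sketch of the new helpers, assuming the package is installed and a Docker daemon is running:

from mle_kit_mcp.tools.bash import get_container, DOCKER_WORKSPACE_DIR_PATH

# First call starts python:3.12-slim with the workspace bind-mounted at /workdir;
# subsequent calls reuse the cached container object, which is what bash() relies on.
container = get_container()
result = container.exec_run(["bash", "-c", "ls -la"], workdir=DOCKER_WORKSPACE_DIR_PATH)
print(result.exit_code, result.output.decode("utf-8"))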
mle_kit_mcp/tools/llm_proxy.py ADDED
@@ -0,0 +1,148 @@
+import os
+import json
+import time
+import random
+import secrets
+from pathlib import Path
+from typing import Optional
+
+from dotenv import load_dotenv
+
+from mle_kit_mcp.tools.bash import get_container
+from mle_kit_mcp.files import get_workspace_dir
+from mle_kit_mcp.utils import find_free_port
+
+from mle_kit_mcp.tools.remote_gpu import (
+    get_instance as _remote_get_instance,
+    run_command as _remote_run_command,
+    send_rsync as _remote_send_rsync,
+)
+
+INPUT_SCRIPT_FILE_NAME = "llm_proxy_source.py"
+OUTPUT_SCRIPT_FILE_NAME = "llm_proxy.py"
+DEPENDENCIES = "fastapi uvicorn httpx openai fire"
+START_TIMEOUT = 30
+
+
+def _write_proxy_script(script_path: Path) -> None:
+    source_script_path = Path(__file__).parent.parent / INPUT_SCRIPT_FILE_NAME
+    script = source_script_path.read_text()
+    script_path.write_text(script)
+
+
+def llm_proxy_local(port: Optional[int] = None) -> str:
+    """
+    Start a lightweight OpenRouter proxy inside the same Docker container used by the "bash" tool.
+
+    Returns a JSON string with fields: url, token, scope.
+    The url is reachable from inside the "bash" container as localhost.
+    Use the token in the Authorization header: "Bearer <token>" when calling the proxy.
+    It runs a standard OpenAI compatible server, so you can use it with any OpenAI compatible client.
+    You can use all models available on OpenRouter, for instance:
+    - openai/gpt-5-mini
+    - google/gemini-2.5-pro
+    - anthropic/claude-sonnet-4
+
+    Args:
+        port: Optional fixed port to bind inside the container. Random if omitted.
+    """
+
+    load_dotenv()
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    assert api_key, "Set OPENROUTER_API_KEY in the environment before starting the proxy."
+
+    _write_proxy_script(get_workspace_dir() / OUTPUT_SCRIPT_FILE_NAME)
+
+    container = get_container()
+    dependencies_cmd = f"python -m pip install --quiet --no-input {DEPENDENCIES}"
+    container.exec_run(["bash", "-lc", dependencies_cmd])
+
+    chosen_port = port or find_free_port()
+    token = secrets.token_urlsafe(24)
+    launch_cmd = (
+        f"OPENROUTER_API_KEY='{api_key}' ACCESS_TOKEN='{token}' "
+        f"nohup python {OUTPUT_SCRIPT_FILE_NAME} "
+        f"--host 127.0.0.1 --port {chosen_port} "
+        f"> llm_proxy.log 2>&1 "
+        f"& echo $! > llm_proxy.pid"
+    )
+    container.exec_run(["bash", "-lc", launch_cmd])
+
+    health_cmd = f'import httpx; print(httpx.get("http://127.0.0.1:{chosen_port}/health").json())'
+    start_time = time.time()
+    while time.time() - start_time < START_TIMEOUT:
+        result = container.exec_run(["python", "-c", health_cmd])
+        if result.exit_code == 0 and "ok" in result.output.decode("utf-8").strip():
+            break
+        time.sleep(1)
+    else:
+        raise Exception("Failed to start the proxy")
+
+    return json.dumps(
+        {
+            "url": f"http://127.0.0.1:{chosen_port}/v1/chat/completions",
+            "token": token,
+            "scope": "bash-container",
+        }
+    )
+
+
+def llm_proxy_remote(port: Optional[int] = None) -> str:
+    """
+    Start a lightweight OpenRouter proxy on the remote GPU machine.
+
+    Returns a JSON string with fields: url, token, scope.
+    The url is reachable from inside the remote machine as localhost.
+    Use the token in the Authorization header: "Bearer <token>" when calling the proxy.
+    It runs a standard OpenAI compatible server, so you can use it with any OpenAI compatible client.
+    You can use all models available on OpenRouter, for instance:
+    - openai/gpt-5-mini
+    - google/gemini-2.5-pro
+    - anthropic/claude-sonnet-4
+
+    Args:
+        port: Optional fixed port to bind on the remote. Random if omitted.
+    """
+
+    load_dotenv()
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    assert api_key, "Set OPENROUTER_API_KEY in the environment before starting the proxy."
+
+    instance = _remote_get_instance()
+    script_path = get_workspace_dir() / OUTPUT_SCRIPT_FILE_NAME
+    _write_proxy_script(script_path)
+    _remote_send_rsync(instance, f"{script_path}", "/root")
+
+    chosen_port = port or random.randint(5000, 6000)
+    token = secrets.token_urlsafe(24)
+    dependencies_cmd = f"python3 -m pip install -q --no-input {DEPENDENCIES}"
+    _remote_run_command(instance, dependencies_cmd, timeout=300, raise_exc=True)
+
+    launch_cmd = (
+        f"OPENROUTER_API_KEY='{api_key}' ACCESS_TOKEN='{token}' "
+        f"nohup python {OUTPUT_SCRIPT_FILE_NAME} "
+        f"--host 127.0.0.1 --port {chosen_port} "
+        f"> openrouter_proxy.log 2>&1 "
+        f"& echo $! > openrouter_proxy.pid"
+    )
+    _remote_run_command(instance, launch_cmd, timeout=60, raise_exc=True)
+
+    health_cmd = f'import httpx; print(httpx.get("http://127.0.0.1:{chosen_port}/health").json())'
+    start_time = time.time()
+    while time.time() - start_time < START_TIMEOUT:
+        result = _remote_run_command(
+            instance, f"python -c '{health_cmd}'", timeout=10, raise_exc=False
+        )
+        if result.returncode == 0 and "ok" in result.stdout.strip():
+            break
+        time.sleep(1)
+    else:
+        raise Exception("Failed to start the proxy")
+
+    return json.dumps(
+        {
+            "url": f"http://127.0.0.1:{chosen_port}/v1/chat/completions",
+            "token": token,
+            "scope": "remote-gpu",
+        }
+    )
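Both tools return the same JSON shape. A sketch of consuming it from inside the target machine (values hypothetical; the url is loopback-only, so the request has to run where the proxy runs, e.g. via the bash or remote_bash tool):

import json

import httpx  # already installed by the tool via DEPENDENCIES

# Hypothetical tool output; llm_proxy_local()/llm_proxy_remote() return this shape.
info = json.loads(
    '{"url": "http://127.0.0.1:5123/v1/chat/completions",'
    ' "token": "t0ken", "scope": "bash-container"}'
)

response = httpx.post(
    info["url"],
    headers={"Authorization": f"Bearer {info['token']}"},
    json={
        "model": "openai/gpt-5-mini",
        "messages": [{"role": "user", "content": "Say hi."}],
    },
    timeout=120,
)
print(response.json()["choices"][0]["message"]["content"])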
mle_kit_mcp/tools/remote_gpu.py CHANGED
@@ -36,7 +36,28 @@ _sdk: Optional[VastAI] = None
 _instance_info: Optional[InstanceInfo] = None
 
 
-def
+def get_sdk() -> VastAI:
+    global _sdk
+    if not _sdk:
+        _sdk = VastAI(api_key=os.getenv("VAST_AI_KEY"))
+    return _sdk
+
+
+def get_instance() -> InstanceInfo:
+    load_dotenv()
+    signal.alarm(GLOBAL_TIMEOUT)
+    global _instance_info
+    if not _instance_info:
+        _instance_info = launch_instance(get_sdk(), DEFAULT_GPU_TYPE)
+
+    if _instance_info:
+        send_scripts()
+
+    assert _instance_info, "Failed to connect to a remote instance! Try again"
+    return _instance_info
+
+
 def cleanup_instance(signum: Optional[Any] = None, frame: Optional[Any] = None) -> None:
     global _instance_info
     signal.alarm(0)
     if _instance_info and _sdk:
@@ -50,10 +71,10 @@ def cleanup_machine(signum: Optional[Any] = None, frame: Optional[Any] = None) -
         raise KeyboardInterrupt()
 
 
-atexit.register(
-signal.signal(signal.SIGINT,
-signal.signal(signal.SIGTERM,
-signal.signal(signal.SIGALRM,
+atexit.register(cleanup_instance)
+signal.signal(signal.SIGINT, cleanup_instance)
+signal.signal(signal.SIGTERM, cleanup_instance)
+signal.signal(signal.SIGALRM, cleanup_instance)
 
 
 def wait_for_instance(vast_sdk: VastAI, instance_id: str, max_wait_time: int = 300) -> bool:
@@ -182,7 +203,7 @@ def launch_instance(vast_sdk: VastAI, gpu_name: str) -> Optional[InstanceInfo]:
 
     for offer_id in offer_ids:
         print(f"Launching offer {offer_id}...")
-        instance = vast_sdk.create_instance(id=offer_id, image=BASE_IMAGE, disk=
+        instance = vast_sdk.create_instance(id=offer_id, image=BASE_IMAGE, disk=300.0)
         if not instance["success"]:
             continue
         instance_id = instance["new_contract"]
@@ -264,25 +285,6 @@ def send_scripts() -> None:
         send_rsync(_instance_info, f"{get_workspace_dir()}/{name}", "/root")
 
 
-def init_all() -> None:
-    global _sdk, _instance_info
-
-    load_dotenv()
-
-    if not _sdk:
-        _sdk = VastAI(api_key=os.getenv("VAST_AI_KEY"))
-    assert _sdk
-
-    signal.alarm(GLOBAL_TIMEOUT)
-    if not _instance_info:
-        _instance_info = launch_instance(_sdk, DEFAULT_GPU_TYPE)
-
-    if _instance_info:
-        send_scripts()
-
-    assert _instance_info, "Failed to connect to a remote instance! Try again"
-
-
 def remote_bash(command: str, timeout: int = 60) -> str:
     """
     Run commands in a bash shell on a remote machine with GPU cards.
@@ -300,10 +302,10 @@ def remote_bash(command: str, timeout: int = 60) -> str:
         timeout: Timeout for the command execution. 60 seconds by default. Set a higher value for heavy jobs.
     """
 
-
-    assert
+    instance = get_instance()
+    assert instance
     assert timeout
-    result = run_command(
+    result = run_command(instance, command, timeout=timeout, raise_exc=False)
     output = ("STDOUT: " + result.stdout + "\n") if result.stdout else ""
     output += ("STDERR: " + result.stderr) if result.stderr else ""
     return output.replace(VAST_AI_GREETING, "")
@@ -316,9 +318,9 @@ def remote_download(file_path: str) -> str:
     Args:
         file_path: Path to the file on a remote machine.
     """
-
-    assert
-    recieve_rsync(
+    instance = get_instance()
+    assert instance
+    recieve_rsync(instance, f"/root/{file_path}", f"{get_workspace_dir()}")
     return f"File '{file_path}' downloaded!"
 
 
@@ -327,8 +329,7 @@ def create_remote_text_editor(
 ) -> Callable[..., str]:
     @functools.wraps(text_editor_func)
     def wrapper(*args: Any, **kwargs: Any) -> str:
-
-        assert _instance_info
+        instance = get_instance()
 
         args_dict = {k: v for k, v in kwargs.items()}
         if args:
@@ -337,12 +338,12 @@ def create_remote_text_editor(
         command = args_dict["command"]
 
         if command != "write":
-            recieve_rsync(
+            recieve_rsync(instance, f"/root/{path}", f"{get_workspace_dir()}")
 
         result: str = text_editor_func(*args, **kwargs)
 
         if command != "view":
-            send_rsync(
+            send_rsync(instance, f"{get_workspace_dir()}/{path}", "/root")
 
         return result
 
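The net effect of replacing init_all() with get_sdk()/get_instance() is that provisioning becomes lazy: the tools no longer need an explicit setup step. A sketch, assuming VAST_AI_KEY is set in the environment:

from mle_kit_mcp.tools.remote_gpu import remote_bash

# First call provisions a Vast.ai instance via get_instance() and caches it
# in _instance_info; later calls (and the other remote tools) reuse it.
print(remote_bash("nvidia-smi", timeout=120))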
mle_kit_mcp/utils.py CHANGED
@@ -1,3 +1,5 @@
+import random
+import socket
 from typing import Optional
 
 
@@ -48,3 +50,16 @@ def truncate_content(
     prefix = content[:half_length]
     suffix = content[-half_length:]
     return prefix + disclaimer + suffix
+
+
+def find_free_port() -> Optional[int]:
+    ports = list(range(5000, 6001))
+    random.shuffle(ports)
+    for port in ports:
+        try:
+            with socket.socket() as s:
+                s.bind(("", port))
+                return port
+        except OSError:
+            continue
+    return None
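A usage sketch for the new helper: it probes ports 5000-6000 in random order on the host and returns None when all are taken, so callers may want an explicit check:

from mle_kit_mcp.utils import find_free_port

port = find_free_port()
if port is None:
    raise RuntimeError("no free port in the 5000-6000 range")
print(f"proxy will bind to port {port}")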
mle_kit_mcp-0.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,18 @@
+mle_kit_mcp/__init__.py,sha256=2Ru2I5u4cE7DrkkAsibDUEF1K6sYtqppb9VyFrRoQKI,94
+mle_kit_mcp/__main__.py,sha256=rcmsOtJd3SA82exjrcGBuxuptcoxF8AXI7jNjiVq2BY,59
+mle_kit_mcp/files.py,sha256=ux53kWw7hBAcOmS9qNI4gpQX8XcQPT2LICC--S5-TGI,635
+mle_kit_mcp/llm_proxy_source.py,sha256=zDfNDvG1nAkft-irj0KhdB-1z-UUtKweRJYG0fCveVE,1637
+mle_kit_mcp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mle_kit_mcp/server.py,sha256=W4YJ3m1-NKheJ5QlGggfMQyXPghzKgg5zXqb-TLYH1U,1271
+mle_kit_mcp/utils.py,sha256=iHNcEZZzPD37bEYE18SzJ3WUjLP3Ym-kc91SwcW1vlI,1984
+mle_kit_mcp/tools/__init__.py,sha256=r2fIg2mZ6zaeq0CzEKCEdeUTjV0pcA9NZaaOfBNVTnE,332
+mle_kit_mcp/tools/bash.py,sha256=9YmwRCR2HHTJ6Jhnn-fHuLRkDjsHo9m26tpdXoGCFtQ,2608
+mle_kit_mcp/tools/llm_proxy.py,sha256=9tm3k1R5y9o6Z4nkZXwILkYt6Ludc_TnQSwRw0KcNHA,5312
+mle_kit_mcp/tools/remote_gpu.py,sha256=Fv8SJC8bE3Oo8JHOKXoYpISXFMzUMFjEcCQOTyW7V1I,11544
+mle_kit_mcp/tools/text_editor.py,sha256=EJ832vgdFdycrLOf3eOYMXnlMgB2_iH_u4ANvZVrMsE,9139
+mle_kit_mcp-0.1.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mle_kit_mcp-0.1.0.dist-info/METADATA,sha256=aNQJCc6kZW4IaOvKi7KrxPJ0NqQgnFCmQqVk1uJ3yTg,1011
+mle_kit_mcp-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mle_kit_mcp-0.1.0.dist-info/entry_points.txt,sha256=-iHSUVPN49jkBj1ySpc-P0rVF5-IPHw-KWNayNIiEsk,49
+mle_kit_mcp-0.1.0.dist-info/top_level.txt,sha256=XeBtCq_CnVI0gh0Z_daZOLmGl5XPlkA8RgHaj5s5VQY,12
+mle_kit_mcp-0.1.0.dist-info/RECORD,,
mle_kit_mcp-0.0.8.dist-info/RECORD REMOVED
@@ -1,16 +0,0 @@
-mle_kit_mcp/__init__.py,sha256=2Ru2I5u4cE7DrkkAsibDUEF1K6sYtqppb9VyFrRoQKI,94
-mle_kit_mcp/__main__.py,sha256=rcmsOtJd3SA82exjrcGBuxuptcoxF8AXI7jNjiVq2BY,59
-mle_kit_mcp/files.py,sha256=ux53kWw7hBAcOmS9qNI4gpQX8XcQPT2LICC--S5-TGI,635
-mle_kit_mcp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mle_kit_mcp/server.py,sha256=E9cXdKwVBASBzKyrZGHCTvj6BIMN-EbVSQZDFNg0YnE,1062
-mle_kit_mcp/utils.py,sha256=wd7wSyddHRHOYdxmXw8uoAOBxVZOL2_vjNomss07inc,1654
-mle_kit_mcp/tools/__init__.py,sha256=0aLl0gD-JteSvOs2PgVhbv0Wnh6fodFySgQWQvoI1xI,215
-mle_kit_mcp/tools/bash.py,sha256=10hYo_hIBvQh8wbjssixb89SXG05H6nHtndShr6V31E,2461
-mle_kit_mcp/tools/remote_gpu.py,sha256=NHdg52BuTL8fBFXHgv0s3CDpsWs4I1WykfTdik3c6F4,11482
-mle_kit_mcp/tools/text_editor.py,sha256=EJ832vgdFdycrLOf3eOYMXnlMgB2_iH_u4ANvZVrMsE,9139
-mle_kit_mcp-0.0.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-mle_kit_mcp-0.0.8.dist-info/METADATA,sha256=uynrL08zcDIQCyEiVhmRA6KJ-Umk1c2vHUrT2qJnCRw,1011
-mle_kit_mcp-0.0.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-mle_kit_mcp-0.0.8.dist-info/entry_points.txt,sha256=-iHSUVPN49jkBj1ySpc-P0rVF5-IPHw-KWNayNIiEsk,49
-mle_kit_mcp-0.0.8.dist-info/top_level.txt,sha256=XeBtCq_CnVI0gh0Z_daZOLmGl5XPlkA8RgHaj5s5VQY,12
-mle_kit_mcp-0.0.8.dist-info/RECORD,,
{mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/WHEEL
File without changes
{mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/entry_points.txt
File without changes
{mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/licenses/LICENSE
File without changes
{mle_kit_mcp-0.0.8.dist-info → mle_kit_mcp-0.1.0.dist-info}/top_level.txt
File without changes