mle-kit-mcp 0.0.9__tar.gz → 0.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/PKG-INFO +1 -1
  2. mle_kit_mcp-0.1.1/mle_kit_mcp/llm_proxy_source.py +51 -0
  3. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/server.py +8 -0
  4. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/tools/__init__.py +6 -0
  5. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/tools/bash.py +46 -30
  6. mle_kit_mcp-0.1.1/mle_kit_mcp/tools/llm_proxy.py +148 -0
  7. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/tools/remote_gpu.py +35 -34
  8. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/tools/text_editor.py +14 -5
  9. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/utils.py +15 -0
  10. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp.egg-info/PKG-INFO +1 -1
  11. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp.egg-info/SOURCES.txt +3 -0
  12. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/pyproject.toml +1 -1
  13. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/tests/test_bash.py +6 -0
  14. mle_kit_mcp-0.1.1/tests/test_llm_proxy.py +24 -0
  15. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/tests/test_text_editor.py +12 -0
  16. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/LICENSE +0 -0
  17. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/README.md +0 -0
  18. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/__init__.py +0 -0
  19. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/__main__.py +0 -0
  20. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/files.py +0 -0
  21. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp/py.typed +0 -0
  22. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp.egg-info/dependency_links.txt +0 -0
  23. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp.egg-info/entry_points.txt +0 -0
  24. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp.egg-info/requires.txt +0 -0
  25. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/mle_kit_mcp.egg-info/top_level.txt +0 -0
  26. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/setup.cfg +0 -0
  27. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.1}/tests/test_truncate_context.py +0 -0
--- mle_kit_mcp-0.0.9/PKG-INFO
+++ mle_kit_mcp-0.1.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mle-kit-mcp
-Version: 0.0.9
+Version: 0.1.1
 Summary: MCP server that provides different tools for MLE
 Author-email: Ilya Gusev <phoenixilya@gmail.com>
 Project-URL: Homepage, https://github.com/IlyaGusev/mle_kit_mcp
--- /dev/null
+++ mle_kit_mcp-0.1.1/mle_kit_mcp/llm_proxy_source.py
@@ -0,0 +1,51 @@
+import os
+
+import fire  # type: ignore
+import uvicorn
+from openai import AsyncOpenAI
+from fastapi import FastAPI, Request, HTTPException
+from fastapi.responses import JSONResponse
+
+ACCESS_TOKEN = os.getenv("ACCESS_TOKEN", "")
+OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
+OPENROUTER_BASE_URL = os.getenv("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1")
+
+app = FastAPI()
+client = AsyncOpenAI(base_url=OPENROUTER_BASE_URL, api_key=OPENROUTER_API_KEY)
+
+
+def _check_auth(request: Request) -> None:
+    auth = request.headers.get("authorization", "")
+    if not ACCESS_TOKEN or auth != f"Bearer {ACCESS_TOKEN}":
+        raise HTTPException(status_code=401, detail="Unauthorized")
+    if not OPENROUTER_API_KEY:
+        raise HTTPException(status_code=500, detail="OpenRouter key not configured")
+
+
+@app.post("/v1/chat/completions")
+async def chat_completions(request: Request) -> JSONResponse:
+    _check_auth(request)
+    payload = await request.json()
+    if isinstance(payload, dict) and payload.get("stream"):
+        payload.pop("stream", None)
+
+    try:
+        resp = await client.chat.completions.create(**payload)
+    except Exception as e:
+        raise HTTPException(status_code=502, detail=str(e))
+    return JSONResponse(resp.model_dump())
+
+
+@app.get("/health")
+async def health() -> JSONResponse:
+    if not OPENROUTER_API_KEY:
+        return JSONResponse({"error": "missing OPENROUTER_API_KEY"}, 500)
+    return JSONResponse({"result": "ok"})
+
+
+def main(host: str = "127.0.0.1", port: int = 8001) -> None:
+    uvicorn.run("llm_proxy:app", host=host, port=port)
+
+
+if __name__ == "__main__":
+    fire.Fire(main)
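
Note (not part of the diff): since the script above exposes the standard /v1/chat/completions route, any OpenAI-compatible client should be able to talk to it. A minimal sketch, assuming the proxy is already running on port 8001 with ACCESS_TOKEN set to "my-token"; the model id is only an example:

# Hypothetical client-side usage of the proxy above.
from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:8001/v1", api_key="my-token")  # the access token doubles as the API key
response = client.chat.completions.create(
    model="openai/gpt-5-mini",  # any OpenRouter model id
    messages=[{"role": "user", "content": "Say hi in one word."}],
)
print(response.choices[0].message.content)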
--- mle_kit_mcp-0.0.9/mle_kit_mcp/server.py
+++ mle_kit_mcp-0.1.1/mle_kit_mcp/server.py
@@ -1,3 +1,4 @@
+import os
 from pathlib import Path
 from typing import Optional
 
@@ -12,6 +13,10 @@ from .tools.remote_gpu import (
     create_remote_text_editor,
     remote_download,
 )
+from .tools.llm_proxy import (
+    llm_proxy_local,
+    llm_proxy_remote,
+)
 from .files import get_workspace_dir, WorkspaceDirectory
 
 
@@ -30,6 +35,9 @@ def run(host: str = "0.0.0.0", port: int = 5050, workspace: Optional[str] = None
     server.add_tool(remote_bash)
     server.add_tool(remote_text_editor)
     server.add_tool(remote_download)
+    if os.getenv("OPENROUTER_API_KEY"):
+        server.add_tool(llm_proxy_local)
+        server.add_tool(llm_proxy_remote)
 
     http_app = server.streamable_http_app()
 
--- mle_kit_mcp-0.0.9/mle_kit_mcp/tools/__init__.py
+++ mle_kit_mcp-0.1.1/mle_kit_mcp/tools/__init__.py
@@ -4,6 +4,10 @@ from .remote_gpu import (
     remote_bash,
     remote_download,
 )
+from .llm_proxy import (
+    llm_proxy_local,
+    llm_proxy_remote,
+)
 
 
 __all__ = [
@@ -11,4 +15,6 @@ __all__ = [
     "text_editor",
     "remote_bash",
     "remote_download",
+    "llm_proxy_local",
+    "llm_proxy_remote",
 ]
--- mle_kit_mcp-0.0.9/mle_kit_mcp/tools/bash.py
+++ mle_kit_mcp-0.1.1/mle_kit_mcp/tools/bash.py
@@ -1,8 +1,11 @@
-import docker  # type: ignore
 import atexit
 import signal
 from typing import Optional, Any
 
+from docker import from_env as docker_from_env  # type: ignore
+from docker import DockerClient
+from docker.models.containers import Container  # type: ignore
+
 from mle_kit_mcp.files import get_workspace_dir
 
 
@@ -13,6 +16,40 @@ BASE_IMAGE = "python:3.12-slim"
 DOCKER_WORKSPACE_DIR_PATH = "/workdir"
 
 
+def get_docker_client() -> DockerClient:
+    global _client
+    if not _client:
+        _client = docker_from_env()
+    return _client
+
+
+def create_container() -> Container:
+    client = get_docker_client()
+    container = client.containers.run(
+        BASE_IMAGE,
+        "tail -f /dev/null",
+        detach=True,
+        remove=True,
+        tty=True,
+        stdin_open=True,
+        volumes={
+            get_workspace_dir(): {
+                "bind": DOCKER_WORKSPACE_DIR_PATH,
+                "mode": "rw",
+            }
+        },
+        working_dir=DOCKER_WORKSPACE_DIR_PATH,
+    )
+    return container
+
+
+def get_container() -> Container:
+    global _container
+    if not _container:
+        _container = create_container()
+    return _container
+
+
 def cleanup_container(signum: Optional[Any] = None, frame: Optional[Any] = None) -> None:
     global _container
     if _container:
@@ -30,7 +67,7 @@ signal.signal(signal.SIGINT, cleanup_container)
 signal.signal(signal.SIGTERM, cleanup_container)
 
 
-def bash(command: str) -> str:
+def bash(command: str, cwd: Optional[str] = None) -> str:
     """
     Run commands in a bash shell.
     When invoking this tool, the contents of the "command" parameter does NOT need to be XML-escaped.
@@ -43,37 +80,16 @@ def bash(command: str) -> str:
 
     Args:
         command: The bash command to run.
+        cwd: The working directory to run the command in. Relative to the workspace directory.
     """
 
-    global _container, _client
-
-    if not _client:
-        _client = docker.from_env()
-
-    if not _container:
-        try:
-            _container = _client.containers.get("bash_runner")
-        except docker.errors.NotFound:
-            _container = _client.containers.run(
-                BASE_IMAGE,
-                "tail -f /dev/null",
-                detach=True,
-                remove=True,
-                name="bash_runner",
-                tty=True,
-                stdin_open=True,
-                volumes={
-                    get_workspace_dir(): {
-                        "bind": DOCKER_WORKSPACE_DIR_PATH,
-                        "mode": "rw",
-                    }
-                },
-                working_dir=DOCKER_WORKSPACE_DIR_PATH,
-            )
-
-    result = _container.exec_run(
+    container = get_container()
+    workdir = DOCKER_WORKSPACE_DIR_PATH
+    if cwd:
+        workdir = DOCKER_WORKSPACE_DIR_PATH + "/" + cwd
+    result = container.exec_run(
         ["bash", "-c", command],
-        workdir=DOCKER_WORKSPACE_DIR_PATH,
+        workdir=workdir,
         stdout=True,
         stderr=True,
     )
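
Usage sketch for the new cwd parameter (illustrative, mirroring the test added further down): paths are resolved relative to the mounted workspace, i.e. under /workdir inside the container.

# Hypothetical call sequence against the refactored bash tool.
from mle_kit_mcp.tools.bash import bash

bash("mkdir -p experiments")
print(bash("pwd", cwd="experiments"))  # expected output: /workdir/experiments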
--- /dev/null
+++ mle_kit_mcp-0.1.1/mle_kit_mcp/tools/llm_proxy.py
@@ -0,0 +1,148 @@
+import os
+import json
+import time
+import random
+import secrets
+from pathlib import Path
+from typing import Optional
+
+from dotenv import load_dotenv
+
+from mle_kit_mcp.tools.bash import get_container
+from mle_kit_mcp.files import get_workspace_dir
+from mle_kit_mcp.utils import find_free_port
+
+from mle_kit_mcp.tools.remote_gpu import (
+    get_instance as _remote_get_instance,
+    run_command as _remote_run_command,
+    send_rsync as _remote_send_rsync,
+)
+
+INPUT_SCRIPT_FILE_NAME = "llm_proxy_source.py"
+OUTPUT_SCRIPT_FILE_NAME = "llm_proxy.py"
+DEPENDENCIES = "fastapi uvicorn httpx openai fire"
+START_TIMEOUT = 30
+
+
+def _write_proxy_script(script_path: Path) -> None:
+    source_script_path = Path(__file__).parent.parent / INPUT_SCRIPT_FILE_NAME
+    script = source_script_path.read_text()
+    script_path.write_text(script)
+
+
+def llm_proxy_local(port: Optional[int] = None) -> str:
+    """
+    Start a lightweight OpenRouter proxy inside the same Docker container used by the "bash" tool.
+
+    Returns a JSON string with fields: url, token, scope.
+    The url is reachable from inside the "bash" container as localhost.
+    Use the token in the Authorization header: "Bearer <token>" when calling the proxy.
+    It runs a standard OpenAI compatible server, so you can use it with any OpenAI compatible client.
+    You can use all models available on OpenRouter, for instance:
+    - openai/gpt-5-mini
+    - google/gemini-2.5-pro
+    - anthropic/claude-sonnet-4
+
+    Args:
+        port: Optional fixed port to bind inside the container. Random if omitted.
+    """
+
+    load_dotenv()
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    assert api_key, "Set OPENROUTER_API_KEY in the environment before starting the proxy."
+
+    _write_proxy_script(get_workspace_dir() / OUTPUT_SCRIPT_FILE_NAME)
+
+    container = get_container()
+    dependencies_cmd = f"python -m pip install --quiet --no-input {DEPENDENCIES}"
+    container.exec_run(["bash", "-lc", dependencies_cmd])
+
+    chosen_port = port or find_free_port()
+    token = secrets.token_urlsafe(24)
+    launch_cmd = (
+        f"OPENROUTER_API_KEY='{api_key}' ACCESS_TOKEN='{token}' "
+        f"nohup python {OUTPUT_SCRIPT_FILE_NAME} "
+        f"--host 127.0.0.1 --port {chosen_port} "
+        f"> llm_proxy.log 2>&1 "
+        f"& echo $! > llm_proxy.pid"
+    )
+    container.exec_run(["bash", "-lc", launch_cmd])
+
+    health_cmd = f'import httpx; print(httpx.get("http://127.0.0.1:{chosen_port}/health").json())'
+    start_time = time.time()
+    while time.time() - start_time < START_TIMEOUT:
+        result = container.exec_run(["python", "-c", health_cmd])
+        if result.exit_code == 0 and "ok" in result.output.decode("utf-8").strip():
+            break
+        time.sleep(1)
+    else:
+        raise Exception("Failed to start the proxy")
+
+    return json.dumps(
+        {
+            "url": f"http://127.0.0.1:{chosen_port}/v1/chat/completions",
+            "token": token,
+            "scope": "bash-container",
+        }
+    )
+
+
+def llm_proxy_remote(port: Optional[int] = None) -> str:
+    """
+    Start a lightweight OpenRouter proxy on the remote GPU machine.
+
+    Returns a JSON string with fields: url, token, scope.
+    The url is reachable from inside the remote machine as localhost.
+    Use the token in the Authorization header: "Bearer <token>" when calling the proxy.
+    It runs a standard OpenAI compatible server, so you can use it with any OpenAI compatible client.
+    You can use all models available on OpenRouter, for instance:
+    - openai/gpt-5-mini
+    - google/gemini-2.5-pro
+    - anthropic/claude-sonnet-4
+
+    Args:
+        port: Optional fixed port to bind on the remote. Random if omitted.
+    """
+
+    load_dotenv()
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    assert api_key, "Set OPENROUTER_API_KEY in the environment before starting the proxy."
+
+    instance = _remote_get_instance()
+    script_path = get_workspace_dir() / OUTPUT_SCRIPT_FILE_NAME
+    _write_proxy_script(script_path)
+    _remote_send_rsync(instance, f"{script_path}", "/root")
+
+    chosen_port = port or random.randint(5000, 6000)
+    token = secrets.token_urlsafe(24)
+    dependencies_cmd = f"python3 -m pip install -q --no-input {DEPENDENCIES}"
+    _remote_run_command(instance, dependencies_cmd, timeout=300, raise_exc=True)
+
+    launch_cmd = (
+        f"OPENROUTER_API_KEY='{api_key}' ACCESS_TOKEN='{token}' "
+        f"nohup python {OUTPUT_SCRIPT_FILE_NAME} "
+        f"--host 127.0.0.1 --port {chosen_port} "
+        f"> openrouter_proxy.log 2>&1 "
+        f"& echo $! > openrouter_proxy.pid"
+    )
+    _remote_run_command(instance, launch_cmd, timeout=60, raise_exc=True)
+
+    health_cmd = f'import httpx; print(httpx.get("http://127.0.0.1:{chosen_port}/health").json())'
+    start_time = time.time()
+    while time.time() - start_time < START_TIMEOUT:
+        result = _remote_run_command(
+            instance, f"python -c '{health_cmd}'", timeout=10, raise_exc=False
+        )
+        if result.returncode == 0 and "ok" in result.stdout.strip():
+            break
+        time.sleep(1)
+    else:
+        raise Exception("Failed to start the proxy")
+
+    return json.dumps(
+        {
+            "url": f"http://127.0.0.1:{chosen_port}/v1/chat/completions",
+            "token": token,
+            "scope": "remote-gpu",
+        }
+    )
--- mle_kit_mcp-0.0.9/mle_kit_mcp/tools/remote_gpu.py
+++ mle_kit_mcp-0.1.1/mle_kit_mcp/tools/remote_gpu.py
@@ -36,7 +36,28 @@ _sdk: Optional[VastAI] = None
 _instance_info: Optional[InstanceInfo] = None
 
 
-def cleanup_machine(signum: Optional[Any] = None, frame: Optional[Any] = None) -> None:
+def get_sdk() -> VastAI:
+    global _sdk
+    if not _sdk:
+        _sdk = VastAI(api_key=os.getenv("VAST_AI_KEY"))
+    return _sdk
+
+
+def get_instance() -> InstanceInfo:
+    load_dotenv()
+    signal.alarm(GLOBAL_TIMEOUT)
+    global _instance_info
+    if not _instance_info:
+        _instance_info = launch_instance(get_sdk(), DEFAULT_GPU_TYPE)
+
+    if _instance_info:
+        send_scripts()
+
+    assert _instance_info, "Failed to connect to a remote instance! Try again"
+    return _instance_info
+
+
+def cleanup_instance(signum: Optional[Any] = None, frame: Optional[Any] = None) -> None:
     global _instance_info
     signal.alarm(0)
     if _instance_info and _sdk:
@@ -50,10 +71,10 @@ def cleanup_machine(signum: Optional[Any] = None, frame: Optional[Any] = None) -
         raise KeyboardInterrupt()
 
 
-atexit.register(cleanup_machine)
-signal.signal(signal.SIGINT, cleanup_machine)
-signal.signal(signal.SIGTERM, cleanup_machine)
-signal.signal(signal.SIGALRM, cleanup_machine)
+atexit.register(cleanup_instance)
+signal.signal(signal.SIGINT, cleanup_instance)
+signal.signal(signal.SIGTERM, cleanup_instance)
+signal.signal(signal.SIGALRM, cleanup_instance)
 
 
 def wait_for_instance(vast_sdk: VastAI, instance_id: str, max_wait_time: int = 300) -> bool:
@@ -264,25 +285,6 @@ def send_scripts() -> None:
         send_rsync(_instance_info, f"{get_workspace_dir()}/{name}", "/root")
 
 
-def init_all() -> None:
-    global _sdk, _instance_info
-
-    load_dotenv()
-
-    if not _sdk:
-        _sdk = VastAI(api_key=os.getenv("VAST_AI_KEY"))
-    assert _sdk
-
-    signal.alarm(GLOBAL_TIMEOUT)
-    if not _instance_info:
-        _instance_info = launch_instance(_sdk, DEFAULT_GPU_TYPE)
-
-    if _instance_info:
-        send_scripts()
-
-    assert _instance_info, "Failed to connect to a remote instance! Try again"
-
-
 def remote_bash(command: str, timeout: int = 60) -> str:
     """
     Run commands in a bash shell on a remote machine with GPU cards.
@@ -300,10 +302,10 @@ def remote_bash(command: str, timeout: int = 60) -> str:
         timeout: Timeout for the command execution. 60 seconds by default. Set a higher value for heavy jobs.
     """
 
-    init_all()
-    assert _instance_info
+    instance = get_instance()
+    assert instance
     assert timeout
-    result = run_command(_instance_info, command, timeout=timeout, raise_exc=False)
+    result = run_command(instance, command, timeout=timeout, raise_exc=False)
     output = ("STDOUT: " + result.stdout + "\n") if result.stdout else ""
     output += ("STDERR: " + result.stderr) if result.stderr else ""
     return output.replace(VAST_AI_GREETING, "")
@@ -316,9 +318,9 @@ def remote_download(file_path: str) -> str:
 
     Args:
         file_path: Path to the file on a remote machine.
     """
-    init_all()
-    assert _instance_info
-    recieve_rsync(_instance_info, f"/root/{file_path}", f"{get_workspace_dir()}")
+    instance = get_instance()
+    assert instance
+    recieve_rsync(instance, f"/root/{file_path}", f"{get_workspace_dir()}")
     return f"File '{file_path}' downloaded!"
 
 
@@ -327,8 +329,7 @@ def create_remote_text_editor(
 ) -> Callable[..., str]:
     @functools.wraps(text_editor_func)
     def wrapper(*args: Any, **kwargs: Any) -> str:
-        init_all()
-        assert _instance_info
+        instance = get_instance()
 
         args_dict = {k: v for k, v in kwargs.items()}
         if args:
@@ -337,12 +338,12 @@ def create_remote_text_editor(
         command = args_dict["command"]
 
         if command != "write":
-            recieve_rsync(_instance_info, f"/root/{path}", f"{get_workspace_dir()}")
+            recieve_rsync(instance, f"/root/{path}", f"{get_workspace_dir()}")
 
         result: str = text_editor_func(*args, **kwargs)
 
         if command != "view":
-            send_rsync(_instance_info, f"{get_workspace_dir()}/{path}", "/root")
+            send_rsync(instance, f"{get_workspace_dir()}/{path}", "/root")
 
         return result
 
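
The net effect of the remote_gpu changes is that the imperative init_all() is replaced by a lazy get_instance() accessor, so tools no longer poke module globals. A sketch of the resulting call pattern (the command is only an example):

# Hypothetical: the first remote call provisions the Vast.ai instance; later calls reuse it.
from mle_kit_mcp.tools.remote_gpu import remote_bash

print(remote_bash("nvidia-smi", timeout=120))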
--- mle_kit_mcp-0.0.9/mle_kit_mcp/tools/text_editor.py
+++ mle_kit_mcp-0.1.1/mle_kit_mcp/tools/text_editor.py
@@ -50,17 +50,23 @@ def _insert(path: Path, insert_line: int, new_str: str) -> str:
     return truncate_content(new_content, WRITE_MAX_OUTPUT_LENGTH, target_line=insert_line)
 
 
-def _str_replace(path: Path, old_str: str, new_str: str) -> str:
+def _str_replace(path: Path, old_str: str, new_str: str, dry_run: bool = False) -> str:
     assert path.is_file(), f"File not found: {path}"
     content = path.open().read()
     count = content.count(old_str)
     assert count != 0, "old_str not found in file"
     assert count == 1, "old_str is not unique in file"
     target_line = content[: content.find(old_str) + len(old_str)].count("\n")
-    _save_file_state(path, content.splitlines(True))
     new_content = content.replace(old_str, new_str)
-    path.write_text(new_content)
-    return truncate_content(new_content, WRITE_MAX_OUTPUT_LENGTH, target_line=target_line)
+    if not dry_run:
+        _save_file_state(path, content.splitlines(True))
+        path.write_text(new_content)
+    display_content = truncate_content(
+        new_content, WRITE_MAX_OUTPUT_LENGTH, target_line=target_line
+    )
+    if dry_run:
+        display_content = f"Dry run:\n{display_content}"
+    return display_content
 
 
 def _undo_edit(path: Path) -> str:
@@ -129,6 +135,7 @@ def text_editor(
     view_start_line: Optional[int] = None,
     view_end_line: Optional[int] = None,
     show_lines: Optional[bool] = False,
+    dry_run: Optional[bool] = False,
 ) -> str:
     """
     Custom editing tool for viewing, creating and editing files.
@@ -167,6 +174,7 @@ def text_editor(
         new_str: Required for `str_replace`, `insert` and `append`.
         old_str: Required for `str_replace` containing the string in `path` to replace.
         show_lines: Optional for view command. If True, the command will also output line numbers.
+        dry_run: Optional for `str_replace` command. If True, the command won't modify the file but will display the result.
     """
     assert not path.startswith(
         "/"
@@ -191,7 +199,8 @@ def text_editor(
     if command == "str_replace":
         assert old_str is not None, "'old_str' is required for 'str_replace' command"
         assert new_str is not None, "'new_str' is required for 'str_replace' command"
-        return _str_replace(path_obj, old_str, new_str)
+        assert dry_run is not None
+        return _str_replace(path_obj, old_str, new_str, dry_run=dry_run)
     if command == "undo_edit":
         return _undo_edit(path_obj)
     assert False, f"Not a valid command! List of commands: {valid_commands}"
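A sketch of the new dry_run flow (file name and strings hypothetical; the path is resolved relative to the workspace):

# Preview a replacement without touching the file.
from mle_kit_mcp.tools.text_editor import text_editor

preview = text_editor("str_replace", "config.yaml", old_str="lr: 0.1", new_str="lr: 0.01", dry_run=True)
assert preview.startswith("Dry run:")  # the file on disk is unchanged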
--- mle_kit_mcp-0.0.9/mle_kit_mcp/utils.py
+++ mle_kit_mcp-0.1.1/mle_kit_mcp/utils.py
@@ -1,3 +1,5 @@
+import random
+import socket
 from typing import Optional
 
 
@@ -48,3 +50,16 @@ def truncate_content(
     prefix = content[:half_length]
     suffix = content[-half_length:]
     return prefix + disclaimer + suffix
+
+
+def find_free_port() -> Optional[int]:
+    ports = list(range(5000, 6001))
+    random.shuffle(ports)
+    for port in ports:
+        try:
+            with socket.socket() as s:
+                s.bind(("", port))
+                return port
+        except OSError:
+            continue
+    return None
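
find_free_port picks a random bindable port in [5000, 6000]; llm_proxy_local relies on it when no port is given. A quick illustration (assumes at least one port in the range is free):

from mle_kit_mcp.utils import find_free_port

port = find_free_port()
assert port is not None and 5000 <= port <= 6000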
--- mle_kit_mcp-0.0.9/mle_kit_mcp.egg-info/PKG-INFO
+++ mle_kit_mcp-0.1.1/mle_kit_mcp.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mle-kit-mcp
-Version: 0.0.9
+Version: 0.1.1
 Summary: MCP server that provides different tools for MLE
 Author-email: Ilya Gusev <phoenixilya@gmail.com>
 Project-URL: Homepage, https://github.com/IlyaGusev/mle_kit_mcp
--- mle_kit_mcp-0.0.9/mle_kit_mcp.egg-info/SOURCES.txt
+++ mle_kit_mcp-0.1.1/mle_kit_mcp.egg-info/SOURCES.txt
@@ -4,6 +4,7 @@ pyproject.toml
 mle_kit_mcp/__init__.py
 mle_kit_mcp/__main__.py
 mle_kit_mcp/files.py
+mle_kit_mcp/llm_proxy_source.py
 mle_kit_mcp/py.typed
 mle_kit_mcp/server.py
 mle_kit_mcp/utils.py
@@ -15,8 +16,10 @@ mle_kit_mcp.egg-info/requires.txt
 mle_kit_mcp.egg-info/top_level.txt
 mle_kit_mcp/tools/__init__.py
 mle_kit_mcp/tools/bash.py
+mle_kit_mcp/tools/llm_proxy.py
 mle_kit_mcp/tools/remote_gpu.py
 mle_kit_mcp/tools/text_editor.py
 tests/test_bash.py
+tests/test_llm_proxy.py
 tests/test_text_editor.py
 tests/test_truncate_context.py
--- mle_kit_mcp-0.0.9/pyproject.toml
+++ mle_kit_mcp-0.1.1/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "mle-kit-mcp"
-version = "0.0.9"
+version = "0.1.1"
 description = "MCP server that provides different tools for MLE"
 readme = "README.md"
 authors = [
--- mle_kit_mcp-0.0.9/tests/test_bash.py
+++ mle_kit_mcp-0.1.1/tests/test_bash.py
@@ -16,3 +16,9 @@ def test_bash() -> None:
 
     result = bash("fddafad")
     assert "fddafad: command not found" in result
+
+
+def test_bash_cwd() -> None:
+    bash("mkdir -p dummy_dir")
+    bash("touch dummy", cwd="dummy_dir")
+    assert os.path.exists(get_workspace_dir() / "dummy_dir" / "dummy")
--- /dev/null
+++ mle_kit_mcp-0.1.1/tests/test_llm_proxy.py
@@ -0,0 +1,24 @@
+import json
+from mle_kit_mcp.tools import llm_proxy_local, bash
+
+
+QUERY_SNIPPET = """
+import httpx
+headers = {{'Authorization': 'Bearer {token}'}}
+json_payload = {{'model': 'gpt-4o', 'messages': [{{'role': 'user', 'content': 'Hello, how are you?'}}]}}
+response = httpx.post("{url}", headers=headers, json=json_payload)
+print(response.json())
+"""
+
+
+def test_llm_proxy_local():
+    result = json.loads(llm_proxy_local(port=8001))
+    token = result["token"]
+    url = result["url"]
+    assert url
+    assert token
+
+    snippet = QUERY_SNIPPET.format(url=url, token=token)
+    result = bash(f'cat > test_query.py << "EOF"\n{snippet}\nEOF')
+    result = bash("python test_query.py")
+    assert "content" in result
--- mle_kit_mcp-0.0.9/tests/test_text_editor.py
+++ mle_kit_mcp-0.1.1/tests/test_text_editor.py
@@ -87,6 +87,18 @@ def test_text_editor_str_replace() -> None:
     assert "41.9" in new_content and "41.8" not in new_content
 
 
+def test_text_editor_str_replace_dry_run() -> None:
+    with tempfile.NamedTemporaryFile(dir=get_workspace_dir(), mode="w+") as f:
+        name = os.path.basename(f.name)
+        test_file = get_workspace_dir() / name
+        test_file.write_text(DOCUMENT1)
+
+        result = text_editor("str_replace", name, old_str="41.8", new_str="41.9", dry_run=True)
+        new_content = test_file.open().read()
+        assert "41.9" not in new_content and "41.8" in new_content
+        assert "Dry run" in result
+
+
 def test_text_editor_undo_edit() -> None:
     with tempfile.NamedTemporaryFile(dir=get_workspace_dir(), mode="w+") as f:
         name = os.path.basename(f.name)