mle-kit-mcp 0.0.9__tar.gz → 0.1.0__tar.gz

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (27):
  1. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/PKG-INFO +1 -1
  2. mle_kit_mcp-0.1.0/mle_kit_mcp/llm_proxy_source.py +51 -0
  3. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/server.py +8 -0
  4. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/tools/__init__.py +6 -0
  5. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/tools/bash.py +40 -28
  6. mle_kit_mcp-0.1.0/mle_kit_mcp/tools/llm_proxy.py +148 -0
  7. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/tools/remote_gpu.py +35 -34
  8. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/utils.py +15 -0
  9. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp.egg-info/PKG-INFO +1 -1
  10. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp.egg-info/SOURCES.txt +3 -0
  11. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/pyproject.toml +1 -1
  12. mle_kit_mcp-0.1.0/tests/test_llm_proxy.py +24 -0
  13. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/LICENSE +0 -0
  14. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/README.md +0 -0
  15. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/__init__.py +0 -0
  16. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/__main__.py +0 -0
  17. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/files.py +0 -0
  18. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/py.typed +0 -0
  19. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/tools/text_editor.py +0 -0
  20. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp.egg-info/dependency_links.txt +0 -0
  21. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp.egg-info/entry_points.txt +0 -0
  22. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp.egg-info/requires.txt +0 -0
  23. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp.egg-info/top_level.txt +0 -0
  24. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/setup.cfg +0 -0
  25. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/tests/test_bash.py +0 -0
  26. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/tests/test_text_editor.py +0 -0
  27. {mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/tests/test_truncate_context.py +0 -0
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mle-kit-mcp
-Version: 0.0.9
+Version: 0.1.0
 Summary: MCP server that provides different tools for MLE
 Author-email: Ilya Gusev <phoenixilya@gmail.com>
 Project-URL: Homepage, https://github.com/IlyaGusev/mle_kit_mcp

mle_kit_mcp-0.1.0/mle_kit_mcp/llm_proxy_source.py
@@ -0,0 +1,51 @@
+import os
+
+import fire  # type: ignore
+import uvicorn
+from openai import AsyncOpenAI
+from fastapi import FastAPI, Request, HTTPException
+from fastapi.responses import JSONResponse
+
+ACCESS_TOKEN = os.getenv("ACCESS_TOKEN", "")
+OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
+OPENROUTER_BASE_URL = os.getenv("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1")
+
+app = FastAPI()
+client = AsyncOpenAI(base_url=OPENROUTER_BASE_URL, api_key=OPENROUTER_API_KEY)
+
+
+def _check_auth(request: Request) -> None:
+    auth = request.headers.get("authorization", "")
+    if not ACCESS_TOKEN or auth != f"Bearer {ACCESS_TOKEN}":
+        raise HTTPException(status_code=401, detail="Unauthorized")
+    if not OPENROUTER_API_KEY:
+        raise HTTPException(status_code=500, detail="OpenRouter key not configured")
+
+
+@app.post("/v1/chat/completions")
+async def chat_completions(request: Request) -> JSONResponse:
+    _check_auth(request)
+    payload = await request.json()
+    if isinstance(payload, dict) and payload.get("stream"):
+        payload.pop("stream", None)
+
+    try:
+        resp = await client.chat.completions.create(**payload)
+    except Exception as e:
+        raise HTTPException(status_code=502, detail=str(e))
+    return JSONResponse(resp.model_dump())
+
+
+@app.get("/health")
+async def health() -> JSONResponse:
+    if not OPENROUTER_API_KEY:
+        return JSONResponse({"error": "missing OPENROUTER_API_KEY"}, 500)
+    return JSONResponse({"result": "ok"})
+
+
+def main(host: str = "127.0.0.1", port: int = 8001) -> None:
+    uvicorn.run("llm_proxy:app", host=host, port=port)
+
+
+if __name__ == "__main__":
+    fire.Fire(main)
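
Since the proxy serves the standard /v1/chat/completions route, any OpenAI-compatible client can call it. A minimal sketch, assuming a proxy already started on port 8001; the base_url and api_key below are illustrative stand-ins for the url/token returned by the llm_proxy_local tool further down:

    # Sketch: calling the proxy with the OpenAI SDK; values are illustrative.
    from openai import OpenAI

    client = OpenAI(
        base_url="http://127.0.0.1:8001/v1",     # proxy address inside the container
        api_key="<token-from-llm_proxy_local>",  # sent as "Authorization: Bearer <token>"
    )
    response = client.chat.completions.create(
        model="openai/gpt-5-mini",  # any OpenRouter model id
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)
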
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/server.py
@@ -1,3 +1,4 @@
+import os
 from pathlib import Path
 from typing import Optional
 
@@ -12,6 +13,10 @@ from .tools.remote_gpu import (
     create_remote_text_editor,
     remote_download,
 )
+from .tools.llm_proxy import (
+    llm_proxy_local,
+    llm_proxy_remote,
+)
 from .files import get_workspace_dir, WorkspaceDirectory
 
 
@@ -30,6 +35,9 @@ def run(host: str = "0.0.0.0", port: int = 5050, workspace: Optional[str] = None
     server.add_tool(remote_bash)
     server.add_tool(remote_text_editor)
     server.add_tool(remote_download)
+    if os.getenv("OPENROUTER_API_KEY"):
+        server.add_tool(llm_proxy_local)
+        server.add_tool(llm_proxy_remote)
 
     http_app = server.streamable_http_app()
 
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/tools/__init__.py
@@ -4,6 +4,10 @@ from .remote_gpu import (
     remote_bash,
     remote_download,
 )
+from .llm_proxy import (
+    llm_proxy_local,
+    llm_proxy_remote,
+)
 
 
 __all__ = [
@@ -11,4 +15,6 @@ __all__ = [
     "text_editor",
     "remote_bash",
     "remote_download",
+    "llm_proxy_local",
+    "llm_proxy_remote",
 ]
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/tools/bash.py
@@ -1,8 +1,11 @@
-import docker  # type: ignore
 import atexit
 import signal
 from typing import Optional, Any
 
+from docker import from_env as docker_from_env  # type: ignore
+from docker import DockerClient
+from docker.models.containers import Container  # type: ignore
+
 from mle_kit_mcp.files import get_workspace_dir
 
 
@@ -13,6 +16,40 @@ BASE_IMAGE = "python:3.12-slim"
 DOCKER_WORKSPACE_DIR_PATH = "/workdir"
 
 
+def get_docker_client() -> DockerClient:
+    global _client
+    if not _client:
+        _client = docker_from_env()
+    return _client
+
+
+def create_container() -> Container:
+    client = get_docker_client()
+    container = client.containers.run(
+        BASE_IMAGE,
+        "tail -f /dev/null",
+        detach=True,
+        remove=True,
+        tty=True,
+        stdin_open=True,
+        volumes={
+            get_workspace_dir(): {
+                "bind": DOCKER_WORKSPACE_DIR_PATH,
+                "mode": "rw",
+            }
+        },
+        working_dir=DOCKER_WORKSPACE_DIR_PATH,
+    )
+    return container
+
+
+def get_container() -> Container:
+    global _container
+    if not _container:
+        _container = create_container()
+    return _container
+
+
 def cleanup_container(signum: Optional[Any] = None, frame: Optional[Any] = None) -> None:
     global _container
     if _container:
@@ -45,33 +82,8 @@ def bash(command: str) -> str:
         command: The bash command to run.
     """
 
-    global _container, _client
-
-    if not _client:
-        _client = docker.from_env()
-
-    if not _container:
-        try:
-            _container = _client.containers.get("bash_runner")
-        except docker.errors.NotFound:
-            _container = _client.containers.run(
-                BASE_IMAGE,
-                "tail -f /dev/null",
-                detach=True,
-                remove=True,
-                name="bash_runner",
-                tty=True,
-                stdin_open=True,
-                volumes={
-                    get_workspace_dir(): {
-                        "bind": DOCKER_WORKSPACE_DIR_PATH,
-                        "mode": "rw",
-                    }
-                },
-                working_dir=DOCKER_WORKSPACE_DIR_PATH,
-            )
-
-    result = _container.exec_run(
+    container = get_container()
+    result = container.exec_run(
         ["bash", "-c", command],
         workdir=DOCKER_WORKSPACE_DIR_PATH,
         stdout=True,
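
The point of this refactor is that container startup moves out of bash() into lazy, memoized accessors (get_docker_client, get_container), so other tools, notably the new llm_proxy below, can reuse the same container. Note the named "bash_runner" lookup is gone: each process now starts a fresh anonymous container. A minimal sketch of the resulting call path (function names from the diff above; output illustrative):

    # Both tools now share one lazily created container.
    from mle_kit_mcp.tools.bash import bash, get_container

    print(bash("echo hi"))       # first call triggers get_container()
    container = get_container()  # returns the same cached Container
    print(container.exec_run(["bash", "-c", "pwd"]).output.decode())  # "/workdir"
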
mle_kit_mcp-0.1.0/mle_kit_mcp/tools/llm_proxy.py
@@ -0,0 +1,148 @@
+import os
+import json
+import time
+import random
+import secrets
+from pathlib import Path
+from typing import Optional
+
+from dotenv import load_dotenv
+
+from mle_kit_mcp.tools.bash import get_container
+from mle_kit_mcp.files import get_workspace_dir
+from mle_kit_mcp.utils import find_free_port
+
+from mle_kit_mcp.tools.remote_gpu import (
+    get_instance as _remote_get_instance,
+    run_command as _remote_run_command,
+    send_rsync as _remote_send_rsync,
+)
+
+INPUT_SCRIPT_FILE_NAME = "llm_proxy_source.py"
+OUTPUT_SCRIPT_FILE_NAME = "llm_proxy.py"
+DEPENDENCIES = "fastapi uvicorn httpx openai fire"
+START_TIMEOUT = 30
+
+
+def _write_proxy_script(script_path: Path) -> None:
+    source_script_path = Path(__file__).parent.parent / INPUT_SCRIPT_FILE_NAME
+    script = source_script_path.read_text()
+    script_path.write_text(script)
+
+
+def llm_proxy_local(port: Optional[int] = None) -> str:
+    """
+    Start a lightweight OpenRouter proxy inside the same Docker container used by the "bash" tool.
+
+    Returns a JSON string with fields: url, token, scope.
+    The url is reachable from inside the "bash" container as localhost.
+    Use the token in the Authorization header: "Bearer <token>" when calling the proxy.
+    It runs a standard OpenAI compatible server, so you can use it with any OpenAI compatible client.
+    You can use all models available on OpenRouter, for instance:
+    - openai/gpt-5-mini
+    - google/gemini-2.5-pro
+    - anthropic/claude-sonnet-4
+
+    Args:
+        port: Optional fixed port to bind inside the container. Random if omitted.
+    """
+
+    load_dotenv()
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    assert api_key, "Set OPENROUTER_API_KEY in the environment before starting the proxy."
+
+    _write_proxy_script(get_workspace_dir() / OUTPUT_SCRIPT_FILE_NAME)
+
+    container = get_container()
+    dependencies_cmd = f"python -m pip install --quiet --no-input {DEPENDENCIES}"
+    container.exec_run(["bash", "-lc", dependencies_cmd])
+
+    chosen_port = port or find_free_port()
+    token = secrets.token_urlsafe(24)
+    launch_cmd = (
+        f"OPENROUTER_API_KEY='{api_key}' ACCESS_TOKEN='{token}' "
+        f"nohup python {OUTPUT_SCRIPT_FILE_NAME} "
+        f"--host 127.0.0.1 --port {chosen_port} "
+        f"> llm_proxy.log 2>&1 "
+        f"& echo $! > llm_proxy.pid"
+    )
+    container.exec_run(["bash", "-lc", launch_cmd])
+
+    health_cmd = f'import httpx; print(httpx.get("http://127.0.0.1:{chosen_port}/health").json())'
+    start_time = time.time()
+    while time.time() - start_time < START_TIMEOUT:
+        result = container.exec_run(["python", "-c", health_cmd])
+        if result.exit_code == 0 and "ok" in result.output.decode("utf-8").strip():
+            break
+        time.sleep(1)
+    else:
+        raise Exception("Failed to start the proxy")
+
+    return json.dumps(
+        {
+            "url": f"http://127.0.0.1:{chosen_port}/v1/chat/completions",
+            "token": token,
+            "scope": "bash-container",
+        }
+    )
+
+
+def llm_proxy_remote(port: Optional[int] = None) -> str:
+    """
+    Start a lightweight OpenRouter proxy on the remote GPU machine.
+
+    Returns a JSON string with fields: url, token, scope.
+    The url is reachable from inside the remote machine as localhost.
+    Use the token in the Authorization header: "Bearer <token>" when calling the proxy.
+    It runs a standard OpenAI compatible server, so you can use it with any OpenAI compatible client.
+    You can use all models available on OpenRouter, for instance:
+    - openai/gpt-5-mini
+    - google/gemini-2.5-pro
+    - anthropic/claude-sonnet-4
+
+    Args:
+        port: Optional fixed port to bind on the remote. Random if omitted.
+    """
+
+    load_dotenv()
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    assert api_key, "Set OPENROUTER_API_KEY in the environment before starting the proxy."
+
+    instance = _remote_get_instance()
+    script_path = get_workspace_dir() / OUTPUT_SCRIPT_FILE_NAME
+    _write_proxy_script(script_path)
+    _remote_send_rsync(instance, f"{script_path}", "/root")
+
+    chosen_port = port or random.randint(5000, 6000)
+    token = secrets.token_urlsafe(24)
+    dependencies_cmd = f"python3 -m pip install -q --no-input {DEPENDENCIES}"
+    _remote_run_command(instance, dependencies_cmd, timeout=300, raise_exc=True)
+
+    launch_cmd = (
+        f"OPENROUTER_API_KEY='{api_key}' ACCESS_TOKEN='{token}' "
+        f"nohup python {OUTPUT_SCRIPT_FILE_NAME} "
+        f"--host 127.0.0.1 --port {chosen_port} "
+        f"> openrouter_proxy.log 2>&1 "
+        f"& echo $! > openrouter_proxy.pid"
+    )
+    _remote_run_command(instance, launch_cmd, timeout=60, raise_exc=True)
+
+    health_cmd = f'import httpx; print(httpx.get("http://127.0.0.1:{chosen_port}/health").json())'
+    start_time = time.time()
+    while time.time() - start_time < START_TIMEOUT:
+        result = _remote_run_command(
+            instance, f"python -c '{health_cmd}'", timeout=10, raise_exc=False
+        )
+        if result.returncode == 0 and "ok" in result.stdout.strip():
+            break
+        time.sleep(1)
+    else:
+        raise Exception("Failed to start the proxy")
+
+    return json.dumps(
+        {
+            "url": f"http://127.0.0.1:{chosen_port}/v1/chat/completions",
+            "token": token,
+            "scope": "remote-gpu",
+        }
+    )
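
Both launchers poll /health with Python's while/else: the else branch runs only when the loop condition goes false without a break, i.e. START_TIMEOUT elapsed before a healthy response. A self-contained illustration of the pattern:

    # while/else: the else clause fires only if the loop never hit "break".
    import time

    attempts = 0
    while attempts < 5:
        attempts += 1
        if attempts == 2:  # stand-in for a successful health probe
            break
        time.sleep(0.1)
    else:
        raise Exception("Failed to start the proxy")
    print(f"healthy after {attempts} attempts")
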
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/tools/remote_gpu.py
@@ -36,7 +36,28 @@ _sdk: Optional[VastAI] = None
 _instance_info: Optional[InstanceInfo] = None
 
 
-def cleanup_machine(signum: Optional[Any] = None, frame: Optional[Any] = None) -> None:
+def get_sdk() -> VastAI:
+    global _sdk
+    if not _sdk:
+        _sdk = VastAI(api_key=os.getenv("VAST_AI_KEY"))
+    return _sdk
+
+
+def get_instance() -> InstanceInfo:
+    load_dotenv()
+    signal.alarm(GLOBAL_TIMEOUT)
+    global _instance_info
+    if not _instance_info:
+        _instance_info = launch_instance(get_sdk(), DEFAULT_GPU_TYPE)
+
+    if _instance_info:
+        send_scripts()
+
+    assert _instance_info, "Failed to connect to a remote instance! Try again"
+    return _instance_info
+
+
+def cleanup_instance(signum: Optional[Any] = None, frame: Optional[Any] = None) -> None:
     global _instance_info
     signal.alarm(0)
     if _instance_info and _sdk:
@@ -50,10 +71,10 @@ def cleanup_machine(signum: Optional[Any] = None, frame: Optional[Any] = None) -
         raise KeyboardInterrupt()
 
 
-atexit.register(cleanup_machine)
-signal.signal(signal.SIGINT, cleanup_machine)
-signal.signal(signal.SIGTERM, cleanup_machine)
-signal.signal(signal.SIGALRM, cleanup_machine)
+atexit.register(cleanup_instance)
+signal.signal(signal.SIGINT, cleanup_instance)
+signal.signal(signal.SIGTERM, cleanup_instance)
+signal.signal(signal.SIGALRM, cleanup_instance)
 
 
 def wait_for_instance(vast_sdk: VastAI, instance_id: str, max_wait_time: int = 300) -> bool:
@@ -264,25 +285,6 @@ def send_scripts() -> None:
         send_rsync(_instance_info, f"{get_workspace_dir()}/{name}", "/root")
 
 
-def init_all() -> None:
-    global _sdk, _instance_info
-
-    load_dotenv()
-
-    if not _sdk:
-        _sdk = VastAI(api_key=os.getenv("VAST_AI_KEY"))
-    assert _sdk
-
-    signal.alarm(GLOBAL_TIMEOUT)
-    if not _instance_info:
-        _instance_info = launch_instance(_sdk, DEFAULT_GPU_TYPE)
-
-    if _instance_info:
-        send_scripts()
-
-    assert _instance_info, "Failed to connect to a remote instance! Try again"
-
-
 def remote_bash(command: str, timeout: int = 60) -> str:
     """
     Run commands in a bash shell on a remote machine with GPU cards.
@@ -300,10 +302,10 @@ def remote_bash(command: str, timeout: int = 60) -> str:
         timeout: Timeout for the command execution. 60 seconds by default. Set a higher value for heavy jobs.
     """
 
-    init_all()
-    assert _instance_info
+    instance = get_instance()
+    assert instance
     assert timeout
-    result = run_command(_instance_info, command, timeout=timeout, raise_exc=False)
+    result = run_command(instance, command, timeout=timeout, raise_exc=False)
     output = ("STDOUT: " + result.stdout + "\n") if result.stdout else ""
     output += ("STDERR: " + result.stderr) if result.stderr else ""
     return output.replace(VAST_AI_GREETING, "")
@@ -316,9 +318,9 @@ def remote_download(file_path: str) -> str:
     Args:
        file_path: Path to the file on a remote machine.
    """
-    init_all()
-    assert _instance_info
-    recieve_rsync(_instance_info, f"/root/{file_path}", f"{get_workspace_dir()}")
+    instance = get_instance()
+    assert instance
+    recieve_rsync(instance, f"/root/{file_path}", f"{get_workspace_dir()}")
     return f"File '{file_path}' downloaded!"
 
 
@@ -327,8 +329,7 @@ def create_remote_text_editor(
 ) -> Callable[..., str]:
     @functools.wraps(text_editor_func)
     def wrapper(*args: Any, **kwargs: Any) -> str:
-        init_all()
-        assert _instance_info
+        instance = get_instance()
 
         args_dict = {k: v for k, v in kwargs.items()}
         if args:
@@ -337,12 +338,12 @@ def create_remote_text_editor(
         command = args_dict["command"]
 
         if command != "write":
-            recieve_rsync(_instance_info, f"/root/{path}", f"{get_workspace_dir()}")
+            recieve_rsync(instance, f"/root/{path}", f"{get_workspace_dir()}")
 
         result: str = text_editor_func(*args, **kwargs)
 
         if command != "view":
-            send_rsync(_instance_info, f"{get_workspace_dir()}/{path}", "/root")
+            send_rsync(instance, f"{get_workspace_dir()}/{path}", "/root")
 
         return result
 
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp/utils.py
@@ -1,3 +1,5 @@
+import random
+import socket
 from typing import Optional
 
 
@@ -48,3 +50,16 @@ def truncate_content(
     prefix = content[:half_length]
     suffix = content[-half_length:]
     return prefix + disclaimer + suffix
+
+
+def find_free_port() -> Optional[int]:
+    ports = list(range(5000, 6001))
+    random.shuffle(ports)
+    for port in ports:
+        try:
+            with socket.socket() as s:
+                s.bind(("", port))
+                return port
+        except OSError:
+            continue
+    return None
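
A quick sanity check of the new helper. Note that find_free_port probes ports on the machine running the MCP server, while llm_proxy_local binds the chosen port inside the container, so the pick is best-effort rather than a guarantee:

    import socket
    from mle_kit_mcp.utils import find_free_port

    port = find_free_port()
    assert port is not None and 5000 <= port <= 6000
    with socket.socket() as s:
        s.bind(("", port))  # normally still free right after the probe
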
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mle-kit-mcp
-Version: 0.0.9
+Version: 0.1.0
 Summary: MCP server that provides different tools for MLE
 Author-email: Ilya Gusev <phoenixilya@gmail.com>
 Project-URL: Homepage, https://github.com/IlyaGusev/mle_kit_mcp
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/mle_kit_mcp.egg-info/SOURCES.txt
@@ -4,6 +4,7 @@ pyproject.toml
 mle_kit_mcp/__init__.py
 mle_kit_mcp/__main__.py
 mle_kit_mcp/files.py
+mle_kit_mcp/llm_proxy_source.py
 mle_kit_mcp/py.typed
 mle_kit_mcp/server.py
 mle_kit_mcp/utils.py
@@ -15,8 +16,10 @@ mle_kit_mcp.egg-info/requires.txt
 mle_kit_mcp.egg-info/top_level.txt
 mle_kit_mcp/tools/__init__.py
 mle_kit_mcp/tools/bash.py
+mle_kit_mcp/tools/llm_proxy.py
 mle_kit_mcp/tools/remote_gpu.py
 mle_kit_mcp/tools/text_editor.py
 tests/test_bash.py
+tests/test_llm_proxy.py
 tests/test_text_editor.py
 tests/test_truncate_context.py
{mle_kit_mcp-0.0.9 → mle_kit_mcp-0.1.0}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "mle-kit-mcp"
-version = "0.0.9"
+version = "0.1.0"
 description = "MCP server that provides different tools for MLE"
 readme = "README.md"
 authors = [
mle_kit_mcp-0.1.0/tests/test_llm_proxy.py
@@ -0,0 +1,24 @@
+import json
+from mle_kit_mcp.tools import llm_proxy_local, bash
+
+
+QUERY_SNIPPET = """
+import httpx
+headers = {{'Authorization': 'Bearer {token}'}}
+json_payload = {{'model': 'gpt-4o', 'messages': [{{'role': 'user', 'content': 'Hello, how are you?'}}]}}
+response = httpx.post("{url}", headers=headers, json=json_payload)
+print(response.json())
+"""
+
+
+def test_llm_proxy_local():
+    result = json.loads(llm_proxy_local(port=8001))
+    token = result["token"]
+    url = result["url"]
+    assert url
+    assert token
+
+    snippet = QUERY_SNIPPET.format(url=url, token=token)
+    result = bash(f'cat > test_query.py << "EOF"\n{snippet}\nEOF')
+    result = bash("python test_query.py")
+    assert "content" in result