mle-kit-mcp 0.2.2.tar.gz → 0.2.4.tar.gz

This diff shows the changes between two publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (27)
  1. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/PKG-INFO +1 -1
  2. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/llm_proxy.py +0 -10
  3. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/tools/llm_proxy.py +8 -20
  4. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/tools/remote_gpu.py +6 -0
  5. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp.egg-info/PKG-INFO +1 -1
  6. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/pyproject.toml +1 -1
  7. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/tests/test_llm_proxy.py +3 -6
  8. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/LICENSE +0 -0
  9. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/README.md +0 -0
  10. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/__init__.py +0 -0
  11. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/__main__.py +0 -0
  12. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/files.py +0 -0
  13. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/py.typed +0 -0
  14. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/server.py +0 -0
  15. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/tools/__init__.py +0 -0
  16. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/tools/bash.py +0 -0
  17. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/tools/text_editor.py +0 -0
  18. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp/utils.py +0 -0
  19. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp.egg-info/SOURCES.txt +0 -0
  20. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp.egg-info/dependency_links.txt +0 -0
  21. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp.egg-info/entry_points.txt +0 -0
  22. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp.egg-info/requires.txt +0 -0
  23. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/mle_kit_mcp.egg-info/top_level.txt +0 -0
  24. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/setup.cfg +0 -0
  25. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/tests/test_bash.py +0 -0
  26. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/tests/test_text_editor.py +0 -0
  27. {mle_kit_mcp-0.2.2 → mle_kit_mcp-0.2.4}/tests/test_truncate_context.py +0 -0
--- mle_kit_mcp-0.2.2/PKG-INFO
+++ mle_kit_mcp-0.2.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mle-kit-mcp
-Version: 0.2.2
+Version: 0.2.4
 Summary: MCP server that provides different tools for MLE
 Author-email: Ilya Gusev <phoenixilya@gmail.com>
 Project-URL: Homepage, https://github.com/IlyaGusev/mle_kit_mcp
--- mle_kit_mcp-0.2.2/mle_kit_mcp/llm_proxy.py
+++ mle_kit_mcp-0.2.4/mle_kit_mcp/llm_proxy.py
@@ -6,7 +6,6 @@ from openai import AsyncOpenAI
 from fastapi import FastAPI, Request, HTTPException
 from fastapi.responses import JSONResponse
 
-ACCESS_TOKEN = os.getenv("ACCESS_TOKEN", "")
 OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
 OPENROUTER_BASE_URL = os.getenv("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1")
 
@@ -14,17 +13,8 @@ app = FastAPI()
 client = AsyncOpenAI(base_url=OPENROUTER_BASE_URL, api_key=OPENROUTER_API_KEY)
 
 
-def _check_auth(request: Request) -> None:
-    auth = request.headers.get("authorization", "")
-    if not ACCESS_TOKEN or auth != f"Bearer {ACCESS_TOKEN}":
-        raise HTTPException(status_code=401, detail="Unauthorized")
-    if not OPENROUTER_API_KEY:
-        raise HTTPException(status_code=500, detail="OpenRouter key not configured")
-
-
 @app.post("/v1/chat/completions")
 async def chat_completions(request: Request) -> JSONResponse:
-    _check_auth(request)
     payload = await request.json()
     if isinstance(payload, dict) and payload.get("stream"):
         payload.pop("stream", None)
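
The net effect of these two hunks is that the proxy no longer requires an ACCESS_TOKEN bearer header: any request that reaches the endpoint is forwarded to OpenRouter. The forwarding body itself is outside this hunk, so the following is only a sketch of what the simplified handler plausibly looks like; the completion call and the error handling are assumptions, not code taken from the package.

# Hypothetical sketch of the simplified endpoint after the auth removal.
# Only the payload handling appears in the diff; the forwarding call is assumed.
@app.post("/v1/chat/completions")
async def chat_completions(request: Request) -> JSONResponse:
    payload = await request.json()
    if isinstance(payload, dict) and payload.get("stream"):
        payload.pop("stream", None)  # streaming responses are not proxied
    try:
        completion = await client.chat.completions.create(**payload)
        return JSONResponse(completion.model_dump())
    except Exception as exc:
        # Surface upstream/OpenRouter failures to the caller.
        raise HTTPException(status_code=502, detail=str(exc))
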
--- mle_kit_mcp-0.2.2/mle_kit_mcp/tools/llm_proxy.py
+++ mle_kit_mcp-0.2.4/mle_kit_mcp/tools/llm_proxy.py
@@ -2,15 +2,12 @@ import os
 import json
 import time
 import random
-import secrets
 from pathlib import Path
-from typing import Optional
 
 from dotenv import load_dotenv
 
 from mle_kit_mcp.tools.bash import get_container
 from mle_kit_mcp.files import get_workspace_dir
-from mle_kit_mcp.utils import find_free_port
 
 from mle_kit_mcp.tools.remote_gpu import (
     get_instance as _remote_get_instance,
@@ -30,13 +27,12 @@ def _write_proxy_script(script_path: Path) -> None:
     script_path.write_text(script)
 
 
-def llm_proxy_local(port: Optional[int] = None) -> str:
+def llm_proxy_local() -> str:
     """
     Start a lightweight OpenRouter proxy inside the same Docker container used by the "bash" tool.
 
-    Returns a JSON string with fields: url, token, scope.
+    Returns a JSON string with url and scope.
     The url is reachable from inside the "bash" container as localhost.
-    Use the token in the Authorization header: "Bearer <token>" when calling the proxy.
     It runs a standard OpenAI compatible server, so you can use it with any OpenAI compatible client.
     You can use all models available on OpenRouter, for instance:
     - openai/gpt-5-mini
@@ -57,10 +53,9 @@ def llm_proxy_local(port: Optional[int] = None) -> str:
     dependencies_cmd = f"python -m pip install --quiet --no-input {DEPENDENCIES}"
     container.exec_run(["bash", "-lc", dependencies_cmd])
 
-    chosen_port = port or find_free_port()
-    token = secrets.token_urlsafe(24)
+    chosen_port = random.randint(5000, 6000)
     launch_cmd = (
-        f"OPENROUTER_API_KEY='{api_key}' ACCESS_TOKEN='{token}' "
+        f"OPENROUTER_API_KEY='{api_key}' "
         f"nohup python {OUTPUT_SCRIPT_FILE_NAME} "
         f"--host 127.0.0.1 --port {chosen_port} "
         f"> llm_proxy.log 2>&1 "
@@ -81,27 +76,22 @@ def llm_proxy_local(port: Optional[int] = None) -> str:
     return json.dumps(
         {
             "url": f"http://127.0.0.1:{chosen_port}/v1/chat/completions",
-            "token": token,
             "scope": "bash-container",
         }
     )
 
 
-def llm_proxy_remote(port: Optional[int] = None) -> str:
+def llm_proxy_remote() -> str:
     """
     Start a lightweight OpenRouter proxy on the remote GPU machine.
 
-    Returns a JSON string with fields: url, token, scope.
+    Returns a JSON string with url and scope.
     The url is reachable from inside the remote machine as localhost.
-    Use the token in the Authorization header: "Bearer <token>" when calling the proxy.
     It runs a standard OpenAI compatible server, so you can use it with any OpenAI compatible client.
     You can use all models available on OpenRouter, for instance:
     - openai/gpt-5-mini
     - google/gemini-2.5-pro
     - anthropic/claude-sonnet-4
-
-    Args:
-        port: Optional fixed port to bind on the remote. Random if omitted.
     """
 
     load_dotenv()
@@ -113,13 +103,12 @@ def llm_proxy_remote(port: Optional[int] = None) -> str:
     _write_proxy_script(script_path)
     _remote_send_rsync(instance, f"{script_path}", "/root")
 
-    chosen_port = port or random.randint(5000, 6000)
-    token = secrets.token_urlsafe(24)
+    chosen_port = random.randint(5000, 6000)
     dependencies_cmd = f"python3 -m pip install -q --no-input {DEPENDENCIES}"
     _remote_run_command(instance, dependencies_cmd, timeout=300)
 
     launch_cmd = (
-        f"OPENROUTER_API_KEY='{api_key}' ACCESS_TOKEN='{token}' "
+        f"OPENROUTER_API_KEY='{api_key}' "
         f"nohup python {OUTPUT_SCRIPT_FILE_NAME} "
         f"--host 127.0.0.1 --port {chosen_port} "
         f"> openrouter_proxy.log 2>&1 "
@@ -140,7 +129,6 @@ def llm_proxy_remote(port: Optional[int] = None) -> str:
     return json.dumps(
         {
             "url": f"http://127.0.0.1:{chosen_port}/v1/chat/completions",
-            "token": token,
             "scope": "remote-gpu",
         }
     )
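
Since the token is gone, anything running inside the "bash" container (or on the remote GPU machine for llm_proxy_remote) can now call the proxy with a plain POST and no Authorization header. A minimal usage sketch, assuming the url was taken from the JSON the tool returns; the port shown is a made-up example:

# Minimal sketch of calling the proxy from inside the "bash" container.
# The url comes from the JSON returned by llm_proxy_local(); 5123 is an example port.
import httpx

url = "http://127.0.0.1:5123/v1/chat/completions"
payload = {
    "model": "openai/gpt-5-mini",
    "messages": [{"role": "user", "content": "Say hello in one word."}],
}
response = httpx.post(url, json=payload, timeout=120)
print(response.json()["choices"][0]["message"]["content"])
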
--- mle_kit_mcp-0.2.2/mle_kit_mcp/tools/remote_gpu.py
+++ mle_kit_mcp-0.2.4/mle_kit_mcp/tools/remote_gpu.py
@@ -354,11 +354,17 @@ def create_remote_text_editor(
         command = args_dict["command"]
 
         if command != "write":
+            dir_path = "/".join(path.split("/")[:-1])
+            if dir_path:
+                recieve_rsync(instance, f"/root/{path}", f"{get_workspace_dir()}/{dir_path}")
             recieve_rsync(instance, f"/root/{path}", f"{get_workspace_dir()}")
 
         result: str = text_editor_func(*args, **kwargs)
 
         if command != "view":
+            dir_path = "/".join(path.split("/")[:-1])
+            if dir_path:
+                send_rsync(instance, f"{get_workspace_dir()}/{path}", f"/root/{dir_path}")
             send_rsync(instance, f"{get_workspace_dir()}/{path}", "/root")
 
         return result
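
These added lines let the remote text editor handle files in subdirectories: the parent directory of the edited path is computed and used as the rsync destination, so nested files are synced into the matching directory instead of the workspace or /root top level. A small illustration of the path logic (the file names are hypothetical):

# Illustration of the parent-directory computation added above (hypothetical paths).
def parent_dir(path: str) -> str:
    return "/".join(path.split("/")[:-1])

print(parent_dir("train.py"))        # "" -> empty, so only the original flat rsync runs
print(parent_dir("src/train.py"))    # "src" -> extra rsync into /root/src and <workspace>/src
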
--- mle_kit_mcp-0.2.2/mle_kit_mcp.egg-info/PKG-INFO
+++ mle_kit_mcp-0.2.4/mle_kit_mcp.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mle-kit-mcp
-Version: 0.2.2
+Version: 0.2.4
 Summary: MCP server that provides different tools for MLE
 Author-email: Ilya Gusev <phoenixilya@gmail.com>
 Project-URL: Homepage, https://github.com/IlyaGusev/mle_kit_mcp
--- mle_kit_mcp-0.2.2/pyproject.toml
+++ mle_kit_mcp-0.2.4/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "mle-kit-mcp"
-version = "0.2.2"
+version = "0.2.4"
 description = "MCP server that provides different tools for MLE"
 readme = "README.md"
 authors = [
--- mle_kit_mcp-0.2.2/tests/test_llm_proxy.py
+++ mle_kit_mcp-0.2.4/tests/test_llm_proxy.py
@@ -4,21 +4,18 @@ from mle_kit_mcp.tools import llm_proxy_local, bash
 
 QUERY_SNIPPET = """
 import httpx
-headers = {{'Authorization': 'Bearer {token}'}}
 json_payload = {{'model': 'gpt-4o', 'messages': [{{'role': 'user', 'content': 'Hello, how are you?'}}]}}
-response = httpx.post("{url}", headers=headers, json=json_payload)
+response = httpx.post("{url}", json=json_payload)
 print(response.json())
 """
 
 
 def test_llm_proxy_local():
-    result = json.loads(llm_proxy_local(port=8001))
-    token = result["token"]
+    result = json.loads(llm_proxy_local())
     url = result["url"]
     assert url
-    assert token
 
-    snippet = QUERY_SNIPPET.format(url=url, token=token)
+    snippet = QUERY_SNIPPET.format(url=url)
     result = bash(f'cat > test_query.py << "EOF"\n{snippet}\nEOF')
     result = bash("python test_query.py")
     assert "content" in result