letswork 2.0.3__tar.gz → 2.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. {letswork-2.0.3 → letswork-2.0.5}/PKG-INFO +1 -1
  2. {letswork-2.0.3 → letswork-2.0.5}/letswork/cli.py +15 -8
  3. {letswork-2.0.3 → letswork-2.0.5}/letswork/events.py +0 -2
  4. {letswork-2.0.3 → letswork-2.0.5}/letswork/proxy.py +63 -26
  5. letswork-2.0.5/letswork/tunnel.py +52 -0
  6. {letswork-2.0.3 → letswork-2.0.5}/pyproject.toml +1 -1
  7. letswork-2.0.3/letswork/remote_client.py +0 -115
  8. letswork-2.0.3/letswork/tui/__init__.py +0 -1
  9. letswork-2.0.3/letswork/tui/app.py +0 -354
  10. letswork-2.0.3/letswork/tui/approval_panel.py +0 -78
  11. letswork-2.0.3/letswork/tui/chat.py +0 -54
  12. letswork-2.0.3/letswork/tui/chat_app.py +0 -145
  13. letswork-2.0.3/letswork/tui/file_tree.py +0 -191
  14. letswork-2.0.3/letswork/tui/file_viewer.py +0 -184
  15. letswork-2.0.3/letswork/tunnel.py +0 -37
  16. {letswork-2.0.3 → letswork-2.0.5}/.github/workflows/ci.yml +0 -0
  17. {letswork-2.0.3 → letswork-2.0.5}/.github/workflows/publish.yml +0 -0
  18. {letswork-2.0.3 → letswork-2.0.5}/.gitignore +0 -0
  19. {letswork-2.0.3 → letswork-2.0.5}/README.md +0 -0
  20. {letswork-2.0.3 → letswork-2.0.5}/docs/architecture.md +0 -0
  21. {letswork-2.0.3 → letswork-2.0.5}/docs/spec.md +0 -0
  22. {letswork-2.0.3 → letswork-2.0.5}/docs/tasks.md +0 -0
  23. {letswork-2.0.3 → letswork-2.0.5}/letswork/__init__.py +0 -0
  24. {letswork-2.0.3 → letswork-2.0.5}/letswork/approval.py +0 -0
  25. {letswork-2.0.3 → letswork-2.0.5}/letswork/auth.py +0 -0
  26. {letswork-2.0.3 → letswork-2.0.5}/letswork/filelock.py +0 -0
  27. {letswork-2.0.3 → letswork-2.0.5}/letswork/launcher.py +0 -0
  28. {letswork-2.0.3 → letswork-2.0.5}/letswork/server.py +0 -0
  29. {letswork-2.0.3 → letswork-2.0.5}/server.json +0 -0
  30. {letswork-2.0.3 → letswork-2.0.5}/tests/__init__.py +0 -0
  31. {letswork-2.0.3 → letswork-2.0.5}/tests/test_auth.py +0 -0
  32. {letswork-2.0.3 → letswork-2.0.5}/tests/test_filelock.py +0 -0
  33. {letswork-2.0.3 → letswork-2.0.5}/tests/test_server.py +0 -0
  34. {letswork-2.0.3 → letswork-2.0.5}/tests/test_tunnel.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: letswork
3
- Version: 2.0.3
3
+ Version: 2.0.5
4
4
  Summary: Real-time collaborative coding via MCP — two developers, one codebase
5
5
  Author: Sai Charan Rajoju
6
6
  License-Expression: MIT
@@ -37,10 +37,22 @@ def start(port, debug):
37
37
  server_module.approval_queue = approval_queue
38
38
 
39
39
  def _on_approved(change):
40
+ # Release the lock so the file can be written to again
41
+ rel = os.path.relpath(
42
+ os.path.abspath(os.path.join(project_root, change.path)),
43
+ os.path.abspath(project_root),
44
+ )
45
+ server_module.lock_manager.release_lock(rel, change.user_id)
40
46
  event_log.emit(EventType.FILE_WRITE, change.user_id,
41
47
  {"path": change.path, "status": "approved"})
42
48
 
43
49
  def _on_rejected(change):
50
+ # Release the lock so the guest can retry or move on
51
+ rel = os.path.relpath(
52
+ os.path.abspath(os.path.join(project_root, change.path)),
53
+ os.path.abspath(project_root),
54
+ )
55
+ server_module.lock_manager.release_lock(rel, change.user_id)
44
56
  event_log.emit(EventType.FILE_WRITE, change.user_id,
45
57
  {"path": change.path, "status": "rejected"})
46
58
 
@@ -148,14 +160,9 @@ def start(port, debug):
148
160
  click.echo(f" [{ts}] 🔌 {event.user_id} connected")
149
161
  elif event.event_type == EventType.ERROR:
150
162
  click.echo(f" [{ts}] ⚠️ {event.data.get('error', '?')}")
151
- elif event.event_type == EventType.PING:
152
- click.echo(f" [{ts}] 🏓 {event.user_id} pinged")
153
- if debug:
154
- if event.event_type not in (
155
- EventType.FILE_WRITE, EventType.CONNECTION,
156
- EventType.ERROR, EventType.PING,
157
- ):
158
- click.echo(f" [{ts}] [debug] {event.event_type.value} — {event.data}")
163
+ elif debug:
164
+ # Debug-only events (ping, file reads, locks, etc.)
165
+ click.echo(f" [{ts}] [debug] {event.event_type.value} by {event.user_id} — {event.data}")
159
166
 
160
167
  event_log.on_event(_notify)
161
168
 
@@ -73,8 +73,6 @@ class EventLog:
73
73
  return f"[{time}] 🔒 {user_id} locked {data.get('path', '?')}"
74
74
  elif event_type == EventType.FILE_UNLOCK:
75
75
  return f"[{time}] 🔓 {user_id} unlocked {data.get('path', '?')}"
76
- elif event_type == EventType.CHAT_MESSAGE:
77
- return f"[{time}] 💬 {user_id}: {data.get('message', '')}"
78
76
  elif event_type == EventType.FILE_TREE_REQUEST:
79
77
  return f"[{time}] 📁 {user_id} viewed file tree"
80
78
 
@@ -5,6 +5,9 @@ Claude Code connects to this as a stdio MCP server (reliable, no streaming issue
5
5
  This proxy forwards all tool calls to the host's HTTP MCP server using a proper
6
6
  MCP client session (required by FastMCP's streamable HTTP transport).
7
7
 
8
+ Reconnects automatically if the tunnel drops. Sends a keepalive ping every 30s
9
+ to prevent Cloudflare from closing idle connections.
10
+
8
11
  Usage (done automatically by `letswork join`):
9
12
  claude mcp add letswork -- letswork-proxy --url <URL> --token <TOKEN>
10
13
  """
@@ -19,6 +22,9 @@ from mcp import ClientSession, types
19
22
 
20
23
  log = logging.getLogger("letswork.proxy")
21
24
 
25
+ _RECONNECT_DELAYS = [1, 2, 5, 10, 30] # seconds between attempts
26
+ _KEEPALIVE_INTERVAL = 30 # seconds between keepalive pings
27
+
22
28
 
23
29
  def _setup_logging(debug: bool) -> None:
24
30
  level = logging.DEBUG if debug else logging.WARNING
@@ -36,13 +42,12 @@ def make_proxy_server(base_url: str, token: str) -> tuple:
36
42
  url = url + "/mcp"
37
43
 
38
44
  server = Server("letswork-proxy")
39
- # Shared session state — populated once the client connects
40
45
  _session: ClientSession | None = None
46
+ _session_lock = asyncio.Lock()
41
47
 
42
48
  async def _get_session() -> ClientSession:
43
- nonlocal _session
44
49
  if _session is None:
45
- raise RuntimeError("Not connected to host")
50
+ raise RuntimeError("Not connected to host — reconnecting, please retry in a moment")
46
51
  return _session
47
52
 
48
53
  @server.list_tools()
@@ -52,7 +57,6 @@ def make_proxy_server(base_url: str, token: str) -> tuple:
52
57
  tools = []
53
58
  for t in result.tools:
54
59
  schema = t.inputSchema if t.inputSchema else {"type": "object", "properties": {}}
55
- # Strip 'token' — proxy injects it automatically
56
60
  schema = dict(schema)
57
61
  props = dict(schema.get("properties", {}))
58
62
  props.pop("token", None)
@@ -72,39 +76,72 @@ def make_proxy_server(base_url: str, token: str) -> tuple:
72
76
  @server.call_tool()
73
77
  async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
74
78
  session = await _get_session()
75
- # Inject token automatically
76
79
  arguments = {**arguments, "token": token}
77
- log.debug(f"→ tool call: {name}({list(k for k in arguments if k != 'token')})")
80
+ log.debug(f"→ {name}({[k for k in arguments if k != 'token']})")
78
81
  try:
79
82
  result = await session.call_tool(name, arguments)
80
83
  except Exception as e:
81
- log.error(f"✗ tool call {name} failed: {e}")
84
+ log.error(f"✗ {name} failed: {e}")
82
85
  raise
83
- out = []
84
- for item in result.content:
85
- if item.type == "text":
86
- out.append(types.TextContent(type="text", text=item.text))
86
+ out = [types.TextContent(type="text", text=item.text)
87
+ for item in result.content if item.type == "text"]
87
88
  if not out:
88
89
  out.append(types.TextContent(type="text", text=str(result)))
89
90
  log.debug(f"← {name} OK")
90
91
  return out
91
92
 
92
- async def run(read_stream, write_stream):
93
+ async def _keepalive(session: ClientSession) -> None:
94
+ """Ping host every 30s so Cloudflare doesn't close the idle connection."""
95
+ while True:
96
+ await asyncio.sleep(_KEEPALIVE_INTERVAL)
97
+ try:
98
+ await session.call_tool("ping", {"token": token})
99
+ log.debug("keepalive ping OK")
100
+ except Exception as e:
101
+ log.debug(f"keepalive ping failed: {e}")
102
+ break # session is dead — let the outer loop reconnect
103
+
104
+ async def _connect_loop(read_stream, write_stream) -> None:
93
105
  nonlocal _session
94
- log.debug(f"Connecting to host at {url}")
95
- try:
96
- async with streamablehttp_client(url) as (host_read, host_write, _):
97
- async with ClientSession(host_read, host_write) as session:
98
- await session.initialize()
99
- _session = session
100
- log.debug("Connected to host MCP server")
101
- await server.run(
102
- read_stream, write_stream,
103
- server.create_initialization_options(),
104
- )
105
- except Exception as e:
106
- log.error(f"Proxy connection failed: {e}")
107
- raise
106
+ attempt = 0
107
+ first = True
108
+
109
+ while True:
110
+ try:
111
+ log.debug(f"Connecting to host at {url} (attempt {attempt + 1})")
112
+ async with streamablehttp_client(url) as (host_read, host_write, _):
113
+ async with ClientSession(host_read, host_write) as session:
114
+ await session.initialize()
115
+ async with _session_lock:
116
+ _session = session
117
+ attempt = 0
118
+ log.debug("Connected to host MCP server")
119
+
120
+ if first:
121
+ first = False
122
+ # Start serving stdio on first connect
123
+ asyncio.ensure_future(
124
+ server.run(read_stream, write_stream,
125
+ server.create_initialization_options())
126
+ )
127
+
128
+ # Run keepalive until connection drops
129
+ await _keepalive(session)
130
+
131
+ except Exception as e:
132
+ async with _session_lock:
133
+ _session = None
134
+ if first:
135
+ # Failed on very first attempt — bail out, Claude Code will show error
136
+ log.error(f"Initial connection failed: {e}")
137
+ raise
138
+ delay = _RECONNECT_DELAYS[min(attempt, len(_RECONNECT_DELAYS) - 1)]
139
+ attempt += 1
140
+ log.debug(f"Disconnected ({e}), retrying in {delay}s...")
141
+ await asyncio.sleep(delay)
142
+
143
+ async def run(read_stream, write_stream):
144
+ await _connect_loop(read_stream, write_stream)
108
145
 
109
146
  return server, run
110
147
 
@@ -0,0 +1,52 @@
1
+ import subprocess
2
+ import shutil
3
+ import re
4
+ import time
5
+ import threading
6
+
7
+
8
def start_tunnel(port: int) -> tuple[str, subprocess.Popen]:
    """Start a Cloudflare quick tunnel pointing at the local MCP server port.

    Spawns ``cloudflared`` and watches its stderr for the assigned
    ``*.trycloudflare.com`` URL (cloudflared logs it there, not on stdout).

    Args:
        port: Local port the MCP server is listening on.

    Returns:
        Tuple of (https_url, process) — the public tunnel URL and the running
        cloudflared subprocess handle (pass to ``stop_tunnel`` when done).

    Raises:
        RuntimeError: if cloudflared is not installed, exits before printing
            a URL, or no URL appears within 60 seconds.
    """
    if shutil.which("cloudflared") is None:
        raise RuntimeError("cloudflared is not installed. Install it from https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/downloads/")

    command = ["cloudflared", "tunnel", "--url", f"http://localhost:{port}"]
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    url_found = threading.Event()
    result: list[str] = []

    def _read_stderr():
        # Scan cloudflared's stderr for the quick-tunnel URL until the
        # deadline, the URL is found, or the process dies.
        deadline = time.monotonic() + 60  # 60s timeout
        while time.monotonic() < deadline:
            line = process.stderr.readline()
            if not line:
                if process.poll() is not None:
                    break  # cloudflared exited before printing a URL
                continue
            text = line.decode("utf-8", errors="replace").strip()
            match = re.search(r"https://[a-zA-Z0-9\-]+\.trycloudflare\.com", text)
            if match:
                result.append(match.group(0))
                break
        # Always wake the waiting caller — on early process exit this lets
        # start_tunnel fail fast instead of sitting out the full 60s wait.
        url_found.set()

    reader = threading.Thread(target=_read_stderr, daemon=True)
    reader.start()

    if url_found.wait(timeout=60) and result:
        return result[0], process

    # No URL: tear down cloudflared before reporting failure.
    process.terminate()
    try:
        process.wait(timeout=5)
    except subprocess.TimeoutExpired:
        process.kill()  # SIGTERM ignored — force it down
    raise RuntimeError("Failed to start tunnel: could not find tunnel URL within 60 seconds")
+
45
+
46
+ def stop_tunnel(process: subprocess.Popen) -> None:
47
+ """Gracefully terminate the cloudflared subprocess."""
48
+ process.terminate()
49
+ try:
50
+ process.wait(timeout=5)
51
+ except subprocess.TimeoutExpired:
52
+ process.kill()
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "letswork"
7
- version = "2.0.3"
7
+ version = "2.0.5"
8
8
  description = "Real-time collaborative coding via MCP — two developers, one codebase"
9
9
  readme = "README.md"
10
10
  license = "MIT"
@@ -1,115 +0,0 @@
1
import asyncio
import threading
from mcp.client.streamable_http import streamablehttp_client
from mcp import ClientSession

# Backoff schedule for reconnect attempts; the final value repeats thereafter.
_RECONNECT_DELAYS = [1, 2, 5, 10, 30]  # seconds between retries


class RemoteClient:
    """
    Connects to Host MCP server over streamable-http,
    exposes sync methods for TUI widgets.
    Auto-reconnects if the connection drops.

    All MCP I/O runs on a private asyncio event loop in a daemon thread;
    the public methods bridge into it with run_coroutine_threadsafe.
    """
    def __init__(self, mcp_url: str, token: str):
        self.mcp_url = mcp_url
        self.token = token
        # Populated by the background loop once a session is established.
        self._session: ClientSession | None = None
        # Private event loop owned by _thread.
        self._loop: asyncio.AbstractEventLoop | None = None
        self._thread: threading.Thread | None = None
        self._connected = False
        self._should_run = False
        # NOTE(review): annotation uses the builtin `callable`, not
        # typing.Callable — harmless at runtime, but loose for type checkers.
        self._on_reconnect: callable | None = None

    def on_reconnect(self, callback: callable) -> None:
        """Register a callback invoked after each successful reconnect."""
        self._on_reconnect = callback

    def connect(self) -> bool:
        """Start the background connection thread.

        Blocks up to 10 seconds waiting for the first connection attempt;
        returns True if connected, False otherwise. On a first-attempt
        failure the background loop stops rather than retrying.
        """
        self._should_run = True
        loop = asyncio.new_event_loop()
        self._loop = loop
        ready_event = threading.Event()

        async def _run_loop():
            # Connect, then idle inside the session's context managers until
            # disconnect() clears the flags; reconnect with backoff on error.
            attempt = 0
            while self._should_run:
                try:
                    async with streamablehttp_client(self.mcp_url) as (read, write, _):
                        async with ClientSession(read, write) as session:
                            await session.initialize()
                            self._session = session
                            self._connected = True
                            attempt = 0
                            ready_event.set()
                            if self._on_reconnect:
                                self._on_reconnect()
                            # Poll so the context managers stay open while the
                            # session is in use from other threads.
                            while self._connected and self._should_run:
                                await asyncio.sleep(0.1)
                except Exception:
                    self._connected = False
                    self._session = None
                    if not ready_event.is_set():
                        # First connect failed — signal caller and stop retrying
                        ready_event.set()
                        return
                    # Reconnect with backoff
                    delay = _RECONNECT_DELAYS[min(attempt, len(_RECONNECT_DELAYS) - 1)]
                    attempt += 1
                    await asyncio.sleep(delay)

        def thread_target():
            loop.run_until_complete(_run_loop())

        self._thread = threading.Thread(target=thread_target, daemon=True)
        self._thread.start()

        ready_event.wait(timeout=10)
        return self._connected

    def disconnect(self):
        """Stop the background loop, join its thread, and drop the session."""
        self._should_run = False
        self._connected = False
        if self._thread:
            self._thread.join(timeout=5)
        self._session = None
        self._loop = None

    def _run_async(self, coro) -> any:
        # Bridge a coroutine onto the background loop from the caller's
        # thread; blocks up to 15s for the result.
        if not self._connected or self._loop is None:
            raise RuntimeError("Not connected")
        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
        return future.result(timeout=15)

    def _call_tool(self, tool_name: str, arguments: dict) -> str:
        # Returns the first text content item, or an "Error: ..." string —
        # TUI callers render the string directly and never see exceptions.
        if not self._connected or self._session is None:
            return "Error: not connected"
        try:
            result = self._run_async(
                self._session.call_tool(tool_name, arguments)
            )
            for item in result.content:
                if item.type == "text":
                    return item.text
            return str(result)
        except Exception as e:
            return f"Error: {e}"

    def list_files(self, path: str = ".") -> str:
        """List files under *path* on the host (relative to project root)."""
        return self._call_tool("list_files", {"token": self.token, "path": path})

    def read_file(self, path: str) -> str:
        """Read a file's contents from the host."""
        return self._call_tool("read_file", {"token": self.token, "path": path})

    def write_file(self, path: str, content: str) -> str:
        """Write *content* to *path* on the host."""
        return self._call_tool("write_file", {"token": self.token, "path": path, "content": content})

    def lock_file(self, path: str) -> str:
        """Acquire the host-side lock on *path*."""
        return self._call_tool("lock_file", {"token": self.token, "path": path})

    def unlock_file(self, path: str) -> str:
        """Release the host-side lock on *path*."""
        return self._call_tool("unlock_file", {"token": self.token, "path": path})

    def get_status(self) -> str:
        """Fetch the host's session status summary."""
        return self._call_tool("get_status", {"token": self.token})
@@ -1 +0,0 @@
1
- """LetsWork TUI dashboard."""