ptn 0.1.4-py3-none-any.whl → 0.3.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- porterminal/__init__.py +82 -14
- porterminal/_version.py +34 -0
- porterminal/app.py +32 -4
- porterminal/application/ports/__init__.py +2 -0
- porterminal/application/ports/connection_registry_port.py +46 -0
- porterminal/application/services/management_service.py +2 -3
- porterminal/application/services/terminal_service.py +116 -28
- porterminal/asgi.py +8 -3
- porterminal/cli/args.py +103 -0
- porterminal/cli/display.py +1 -1
- porterminal/composition.py +19 -5
- porterminal/config.py +62 -70
- porterminal/container.py +3 -10
- porterminal/domain/__init__.py +0 -2
- porterminal/domain/entities/output_buffer.py +0 -4
- porterminal/domain/ports/__init__.py +1 -2
- porterminal/domain/ports/pty_port.py +0 -29
- porterminal/domain/ports/tab_repository.py +0 -5
- porterminal/infrastructure/auth.py +131 -0
- porterminal/infrastructure/cloudflared.py +5 -1
- porterminal/infrastructure/config/__init__.py +0 -2
- porterminal/infrastructure/config/shell_detector.py +342 -1
- porterminal/infrastructure/repositories/in_memory_tab.py +0 -4
- porterminal/infrastructure/server.py +37 -5
- porterminal/static/assets/app-BkHv5qu0.css +32 -0
- porterminal/static/assets/app-CaIGfw7i.js +72 -0
- porterminal/static/assets/app-D9ELFbEO.js +72 -0
- porterminal/static/assets/app-DF3nl_io.js +72 -0
- porterminal/static/assets/app-DQePboVd.css +32 -0
- porterminal/static/assets/app-DoBiVkTD.js +72 -0
- porterminal/static/assets/app-azbHOsRw.css +32 -0
- porterminal/static/assets/app-nMNFwMa6.css +32 -0
- porterminal/static/index.html +28 -25
- porterminal/updater.py +115 -168
- ptn-0.3.2.dist-info/METADATA +171 -0
- {ptn-0.1.4.dist-info → ptn-0.3.2.dist-info}/RECORD +39 -33
- porterminal/infrastructure/config/yaml_loader.py +0 -34
- porterminal/static/assets/app-BQiuUo6Q.css +0 -32
- porterminal/static/assets/app-YNN_jEhv.js +0 -71
- porterminal/static/manifest.json +0 -31
- porterminal/static/sw.js +0 -66
- ptn-0.1.4.dist-info/METADATA +0 -191
- {ptn-0.1.4.dist-info → ptn-0.3.2.dist-info}/WHEEL +0 -0
- {ptn-0.1.4.dist-info → ptn-0.3.2.dist-info}/entry_points.txt +0 -0
- {ptn-0.1.4.dist-info → ptn-0.3.2.dist-info}/licenses/LICENSE +0 -0
porterminal/__init__.py
CHANGED
@@ -8,9 +8,13 @@ This package provides:
 - Configuration system with shell auto-detection
 """
 
-
+try:
+    from ._version import __version__
+except ImportError:
+    __version__ = "0.0.0-dev"  # Fallback before first build
 
 import os
+import signal
 import subprocess
 import sys
 import time
@@ -139,8 +143,37 @@ def _run_in_background(args) -> int:
 def main() -> int:
     """Main entry point."""
     args = parse_args()
+
+    # Check for updates (notification only, never exec's)
+    from porterminal.updater import check_and_notify
+
+    check_and_notify()
     verbose = args.verbose
 
+    # Load config to check require_password setting
+    from porterminal.config import get_config
+
+    config = get_config()
+
+    # Handle password mode (CLI flag or config setting)
+    if args.password or config.security.require_password:
+        import getpass
+
+        try:
+            password = getpass.getpass("Enter password: ")
+            if not password:
+                console.print("[red]Error:[/red] Password cannot be empty")
+                return 1
+
+            import bcrypt
+
+            password_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt())
+            os.environ["PORTERMINAL_PASSWORD_HASH"] = password_hash.decode()
+            console.print("[green]Password protection enabled[/green]")
+        except KeyboardInterrupt:
+            console.print("\n[dim]Cancelled[/dim]")
+            return 0
+
     # Handle background mode
     if args.background:
         return _run_in_background(args)
@@ -162,9 +195,6 @@ def main() -> int:
     cwd_str = str(cwd)
     os.environ["PORTERMINAL_CWD"] = cwd_str
 
-    from porterminal.config import get_config
-
-    config = get_config()
     bind_host = config.server.host
     preferred_port = config.server.port
     port = preferred_port
@@ -218,6 +248,10 @@ def main() -> int:
         status.update("[cyan]Establishing tunnel...[/cyan]")
         tunnel_process, tunnel_url = start_cloudflared(port)
 
+    if tunnel_url:
+        # Wait for tunnel to stabilize before showing URL
+        time.sleep(1)
+
     if not tunnel_url:
         console.print("[red]Error:[/red] Failed to establish tunnel")
         for proc in [server_process, tunnel_process]:
@@ -257,10 +291,18 @@ def main() -> int:
     try:
         while True:
             if server_process is not None and server_process.poll() is not None:
-
+                code = server_process.returncode
+                if code == 0 or code < 0:
+                    console.print("\n[dim]Server stopped[/dim]")
+                else:
+                    console.print(f"\n[yellow]Server stopped (exit code {code})[/yellow]")
                 break
            if tunnel_process is not None and tunnel_process.poll() is not None:
-
+                code = tunnel_process.returncode
+                if code == 0 or code < 0:
+                    console.print("\n[dim]Tunnel closed[/dim]")
+                else:
+                    console.print(f"\n[yellow]Tunnel stopped (exit code {code})[/yellow]")
                 break
             time.sleep(1)
 
@@ -271,15 +313,41 @@ def main() -> int:
     def cleanup_process(proc: subprocess.Popen | None, name: str) -> None:
         if proc is None or proc.poll() is not None:
             return
-        try:
-            proc.terminate()
-            proc.wait(timeout=5)
-        except subprocess.TimeoutExpired:
-            proc.kill()
-            proc.wait()  # Reap the killed process
 
-
-
+        if sys.platform == "win32":
+            # Windows: use taskkill /T to kill entire process tree
+            try:
+                subprocess.run(
+                    ["taskkill", "/T", "/F", "/PID", str(proc.pid)],
+                    capture_output=True,
+                    timeout=10,
+                )
+                # Wait for process to actually terminate
+                proc.wait(timeout=5)
+            except (subprocess.TimeoutExpired, OSError):
+                # Last resort: try to kill just the main process
+                try:
+                    proc.kill()
+                    proc.wait(timeout=2)
+                except (OSError, subprocess.TimeoutExpired):
+                    pass
+        else:
+            # Unix: terminate gracefully, then kill
+            try:
+                proc.terminate()
+                proc.wait(timeout=5)
+            except subprocess.TimeoutExpired:
+                proc.kill()
+                proc.wait()
+
+    # Ignore Ctrl+C during cleanup to prevent orphaned processes
+    # Cleanup has timeouts so it won't hang forever
+    old_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
+    try:
+        cleanup_process(server_process, "server")
+        cleanup_process(tunnel_process, "tunnel")
+    finally:
+        signal.signal(signal.SIGINT, old_handler)
 
     return 0
 
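The password flow added above never stores the plaintext: it keeps only a bcrypt hash in PORTERMINAL_PASSWORD_HASH for the server process to verify against. A standalone sketch of that hash/verify round trip follows; only the bcrypt calls and the environment variable name come from the diff, the sample password is illustrative.

# Sketch of the hash/verify round trip used above; the sample password is illustrative.
import os

import bcrypt

password = "correct horse battery staple"  # hypothetical value read via getpass in the real CLI
password_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt())
os.environ["PORTERMINAL_PASSWORD_HASH"] = password_hash.decode()

# Later, a server process can verify a submitted password against the stored hash:
stored = os.environ["PORTERMINAL_PASSWORD_HASH"].encode()
assert bcrypt.checkpw(password.encode(), stored)
assert not bcrypt.checkpw(b"wrong password", stored)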
porterminal/_version.py
ADDED
@@ -0,0 +1,34 @@
+# file generated by setuptools-scm
+# don't change, don't track in version control
+
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple
+    from typing import Union
+
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
+else:
+    VERSION_TUPLE = object
+    COMMIT_ID = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID
+
+__version__ = version = '0.3.2'
+__version_tuple__ = version_tuple = (0, 3, 2)
+
+__commit_id__ = commit_id = None
porterminal/app.py
CHANGED
@@ -13,9 +13,11 @@ from fastapi.responses import HTMLResponse, JSONResponse
 from fastapi.staticfiles import StaticFiles
 from starlette.middleware.base import RequestResponseEndpoint
 
+from . import __version__
 from .composition import create_container
 from .container import Container
 from .domain import UserId
+from .infrastructure.auth import authenticate_connection, validate_auth_message
 from .infrastructure.web import FastAPIWebSocketAdapter
 from .logging_setup import setup_logging_from_env
 
@@ -51,10 +53,15 @@ async def lifespan(app: FastAPI):
     security_preflight_checks()
 
     # Create DI container with all wired dependencies
-    config_path
+    # config_path=None uses find_config_file() to search standard locations
     cwd = os.environ.get("PORTERMINAL_CWD")
 
-
+    # Get password hash from environment if set
+    password_hash = None
+    if hash_str := os.environ.get("PORTERMINAL_PASSWORD_HASH"):
+        password_hash = hash_str.encode()
+
+    container = create_container(config_path=None, cwd=cwd, password_hash=password_hash)
     app.state.container = container
 
     # Wire up cascade: when session is destroyed, close associated tabs and broadcast
@@ -82,7 +89,7 @@ def create_app() -> FastAPI:
     app = FastAPI(
         title="Porterminal",
         description="Web-based terminal accessible from phone via Cloudflare Tunnel",
-        version=
+        version=__version__,
         lifespan=lifespan,
     )
 
@@ -224,6 +231,17 @@ def create_app() -> FastAPI:
         )
 
         try:
+            # Authentication phase if password is set
+            if container.password_hash is not None:
+                authenticated = await authenticate_connection(
+                    connection,
+                    container.password_hash,
+                    max_attempts=container.max_auth_attempts,
+                )
+                if not authenticated:
+                    await websocket.close(code=4001, reason="Auth failed")
+                    return
+
             # Register for broadcasts
             await connection_registry.register(user_id, connection)
 
@@ -332,6 +350,13 @@ def create_app() -> FastAPI:
            session.session_id,
        )
 
+        # Authentication check if password is set
+        if container.password_hash is not None:
+            if not await validate_auth_message(connection, container.password_hash):
+                logger.warning("Terminal WebSocket auth failed user_id=%s", user_id)
+                await websocket.close(code=4001, reason="Auth failed")
+                return
+
         # Update tab access time
         tab_service.touch_tab(tab_id, user_id)
 
@@ -339,13 +364,16 @@ def create_app() -> FastAPI:
         # Register connection for broadcasts
         await connection_registry.register(user_id, connection)
 
-        # Send session info
+        # Send session info including current dimensions
+        # New clients should adapt to existing dimensions to prevent rendering issues
         await connection.send_message(
             {
                 "type": "session_info",
                 "session_id": session.session_id,
                 "shell": session.shell_id,
                 "tab_id": tab.tab_id,
+                "cols": session.dimensions.cols,
+                "rows": session.dimensions.rows,
             }
         )
 
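The new WebSocket handling gates connections on an authentication phase before registering them, closing with code 4001 on failure. The helpers it calls live in porterminal/infrastructure/auth.py (added in this release but not shown in this section), so the sketch below only illustrates the general shape of such a gate: the "password" field and "auth_ok" reply are assumptions, while the bcrypt check and the 4001 close code mirror the diff.

# Illustrative sketch only; not the package's authenticate_connection/validate_auth_message.
import os

import bcrypt
from fastapi import FastAPI, WebSocket

app = FastAPI()


@app.websocket("/ws")
async def ws_endpoint(websocket: WebSocket) -> None:
    await websocket.accept()
    stored = os.environ.get("PORTERMINAL_PASSWORD_HASH", "").encode()
    if stored:
        # First message must carry the password (field name is an assumption)
        msg = await websocket.receive_json()
        password = str(msg.get("password", ""))
        if not bcrypt.checkpw(password.encode(), stored):
            # Same close code the diff uses for failed auth
            await websocket.close(code=4001, reason="Auth failed")
            return
        await websocket.send_json({"type": "auth_ok"})
    # ...normal session handling would continue here...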
porterminal/application/ports/connection_registry_port.py
ADDED
@@ -0,0 +1,46 @@
+"""Connection registry port - interface for broadcasting to user connections."""
+
+from typing import TYPE_CHECKING, Any, Protocol
+
+if TYPE_CHECKING:
+    from porterminal.domain import UserId
+
+    from .connection_port import ConnectionPort
+
+
+class ConnectionRegistryPort(Protocol):
+    """Protocol for managing and broadcasting to user connections.
+
+    Infrastructure layer (e.g., UserConnectionRegistry) implements this.
+    Application layer uses this interface for broadcasting messages.
+    """
+
+    async def register(self, user_id: "UserId", connection: "ConnectionPort") -> None:
+        """Register a new connection for a user."""
+        ...
+
+    async def unregister(self, user_id: "UserId", connection: "ConnectionPort") -> None:
+        """Unregister a connection."""
+        ...
+
+    async def broadcast(
+        self,
+        user_id: "UserId",
+        message: dict[str, Any],
+        exclude: "ConnectionPort | None" = None,
+    ) -> int:
+        """Send message to all connections for a user.
+
+        Args:
+            user_id: User to broadcast to.
+            message: Message dict to send.
+            exclude: Optional connection to exclude (e.g., the sender).
+
+        Returns:
+            Number of connections sent to.
+        """
+        ...
+
+    def total_connections(self) -> int:
+        """Get total number of connections across all users."""
+        ...
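Because ConnectionRegistryPort is a typing.Protocol, any object with structurally matching methods satisfies it; the package's real implementation is UserConnectionRegistry in the infrastructure layer, which this diff does not show. A minimal in-memory sketch, assuming ConnectionPort exposes an async send_message(dict):

# Minimal sketch of an object satisfying ConnectionRegistryPort; names other than
# the protocol methods are illustrative, not taken from the package.
from typing import Any


class InMemoryConnectionRegistry:
    def __init__(self) -> None:
        self._connections: dict[str, set[Any]] = {}

    async def register(self, user_id: Any, connection: Any) -> None:
        self._connections.setdefault(str(user_id), set()).add(connection)

    async def unregister(self, user_id: Any, connection: Any) -> None:
        self._connections.get(str(user_id), set()).discard(connection)

    async def broadcast(
        self, user_id: Any, message: dict[str, Any], exclude: Any = None
    ) -> int:
        sent = 0
        for conn in list(self._connections.get(str(user_id), set())):
            if conn is exclude:
                continue
            await conn.send_message(message)  # assumes ConnectionPort has send_message
            sent += 1
        return sent

    def total_connections(self) -> int:
        return sum(len(conns) for conns in self._connections.values())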
porterminal/application/services/management_service.py
CHANGED
@@ -3,7 +3,7 @@
 import logging
 from collections.abc import Callable
 
-from porterminal.application.ports import ConnectionPort
+from porterminal.application.ports import ConnectionPort, ConnectionRegistryPort
 from porterminal.application.services.session_service import SessionService
 from porterminal.application.services.tab_service import TabService
 from porterminal.domain import (
@@ -11,7 +11,6 @@ from porterminal.domain import (
     TerminalDimensions,
     UserId,
 )
-from porterminal.infrastructure.registry import UserConnectionRegistry
 
 logger = logging.getLogger(__name__)
 
@@ -27,7 +26,7 @@ class ManagementService:
         self,
         session_service: SessionService,
         tab_service: TabService,
-        connection_registry:
+        connection_registry: ConnectionRegistryPort,
         shell_provider: Callable[[str | None], ShellCommand | None],
         default_dimensions: TerminalDimensions,
     ) -> None:
porterminal/application/services/terminal_service.py
CHANGED
@@ -63,11 +63,23 @@ class TerminalService:
         # Multi-client support: track connections and read loops per session
         self._session_connections: dict[str, set[ConnectionPort]] = {}
         self._session_read_tasks: dict[str, asyncio.Task[None]] = {}
+        # Per-session locks to prevent race between buffer replay and broadcast
+        self._session_locks: dict[str, asyncio.Lock] = {}
 
     # -------------------------------------------------------------------------
     # Multi-client connection tracking
     # -------------------------------------------------------------------------
 
+    def _get_session_lock(self, session_id: str) -> asyncio.Lock:
+        """Get or create a lock for a session."""
+        if session_id not in self._session_locks:
+            self._session_locks[session_id] = asyncio.Lock()
+        return self._session_locks[session_id]
+
+    def _cleanup_session_lock(self, session_id: str) -> None:
+        """Remove session lock when no longer needed."""
+        self._session_locks.pop(session_id, None)
+
     def _register_connection(self, session_id: str, connection: ConnectionPort) -> int:
         """Register a connection for a session. Returns connection count."""
         if session_id not in self._session_connections:
@@ -85,8 +97,21 @@ class TerminalService:
             del self._session_connections[session_id]
         return count
 
+    async def _send_to_connections(self, connections: list[ConnectionPort], data: bytes) -> None:
+        """Send data to a list of connections (used with pre-snapshotted list)."""
+        for conn in connections:
+            try:
+                await conn.send_output(data)
+            except Exception:
+                pass  # Connection cleanup handled elsewhere
+
     async def _broadcast_output(self, session_id: str, data: bytes) -> None:
-        """Broadcast PTY output to all connections for a session.
+        """Broadcast PTY output to all connections for a session.
+
+        Note: This is only used for error/status messages where the race
+        condition doesn't matter. For PTY data, use _send_to_connections
+        with a lock-protected snapshot.
+        """
         connections = self._session_connections.get(session_id, set())
         dead: list[ConnectionPort] = []
         for conn in list(connections):  # Copy to avoid mutation during iteration
@@ -128,31 +153,41 @@ class TerminalService:
         session_id = str(session.id)
         clock = AsyncioClock()
         rate_limiter = TokenBucketRateLimiter(self._rate_limit_config, clock)
+        lock = self._get_session_lock(session_id)
+
+        # Register atomically to prevent race with broadcast.
+        # Without this lock, a new client could register between add_output and
+        # broadcast, receiving the same data twice (once from buffer, once broadcast).
+        #
+        # Buffer snapshot and read loop start are also under lock to ensure:
+        # - Buffer is captured before any new data arrives
+        # - Only one read loop starts per session (prevents duplicate PTY reads)
+        # - I/O (send_output) happens OUTSIDE lock to avoid blocking other clients
+        buffered = None
+        async with lock:
+            connection_count = self._register_connection(session_id, connection)
+            is_first_client = connection_count == 1
 
-
-
-
-
-
-            "Client connected session_id=%s connection_count=%d",
-            session_id,
-            connection_count,
-        )
+            logger.info(
+                "Client connected session_id=%s connection_count=%d",
+                session_id,
+                connection_count,
+            )
 
-
-        # First client starts the shared PTY read loop
+            # First client starts the shared PTY read loop (under lock to prevent duplicates)
             if is_first_client:
                 self._start_broadcast_read_loop(session, session_id)
 
+            # Snapshot buffer while under lock (ensures consistency with broadcast)
             # Note: session_info is sent by the caller (app.py) to include tab_id
-
-        # Replay buffered output to THIS connection only (not broadcast)
             if not skip_buffer and not session.output_buffer.is_empty:
                 buffered = session.get_buffered_output()
-        # Don't clear buffer - other clients may need it too
-        if buffered:
-            await connection.send_output(buffered)
 
+        # Replay buffer OUTSIDE lock to avoid blocking other clients during I/O
+        if buffered:
+            await connection.send_output(buffered)
+
+        try:
             # Start heartbeat for this connection
             heartbeat_task = asyncio.create_task(self._heartbeat_loop(connection))
 
@@ -173,9 +208,10 @@ class TerminalService:
                 remaining,
             )
 
-            # Last client: stop the read loop
+            # Last client: stop the read loop and cleanup lock
            if remaining == 0:
                await self._stop_broadcast_read_loop(session_id)
+                self._cleanup_session_lock(session_id)
 
     def _start_broadcast_read_loop(
         self,
@@ -212,6 +248,11 @@ class TerminalService:
        - Small data (<64 bytes): flush immediately for interactive responsiveness
        - Large data: batch for ~16ms to reduce WebSocket message frequency
        - Flush if batch exceeds 16KB to prevent memory buildup
+
+        Thread safety:
+        - Uses session lock to prevent race between add_output/broadcast and
+          new client registration/buffer replay. Lock is held briefly during
+          buffer update and connection snapshot, not during actual I/O.
         """
         # Check if PTY is alive at start
         if not session.pty_handle.is_alive():
@@ -219,18 +260,29 @@ class TerminalService:
            await self._broadcast_output(session_id, b"\r\n[PTY failed to start]\r\n")
            return
 
+        lock = self._get_session_lock(session_id)
        batch_buffer: list[bytes] = []
        batch_size = 0
        last_flush_time = asyncio.get_running_loop().time()
 
        async def flush_batch() -> None:
+            """Flush batched data with lock protection."""
            nonlocal batch_buffer, batch_size, last_flush_time
-            if batch_buffer:
-
-
-
-
-
+            if not batch_buffer:
+                return
+
+            combined = b"".join(batch_buffer)
+            batch_buffer = []
+            batch_size = 0
+            last_flush_time = asyncio.get_running_loop().time()
+
+            # Acquire lock, add to buffer, snapshot connections, release lock
+            async with lock:
+                session.add_output(combined)
+                connections = list(self._session_connections.get(session_id, set()))
+
+            # Broadcast outside lock (I/O can be slow)
+            await self._send_to_connections(connections, combined)
 
        def has_connections() -> bool:
            return (
@@ -242,12 +294,16 @@ class TerminalService:
            try:
                data = session.pty_handle.read(4096)
                if data:
-                    session.add_output(data)
                    session.touch(datetime.now(UTC))
 
                    # Small data (interactive): flush immediately for responsiveness
                    if len(data) < INTERACTIVE_THRESHOLD and not batch_buffer:
-
+                        # Acquire lock, add to buffer, snapshot connections
+                        async with lock:
+                            session.add_output(data)
+                            connections = list(self._session_connections.get(session_id, set()))
+                        # Broadcast outside lock
+                        await self._send_to_connections(connections, data)
                    else:
                        # Batch larger data
                        batch_buffer.append(data)
@@ -351,7 +407,7 @@ class TerminalService:
        msg_type = message.get("type")
 
        if msg_type == "resize":
-            await self._handle_resize(session, message)
+            await self._handle_resize(session, message, connection)
        elif msg_type == "input":
            await self._handle_json_input(session, message, rate_limiter, connection)
        elif msg_type == "ping":
@@ -366,8 +422,17 @@ class TerminalService:
        self,
        session: Session[PTYPort],
        message: dict[str, Any],
+        connection: ConnectionPort,
    ) -> None:
-        """Handle terminal resize message.
+        """Handle terminal resize message.
+
+        Multi-client strategy:
+        - When multiple clients share a session, PTY dimensions are locked
+        - Only the first client (or when all clients agree) can resize
+        - New clients receive current dimensions and must adapt locally
+        - This prevents rendering artifacts from dimension mismatches
+        """
+        session_id = str(session.id)
        cols = int(message.get("cols", 120))
        rows = int(message.get("rows", 30))
 
@@ -377,6 +442,29 @@ class TerminalService:
        if session.dimensions == new_dims:
            return
 
+        # Check if multiple clients are connected
+        connections = self._session_connections.get(session_id, set())
+        if len(connections) > 1:
+            # Multiple clients: reject resize, tell client to use current dimensions
+            logger.info(
+                "Resize rejected (multi-client) session_id=%s requested=%dx%d current=%dx%d",
+                session.id,
+                new_dims.cols,
+                new_dims.rows,
+                session.dimensions.cols,
+                session.dimensions.rows,
+            )
+            # Send current dimensions back so client can adapt
+            await connection.send_message(
+                {
+                    "type": "resize_sync",
+                    "cols": session.dimensions.cols,
+                    "rows": session.dimensions.rows,
+                }
+            )
+            return
+
+        # Single client: allow resize
        session.update_dimensions(new_dims)
        session.pty_handle.resize(new_dims)
        session.touch(datetime.now(UTC))
porterminal/asgi.py
CHANGED
@@ -17,15 +17,20 @@ def create_app_from_env():
 
     This is called by uvicorn when using the --factory flag.
     Environment variables:
-        PORTERMINAL_CONFIG_PATH: Path to config file (
+        PORTERMINAL_CONFIG_PATH: Path to config file (overrides search)
         PORTERMINAL_CWD: Working directory for PTY sessions
+
+    Config search order (when env var not set):
+        1. ptn.yaml in cwd
+        2. .ptn/ptn.yaml in cwd
+        3. ~/.ptn/ptn.yaml
     """
     from porterminal.app import create_app
 
-    config_path = os.environ.get("PORTERMINAL_CONFIG_PATH", "config.yaml")
     cwd = os.environ.get("PORTERMINAL_CWD")
 
-
+    # config_path=None uses find_config_file() to search standard locations
+    container = create_container(config_path=None, cwd=cwd)
 
     # Create app with container
     # Note: The current app.py doesn't accept container yet,
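The docstring change above documents a three-step config search when PORTERMINAL_CONFIG_PATH is unset. A hedged sketch of what such a lookup could look like; the package's real find_config_file() is defined elsewhere in the codebase and may differ in details:

# Sketch only: mirrors the documented search order, not the package's actual helper.
from pathlib import Path


def find_config_file(cwd: Path | None = None) -> Path | None:
    base = cwd or Path.cwd()
    candidates = [
        base / "ptn.yaml",             # 1. ptn.yaml in cwd
        base / ".ptn" / "ptn.yaml",    # 2. .ptn/ptn.yaml in cwd
        Path.home() / ".ptn" / "ptn.yaml",  # 3. ~/.ptn/ptn.yaml
    ]
    for candidate in candidates:
        if candidate.is_file():
            return candidate
    return None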
|