portacode 1.4.15__py3-none-any.whl → 1.4.15.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- portacode/_version.py +2 -2
- portacode/connection/handlers/WEBSOCKET_PROTOCOL.md +95 -14
- portacode/connection/handlers/project_state/manager.py +3 -25
- portacode/connection/handlers/proxmox_infra.py +413 -364
- portacode/connection/terminal.py +0 -80
- {portacode-1.4.15.dist-info → portacode-1.4.15.dev1.dist-info}/METADATA +1 -1
- {portacode-1.4.15.dist-info → portacode-1.4.15.dev1.dist-info}/RECORD +11 -11
- {portacode-1.4.15.dist-info → portacode-1.4.15.dev1.dist-info}/WHEEL +1 -1
- {portacode-1.4.15.dist-info → portacode-1.4.15.dev1.dist-info}/entry_points.txt +0 -0
- {portacode-1.4.15.dist-info → portacode-1.4.15.dev1.dist-info}/licenses/LICENSE +0 -0
- {portacode-1.4.15.dist-info → portacode-1.4.15.dev1.dist-info}/top_level.txt +0 -0
|
@@ -5,7 +5,6 @@ from __future__ import annotations
|
|
|
5
5
|
import asyncio
|
|
6
6
|
import json
|
|
7
7
|
import logging
|
|
8
|
-
import math
|
|
9
8
|
import os
|
|
10
9
|
import secrets
|
|
11
10
|
import shlex
|
|
@@ -16,7 +15,8 @@ import sys
|
|
|
16
15
|
import tempfile
|
|
17
16
|
import time
|
|
18
17
|
import threading
|
|
19
|
-
|
|
18
|
+
import urllib.request
|
|
19
|
+
from datetime import datetime
|
|
20
20
|
from pathlib import Path
|
|
21
21
|
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple
|
|
22
22
|
|
|
@@ -41,13 +41,15 @@ BRIDGE_IP = SUBNET_CIDR.split("/", 1)[0]
|
|
|
41
41
|
DHCP_START = "10.10.0.100"
|
|
42
42
|
DHCP_END = "10.10.0.200"
|
|
43
43
|
DNS_SERVER = "1.1.1.1"
|
|
44
|
+
CLOUDFLARE_DEB_URL = "https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb"
|
|
44
45
|
IFACES_PATH = Path("/etc/network/interfaces")
|
|
45
46
|
SYSCTL_PATH = Path("/etc/sysctl.d/99-portacode-forward.conf")
|
|
46
47
|
UNIT_DIR = Path("/etc/systemd/system")
|
|
47
48
|
_MANAGED_CONTAINERS_CACHE_TTL_S = 30.0
|
|
48
49
|
_MANAGED_CONTAINERS_CACHE: Dict[str, Any] = {"timestamp": 0.0, "summary": None}
|
|
49
50
|
_MANAGED_CONTAINERS_CACHE_LOCK = threading.Lock()
|
|
50
|
-
|
|
51
|
+
_CLOUDFLARE_TUNNEL_PROCESSES: Dict[str, subprocess.Popen] = {}
|
|
52
|
+
_CLOUDFLARE_TUNNELS_LOCK = threading.Lock()
|
|
51
53
|
|
|
52
54
|
ProgressCallback = Callable[[int, int, Dict[str, Any], str, Optional[Dict[str, Any]]], None]
|
|
53
55
|
|
|
@@ -64,7 +66,6 @@ def _emit_progress_event(
|
|
|
64
66
|
phase: str,
|
|
65
67
|
request_id: Optional[str],
|
|
66
68
|
details: Optional[Dict[str, Any]] = None,
|
|
67
|
-
on_behalf_of_device: Optional[str] = None,
|
|
68
69
|
) -> None:
|
|
69
70
|
loop = handler.context.get("event_loop")
|
|
70
71
|
if not loop or loop.is_closed():
|
|
@@ -89,8 +90,6 @@ def _emit_progress_event(
|
|
|
89
90
|
payload["request_id"] = request_id
|
|
90
91
|
if details:
|
|
91
92
|
payload["details"] = details
|
|
92
|
-
if on_behalf_of_device:
|
|
93
|
-
payload["on_behalf_of_device"] = str(on_behalf_of_device)
|
|
94
93
|
|
|
95
94
|
future = asyncio.run_coroutine_threadsafe(handler.send_response(payload), loop)
|
|
96
95
|
future.add_done_callback(
|
|
@@ -180,64 +179,6 @@ def _list_templates(client: Any, node: str, storages: Iterable[Dict[str, Any]])
|
|
|
180
179
|
return templates
|
|
181
180
|
|
|
182
181
|
|
|
183
|
-
def _build_proxmox_client_from_config(config: Dict[str, Any]):
|
|
184
|
-
user = config.get("user")
|
|
185
|
-
token_name = config.get("token_name")
|
|
186
|
-
token_value = config.get("token_value")
|
|
187
|
-
if not user or not token_name or not token_value:
|
|
188
|
-
raise RuntimeError("Proxmox API credentials are missing")
|
|
189
|
-
ProxmoxAPI = _ensure_proxmoxer()
|
|
190
|
-
return ProxmoxAPI(
|
|
191
|
-
config.get("host", DEFAULT_HOST),
|
|
192
|
-
user=user,
|
|
193
|
-
token_name=token_name,
|
|
194
|
-
token_value=token_value,
|
|
195
|
-
verify_ssl=config.get("verify_ssl", False),
|
|
196
|
-
timeout=30,
|
|
197
|
-
)
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
def _current_time_iso() -> str:
|
|
201
|
-
return datetime.now(timezone.utc).isoformat()
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
def _parse_iso_timestamp(value: str) -> Optional[datetime]:
|
|
205
|
-
if not value:
|
|
206
|
-
return None
|
|
207
|
-
text = value
|
|
208
|
-
if text.endswith("Z"):
|
|
209
|
-
text = text[:-1] + "+00:00"
|
|
210
|
-
try:
|
|
211
|
-
return datetime.fromisoformat(text)
|
|
212
|
-
except ValueError:
|
|
213
|
-
return None
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
def _templates_need_refresh(config: Dict[str, Any]) -> bool:
|
|
217
|
-
if not config or not config.get("token_value"):
|
|
218
|
-
return False
|
|
219
|
-
last = _parse_iso_timestamp(config.get("templates_last_refreshed") or "")
|
|
220
|
-
if not last:
|
|
221
|
-
return True
|
|
222
|
-
return (datetime.now(timezone.utc) - last).total_seconds() >= TEMPLATES_REFRESH_INTERVAL_S
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
def _ensure_templates_refreshed_on_startup(config: Dict[str, Any]) -> None:
|
|
226
|
-
if not _templates_need_refresh(config):
|
|
227
|
-
return
|
|
228
|
-
try:
|
|
229
|
-
client = _build_proxmox_client_from_config(config)
|
|
230
|
-
node = config.get("node") or _pick_node(client)
|
|
231
|
-
storages = client.nodes(node).storage.get()
|
|
232
|
-
templates = _list_templates(client, node, storages)
|
|
233
|
-
if templates:
|
|
234
|
-
config["templates"] = templates
|
|
235
|
-
config["templates_last_refreshed"] = _current_time_iso()
|
|
236
|
-
_save_config(config)
|
|
237
|
-
except Exception as exc:
|
|
238
|
-
logger.warning("Unable to refresh Proxmox templates on startup: %s", exc)
|
|
239
|
-
|
|
240
|
-
|
|
241
182
|
def _pick_storage(storages: Iterable[Dict[str, Any]]) -> str:
|
|
242
183
|
candidates = [s for s in storages if "rootdir" in s.get("content", "") and s.get("avail", 0) > 0]
|
|
243
184
|
if not candidates:
|
|
@@ -342,6 +283,25 @@ def _ensure_bridge(bridge: str = DEFAULT_BRIDGE) -> Dict[str, Any]:
|
|
|
342
283
|
return {"applied": True, "bridge": bridge, "message": f"Bridge {bridge} configured"}
|
|
343
284
|
|
|
344
285
|
|
|
286
|
+
def _ensure_cloudflared_installed() -> None:
|
|
287
|
+
if shutil.which("cloudflared"):
|
|
288
|
+
return
|
|
289
|
+
apt = shutil.which("apt-get")
|
|
290
|
+
if not apt:
|
|
291
|
+
raise RuntimeError("cloudflared is missing and apt-get is unavailable to install it")
|
|
292
|
+
download_dir = Path(tempfile.mkdtemp())
|
|
293
|
+
deb_path = download_dir / "cloudflared.deb"
|
|
294
|
+
try:
|
|
295
|
+
urllib.request.urlretrieve(CLOUDFLARE_DEB_URL, deb_path)
|
|
296
|
+
try:
|
|
297
|
+
_call_subprocess(["dpkg", "-i", str(deb_path)], check=True)
|
|
298
|
+
except subprocess.CalledProcessError:
|
|
299
|
+
_call_subprocess([apt, "install", "-f", "-y"], check=True)
|
|
300
|
+
_call_subprocess(["dpkg", "-i", str(deb_path)], check=True)
|
|
301
|
+
finally:
|
|
302
|
+
shutil.rmtree(download_dir, ignore_errors=True)
|
|
303
|
+
|
|
304
|
+
|
|
345
305
|
def _verify_connectivity(timeout: float = 5.0) -> bool:
|
|
346
306
|
try:
|
|
347
307
|
_call_subprocess(["/bin/ping", "-c", "2", "1.1.1.1"], check=True, timeout=timeout)
|
|
@@ -418,6 +378,7 @@ def _build_managed_containers_summary(records: List[Dict[str, Any]]) -> Dict[str
|
|
|
418
378
|
"cpu_share": cpu_share,
|
|
419
379
|
"created_at": record.get("created_at"),
|
|
420
380
|
"status": status,
|
|
381
|
+
"tunnel": record.get("tunnel"),
|
|
421
382
|
}
|
|
422
383
|
)
|
|
423
384
|
|
|
@@ -496,171 +457,48 @@ def _friendly_step_label(step_name: str) -> str:
|
|
|
496
457
|
return normalized.capitalize()
|
|
497
458
|
|
|
498
459
|
|
|
499
|
-
_NETWORK_WAIT_CMD = (
|
|
500
|
-
"count=0; "
|
|
501
|
-
"while [ \"$count\" -lt 20 ]; do "
|
|
502
|
-
" if command -v ip >/dev/null 2>&1 && ip route get 1.1.1.1 >/dev/null 2>&1; then break; fi; "
|
|
503
|
-
" if [ -f /proc/net/route ] && grep -q '^00000000' /proc/net/route >/dev/null 2>&1; then break; fi; "
|
|
504
|
-
" sleep 1; "
|
|
505
|
-
" count=$((count+1)); "
|
|
506
|
-
"done"
|
|
507
|
-
)
|
|
508
|
-
|
|
509
|
-
_PACKAGE_MANAGER_PROFILES: Dict[str, Dict[str, Any]] = {
|
|
510
|
-
"apt": {
|
|
511
|
-
"update_cmd": "apt-get update -y",
|
|
512
|
-
"update_step_name": "apt_update",
|
|
513
|
-
"install_cmd": "apt-get install -y python3 python3-pip sudo --fix-missing",
|
|
514
|
-
"install_step_name": "install_deps",
|
|
515
|
-
"update_retries": 4,
|
|
516
|
-
"install_retries": 5,
|
|
517
|
-
},
|
|
518
|
-
"dnf": {
|
|
519
|
-
"update_cmd": "dnf check-update || true",
|
|
520
|
-
"update_step_name": "dnf_update",
|
|
521
|
-
"install_cmd": "dnf install -y python3 python3-pip sudo",
|
|
522
|
-
"install_step_name": "install_deps",
|
|
523
|
-
"update_retries": 3,
|
|
524
|
-
"install_retries": 5,
|
|
525
|
-
},
|
|
526
|
-
"yum": {
|
|
527
|
-
"update_cmd": "yum makecache",
|
|
528
|
-
"update_step_name": "yum_update",
|
|
529
|
-
"install_cmd": "yum install -y python3 python3-pip sudo",
|
|
530
|
-
"install_step_name": "install_deps",
|
|
531
|
-
"update_retries": 3,
|
|
532
|
-
"install_retries": 5,
|
|
533
|
-
},
|
|
534
|
-
"apk": {
|
|
535
|
-
"update_cmd": "apk update",
|
|
536
|
-
"update_step_name": "apk_update",
|
|
537
|
-
"install_cmd": "apk add --no-cache python3 py3-pip sudo shadow",
|
|
538
|
-
"install_step_name": "install_deps",
|
|
539
|
-
"update_retries": 3,
|
|
540
|
-
"install_retries": 5,
|
|
541
|
-
},
|
|
542
|
-
"pacman": {
|
|
543
|
-
"update_cmd": "pacman -Sy --noconfirm",
|
|
544
|
-
"update_step_name": "pacman_update",
|
|
545
|
-
"install_cmd": "pacman -S --noconfirm python python-pip sudo",
|
|
546
|
-
"install_step_name": "install_deps",
|
|
547
|
-
"update_retries": 3,
|
|
548
|
-
"install_retries": 5,
|
|
549
|
-
},
|
|
550
|
-
"zypper": {
|
|
551
|
-
"update_cmd": "zypper refresh",
|
|
552
|
-
"update_step_name": "zypper_update",
|
|
553
|
-
"install_cmd": "zypper install -y python3 python3-pip sudo",
|
|
554
|
-
"install_step_name": "install_deps",
|
|
555
|
-
"update_retries": 3,
|
|
556
|
-
"install_retries": 5,
|
|
557
|
-
},
|
|
558
|
-
}
|
|
559
|
-
|
|
560
|
-
_UPDATE_RETRY_ON = [
|
|
561
|
-
"Temporary failure resolving",
|
|
562
|
-
"Could not resolve",
|
|
563
|
-
"Failed to fetch",
|
|
564
|
-
]
|
|
565
|
-
|
|
566
|
-
_INSTALL_RETRY_ON = [
|
|
567
|
-
"lock-frontend",
|
|
568
|
-
"Unable to acquire the dpkg frontend lock",
|
|
569
|
-
"Temporary failure resolving",
|
|
570
|
-
"Could not resolve",
|
|
571
|
-
"Failed to fetch",
|
|
572
|
-
]
|
|
573
|
-
|
|
574
|
-
|
|
575
460
|
def _build_bootstrap_steps(
|
|
576
461
|
user: str,
|
|
577
462
|
password: str,
|
|
578
463
|
ssh_key: str,
|
|
579
464
|
include_portacode_connect: bool = True,
|
|
580
|
-
package_manager: str = "apt",
|
|
581
465
|
) -> List[Dict[str, Any]]:
|
|
582
|
-
|
|
583
|
-
steps: List[Dict[str, Any]] = [
|
|
584
|
-
{"name": "wait_for_network", "cmd": _NETWORK_WAIT_CMD, "retries": 0},
|
|
585
|
-
]
|
|
586
|
-
update_cmd = profile.get("update_cmd")
|
|
587
|
-
if update_cmd:
|
|
588
|
-
steps.append(
|
|
589
|
-
{
|
|
590
|
-
"name": profile.get("update_step_name", "package_update"),
|
|
591
|
-
"cmd": update_cmd,
|
|
592
|
-
"retries": profile.get("update_retries", 3),
|
|
593
|
-
"retry_delay_s": 5,
|
|
594
|
-
"retry_on": _UPDATE_RETRY_ON,
|
|
595
|
-
}
|
|
596
|
-
)
|
|
597
|
-
install_cmd = profile.get("install_cmd")
|
|
598
|
-
if install_cmd:
|
|
599
|
-
steps.append(
|
|
600
|
-
{
|
|
601
|
-
"name": profile.get("install_step_name", "install_deps"),
|
|
602
|
-
"cmd": install_cmd,
|
|
603
|
-
"retries": profile.get("install_retries", 5),
|
|
604
|
-
"retry_delay_s": 5,
|
|
605
|
-
"retry_on": _INSTALL_RETRY_ON,
|
|
606
|
-
}
|
|
607
|
-
)
|
|
608
|
-
steps.extend(
|
|
609
|
-
[
|
|
610
|
-
{
|
|
611
|
-
"name": "user_exists",
|
|
612
|
-
"cmd": (
|
|
613
|
-
f"id -u {user} >/dev/null 2>&1 || "
|
|
614
|
-
f"(if command -v adduser >/dev/null 2>&1 && adduser --disabled-password --help >/dev/null 2>&1; then "
|
|
615
|
-
f" adduser --disabled-password --gecos '' {user}; "
|
|
616
|
-
"else "
|
|
617
|
-
f" useradd -m -s /bin/sh {user}; "
|
|
618
|
-
"fi)"
|
|
619
|
-
),
|
|
620
|
-
"retries": 0,
|
|
621
|
-
},
|
|
622
|
-
{
|
|
623
|
-
"name": "add_sudo",
|
|
624
|
-
"cmd": (
|
|
625
|
-
f"if command -v usermod >/dev/null 2>&1; then "
|
|
626
|
-
" if ! getent group sudo >/dev/null 2>&1; then "
|
|
627
|
-
" if command -v groupadd >/dev/null 2>&1; then "
|
|
628
|
-
" groupadd sudo >/dev/null 2>&1 || true; "
|
|
629
|
-
" fi; "
|
|
630
|
-
" fi; "
|
|
631
|
-
f" usermod -aG sudo {user}; "
|
|
632
|
-
"else "
|
|
633
|
-
" for grp in wheel sudo; do "
|
|
634
|
-
" if ! getent group \"$grp\" >/dev/null 2>&1 && command -v groupadd >/dev/null 2>&1; then "
|
|
635
|
-
" groupadd \"$grp\" >/dev/null 2>&1 || true; "
|
|
636
|
-
" fi; "
|
|
637
|
-
" addgroup \"$grp\" >/dev/null 2>&1 || true; "
|
|
638
|
-
f" addgroup {user} \"$grp\" >/dev/null 2>&1 || true; "
|
|
639
|
-
" done; "
|
|
640
|
-
"fi"
|
|
641
|
-
),
|
|
642
|
-
"retries": 0,
|
|
643
|
-
},
|
|
466
|
+
steps = [
|
|
644
467
|
{
|
|
645
|
-
"name": "
|
|
646
|
-
"cmd":
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
468
|
+
"name": "apt_update",
|
|
469
|
+
"cmd": "apt-get update -y",
|
|
470
|
+
"retries": 4,
|
|
471
|
+
"retry_delay_s": 5,
|
|
472
|
+
"retry_on": [
|
|
473
|
+
"Temporary failure resolving",
|
|
474
|
+
"Could not resolve",
|
|
475
|
+
"Failed to fetch",
|
|
476
|
+
],
|
|
651
477
|
},
|
|
652
|
-
|
|
653
|
-
|
|
478
|
+
{
|
|
479
|
+
"name": "install_deps",
|
|
480
|
+
"cmd": "apt-get install -y python3 python3-pip sudo --fix-missing",
|
|
481
|
+
"retries": 5,
|
|
482
|
+
"retry_delay_s": 5,
|
|
483
|
+
"retry_on": [
|
|
484
|
+
"lock-frontend",
|
|
485
|
+
"Unable to acquire the dpkg frontend lock",
|
|
486
|
+
"Temporary failure resolving",
|
|
487
|
+
"Could not resolve",
|
|
488
|
+
"Failed to fetch",
|
|
489
|
+
],
|
|
490
|
+
},
|
|
491
|
+
{"name": "user_exists", "cmd": f"id -u {user} >/dev/null 2>&1 || adduser --disabled-password --gecos '' {user}", "retries": 0},
|
|
492
|
+
{"name": "add_sudo", "cmd": f"usermod -aG sudo {user}", "retries": 0},
|
|
493
|
+
]
|
|
654
494
|
if password:
|
|
655
495
|
steps.append({"name": "set_password", "cmd": f"echo '{user}:{password}' | chpasswd", "retries": 0})
|
|
656
496
|
if ssh_key:
|
|
657
|
-
steps.append(
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
}
|
|
663
|
-
)
|
|
497
|
+
steps.append({
|
|
498
|
+
"name": "add_ssh_key",
|
|
499
|
+
"cmd": f"install -d -m 700 /home/{user}/.ssh && echo '{ssh_key}' >> /home/{user}/.ssh/authorized_keys && chown -R {user}:{user} /home/{user}/.ssh",
|
|
500
|
+
"retries": 0,
|
|
501
|
+
})
|
|
664
502
|
steps.extend(
|
|
665
503
|
[
|
|
666
504
|
{"name": "pip_upgrade", "cmd": "python3 -m pip install --upgrade pip", "retries": 0},
|
|
@@ -672,45 +510,6 @@ def _build_bootstrap_steps(
|
|
|
672
510
|
return steps
|
|
673
511
|
|
|
674
512
|
|
|
675
|
-
def _guess_package_manager_from_template(template: str) -> str:
|
|
676
|
-
normalized = (template or "").lower()
|
|
677
|
-
if "alpine" in normalized:
|
|
678
|
-
return "apk"
|
|
679
|
-
if "archlinux" in normalized:
|
|
680
|
-
return "pacman"
|
|
681
|
-
if "centos-7" in normalized:
|
|
682
|
-
return "yum"
|
|
683
|
-
if any(keyword in normalized for keyword in ("centos-8", "centos-9", "centos-9-stream", "centos-8-stream")):
|
|
684
|
-
return "dnf"
|
|
685
|
-
if any(keyword in normalized for keyword in ("rockylinux", "almalinux", "fedora")):
|
|
686
|
-
return "dnf"
|
|
687
|
-
if "opensuse" in normalized or "suse" in normalized:
|
|
688
|
-
return "zypper"
|
|
689
|
-
if any(keyword in normalized for keyword in ("debian", "ubuntu", "devuan", "turnkeylinux")):
|
|
690
|
-
return "apt"
|
|
691
|
-
if normalized.startswith("system/") and "linux" in normalized:
|
|
692
|
-
return "apt"
|
|
693
|
-
return "apt"
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
def _detect_package_manager(vmid: int) -> str:
|
|
697
|
-
candidates = [
|
|
698
|
-
("apt", "apt-get"),
|
|
699
|
-
("dnf", "dnf"),
|
|
700
|
-
("yum", "yum"),
|
|
701
|
-
("apk", "apk"),
|
|
702
|
-
("pacman", "pacman"),
|
|
703
|
-
("zypper", "zypper"),
|
|
704
|
-
]
|
|
705
|
-
for name, binary in candidates:
|
|
706
|
-
res = _run_pct(vmid, f"command -v {binary} >/dev/null 2>&1")
|
|
707
|
-
if res.get("returncode") == 0:
|
|
708
|
-
logger.debug("Detected package manager %s inside container %s", name, vmid)
|
|
709
|
-
return name
|
|
710
|
-
logger.warning("Unable to detect package manager inside container %s; defaulting to apt", vmid)
|
|
711
|
-
return "apt"
|
|
712
|
-
|
|
713
|
-
|
|
714
513
|
def _get_storage_type(storages: Iterable[Dict[str, Any]], storage_name: str) -> str:
|
|
715
514
|
for entry in storages:
|
|
716
515
|
if entry.get("storage") == storage_name:
|
|
@@ -817,6 +616,88 @@ def _remove_container_record(vmid: int) -> None:
|
|
|
817
616
|
_invalidate_managed_containers_cache()
|
|
818
617
|
|
|
819
618
|
|
|
619
|
+
def _update_container_tunnel(vmid: int, tunnel: Optional[Dict[str, Any]]) -> None:
|
|
620
|
+
record = _read_container_record(vmid)
|
|
621
|
+
if tunnel:
|
|
622
|
+
record["tunnel"] = tunnel
|
|
623
|
+
else:
|
|
624
|
+
record.pop("tunnel", None)
|
|
625
|
+
_write_container_record(vmid, record)
|
|
626
|
+
|
|
627
|
+
|
|
628
|
+
def _ensure_cloudflare_token(config: Dict[str, Any]) -> str:
|
|
629
|
+
cloudflare = config.get("cloudflare") or {}
|
|
630
|
+
token = cloudflare.get("api_token")
|
|
631
|
+
if not token:
|
|
632
|
+
raise RuntimeError("Cloudflare API token is not configured.")
|
|
633
|
+
return token
|
|
634
|
+
|
|
635
|
+
|
|
636
|
+
def _launch_container_tunnel(proxmox: Any, node: str, vmid: int, tunnel: Dict[str, Any]) -> Dict[str, Any]:
|
|
637
|
+
port = int(tunnel.get("container_port") or 0)
|
|
638
|
+
if not port:
|
|
639
|
+
raise ValueError("container_port is required to create a tunnel.")
|
|
640
|
+
hostname = tunnel.get("url")
|
|
641
|
+
if not hostname:
|
|
642
|
+
raise ValueError("cloudflare_url is required to expose the tunnel.")
|
|
643
|
+
protocol = (tunnel.get("protocol") or "http").lower()
|
|
644
|
+
ip_address = _resolve_container_ip(proxmox, node, vmid)
|
|
645
|
+
target_url = f"{protocol}://{ip_address}:{port}"
|
|
646
|
+
name = tunnel.get("name") or _build_tunnel_name(vmid, port)
|
|
647
|
+
_stop_cloudflare_process(name)
|
|
648
|
+
proc = _start_cloudflare_process(name, hostname, target_url)
|
|
649
|
+
updated = {
|
|
650
|
+
"name": name,
|
|
651
|
+
"container_port": port,
|
|
652
|
+
"url": hostname,
|
|
653
|
+
"protocol": protocol,
|
|
654
|
+
"status": "running",
|
|
655
|
+
"pid": proc.pid,
|
|
656
|
+
"target_ip": ip_address,
|
|
657
|
+
"target_url": target_url,
|
|
658
|
+
"last_updated": datetime.utcnow().isoformat() + "Z",
|
|
659
|
+
}
|
|
660
|
+
_update_container_tunnel(vmid, updated)
|
|
661
|
+
return updated
|
|
662
|
+
|
|
663
|
+
|
|
664
|
+
def _stop_container_tunnel(vmid: int) -> None:
|
|
665
|
+
try:
|
|
666
|
+
record = _read_container_record(vmid)
|
|
667
|
+
except FileNotFoundError:
|
|
668
|
+
return
|
|
669
|
+
tunnel = record.get("tunnel")
|
|
670
|
+
if not tunnel:
|
|
671
|
+
return
|
|
672
|
+
name = tunnel.get("name") or _build_tunnel_name(vmid, int(tunnel.get("container_port") or 0))
|
|
673
|
+
stopped = _stop_cloudflare_process(name)
|
|
674
|
+
if stopped or tunnel.get("status") != "stopped":
|
|
675
|
+
tunnel_update = {
|
|
676
|
+
**tunnel,
|
|
677
|
+
"status": "stopped",
|
|
678
|
+
"pid": None,
|
|
679
|
+
"last_updated": datetime.utcnow().isoformat() + "Z",
|
|
680
|
+
}
|
|
681
|
+
_update_container_tunnel(vmid, tunnel_update)
|
|
682
|
+
|
|
683
|
+
|
|
684
|
+
def _remove_container_tunnel_state(vmid: int) -> None:
|
|
685
|
+
_stop_container_tunnel(vmid)
|
|
686
|
+
_update_container_tunnel(vmid, None)
|
|
687
|
+
|
|
688
|
+
|
|
689
|
+
def _ensure_container_tunnel_running(proxmox: Any, node: str, vmid: int) -> None:
|
|
690
|
+
try:
|
|
691
|
+
record = _read_container_record(vmid)
|
|
692
|
+
except FileNotFoundError:
|
|
693
|
+
return
|
|
694
|
+
tunnel = record.get("tunnel")
|
|
695
|
+
if not tunnel:
|
|
696
|
+
return
|
|
697
|
+
_ensure_cloudflare_token(_load_config())
|
|
698
|
+
_launch_container_tunnel(proxmox, node, vmid, tunnel)
|
|
699
|
+
|
|
700
|
+
|
|
820
701
|
def _build_container_payload(message: Dict[str, Any], config: Dict[str, Any]) -> Dict[str, Any]:
|
|
821
702
|
templates = config.get("templates") or []
|
|
822
703
|
default_template = templates[0] if templates else ""
|
|
@@ -898,8 +779,7 @@ def _connect_proxmox(config: Dict[str, Any]) -> Any:
|
|
|
898
779
|
|
|
899
780
|
|
|
900
781
|
def _run_pct(vmid: int, cmd: str, input_text: Optional[str] = None) -> Dict[str, Any]:
|
|
901
|
-
|
|
902
|
-
full = ["pct", "exec", str(vmid), "--", shell, "-c", cmd]
|
|
782
|
+
full = ["pct", "exec", str(vmid), "--", "bash", "-lc", cmd]
|
|
903
783
|
start = time.time()
|
|
904
784
|
proc = subprocess.run(full, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=input_text)
|
|
905
785
|
return {
|
|
@@ -911,10 +791,6 @@ def _run_pct(vmid: int, cmd: str, input_text: Optional[str] = None) -> Dict[str,
|
|
|
911
791
|
}
|
|
912
792
|
|
|
913
793
|
|
|
914
|
-
def _su_command(user: str, command: str) -> str:
|
|
915
|
-
return f"su - {user} -s /bin/sh -c {shlex.quote(command)}"
|
|
916
|
-
|
|
917
|
-
|
|
918
794
|
def _run_pct_check(vmid: int, cmd: str) -> Dict[str, Any]:
|
|
919
795
|
res = _run_pct(vmid, cmd)
|
|
920
796
|
if res["returncode"] != 0:
|
|
@@ -937,6 +813,74 @@ def _run_pct_push(vmid: int, src: str, dest: str) -> subprocess.CompletedProcess
|
|
|
937
813
|
return _call_subprocess(["pct", "push", str(vmid), src, dest])
|
|
938
814
|
|
|
939
815
|
|
|
816
|
+
def _build_tunnel_name(vmid: int, port: int) -> str:
|
|
817
|
+
return f"portacode-ct{vmid}-{port}"
|
|
818
|
+
|
|
819
|
+
|
|
820
|
+
def _get_cloudflared_binary() -> str:
|
|
821
|
+
binary = shutil.which("cloudflared")
|
|
822
|
+
if not binary:
|
|
823
|
+
raise RuntimeError(
|
|
824
|
+
"cloudflared is required for Cloudflare tunnels but was not found on PATH. "
|
|
825
|
+
"Install cloudflared and run 'cloudflared tunnel login' before creating tunnels."
|
|
826
|
+
)
|
|
827
|
+
return binary
|
|
828
|
+
|
|
829
|
+
|
|
830
|
+
def _start_cloudflare_process(name: str, hostname: str, target_url: str) -> subprocess.Popen:
|
|
831
|
+
binary = _get_cloudflared_binary()
|
|
832
|
+
cmd = [
|
|
833
|
+
binary,
|
|
834
|
+
"tunnel",
|
|
835
|
+
"--hostname",
|
|
836
|
+
hostname,
|
|
837
|
+
"--url",
|
|
838
|
+
target_url,
|
|
839
|
+
"--no-autoupdate",
|
|
840
|
+
]
|
|
841
|
+
proc = subprocess.Popen(
|
|
842
|
+
cmd,
|
|
843
|
+
stdout=subprocess.DEVNULL,
|
|
844
|
+
stderr=subprocess.DEVNULL,
|
|
845
|
+
)
|
|
846
|
+
with _CLOUDFLARE_TUNNELS_LOCK:
|
|
847
|
+
_CLOUDFLARE_TUNNEL_PROCESSES[name] = proc
|
|
848
|
+
return proc
|
|
849
|
+
|
|
850
|
+
|
|
851
|
+
def _stop_cloudflare_process(name: str) -> bool:
|
|
852
|
+
with _CLOUDFLARE_TUNNELS_LOCK:
|
|
853
|
+
proc = _CLOUDFLARE_TUNNEL_PROCESSES.pop(name, None)
|
|
854
|
+
if not proc:
|
|
855
|
+
return False
|
|
856
|
+
try:
|
|
857
|
+
proc.terminate()
|
|
858
|
+
proc.wait(timeout=5)
|
|
859
|
+
except subprocess.TimeoutExpired:
|
|
860
|
+
proc.kill()
|
|
861
|
+
proc.wait()
|
|
862
|
+
return True
|
|
863
|
+
|
|
864
|
+
|
|
865
|
+
def _resolve_container_ip(proxmox: Any, node: str, vmid: int) -> str:
|
|
866
|
+
status = proxmox.nodes(node).lxc(vmid).status.current.get()
|
|
867
|
+
if status:
|
|
868
|
+
ip_field = status.get("ip")
|
|
869
|
+
if isinstance(ip_field, list):
|
|
870
|
+
for entry in ip_field:
|
|
871
|
+
if isinstance(entry, str) and "." in entry:
|
|
872
|
+
return entry.split("/")[0]
|
|
873
|
+
elif isinstance(ip_field, str) and "." in ip_field:
|
|
874
|
+
return ip_field.split("/")[0]
|
|
875
|
+
res = _run_pct_exec(vmid, ["ip", "-4", "-o", "addr", "show", "eth0"])
|
|
876
|
+
line = res.stdout.splitlines()[0] if res.stdout else ""
|
|
877
|
+
parts = line.split()
|
|
878
|
+
if len(parts) >= 4:
|
|
879
|
+
addr = parts[3]
|
|
880
|
+
return addr.split("/")[0]
|
|
881
|
+
raise RuntimeError("Unable to determine container IP address")
|
|
882
|
+
|
|
883
|
+
|
|
940
884
|
def _push_bytes_to_container(
|
|
941
885
|
vmid: int, user: str, path: str, data: bytes, mode: int = 0o600
|
|
942
886
|
) -> None:
|
|
@@ -974,7 +918,7 @@ def _push_bytes_to_container(
|
|
|
974
918
|
|
|
975
919
|
|
|
976
920
|
def _resolve_portacode_key_dir(vmid: int, user: str) -> str:
|
|
977
|
-
data_dir_cmd =
|
|
921
|
+
data_dir_cmd = f"su - {user} -c 'echo -n ${{XDG_DATA_HOME:-$HOME/.local/share}}'"
|
|
978
922
|
data_home = _run_pct_check(vmid, data_dir_cmd)["stdout"].strip()
|
|
979
923
|
portacode_dir = f"{data_home}/portacode"
|
|
980
924
|
_run_pct_exec_check(vmid, ["mkdir", "-p", portacode_dir])
|
|
@@ -991,19 +935,18 @@ def _deploy_device_keypair(vmid: int, user: str, private_key: str, public_key: s
|
|
|
991
935
|
|
|
992
936
|
|
|
993
937
|
def _portacode_connect_and_read_key(vmid: int, user: str, timeout_s: int = 10) -> Dict[str, Any]:
|
|
994
|
-
|
|
995
|
-
cmd = ["pct", "exec", str(vmid), "--", "/bin/sh", "-c", su_connect_cmd]
|
|
938
|
+
cmd = ["pct", "exec", str(vmid), "--", "bash", "-lc", f"su - {user} -c 'portacode connect'"]
|
|
996
939
|
proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
|
997
940
|
start = time.time()
|
|
998
941
|
|
|
999
|
-
data_dir_cmd =
|
|
942
|
+
data_dir_cmd = f"su - {user} -c 'echo -n ${{XDG_DATA_HOME:-$HOME/.local/share}}'"
|
|
1000
943
|
data_dir = _run_pct_check(vmid, data_dir_cmd)["stdout"].strip()
|
|
1001
944
|
key_dir = f"{data_dir}/portacode/keys"
|
|
1002
945
|
pub_path = f"{key_dir}/id_portacode.pub"
|
|
1003
946
|
priv_path = f"{key_dir}/id_portacode"
|
|
1004
947
|
|
|
1005
948
|
def file_size(path: str) -> Optional[int]:
|
|
1006
|
-
stat_cmd =
|
|
949
|
+
stat_cmd = f"su - {user} -c 'test -s {path} && stat -c %s {path}'"
|
|
1007
950
|
res = _run_pct(vmid, stat_cmd)
|
|
1008
951
|
if res["returncode"] != 0:
|
|
1009
952
|
return None
|
|
@@ -1061,7 +1004,7 @@ def _portacode_connect_and_read_key(vmid: int, user: str, timeout_s: int = 10) -
|
|
|
1061
1004
|
final_pub = file_size(pub_path)
|
|
1062
1005
|
final_priv = file_size(priv_path)
|
|
1063
1006
|
if final_pub and final_priv:
|
|
1064
|
-
key_res = _run_pct(vmid,
|
|
1007
|
+
key_res = _run_pct(vmid, f"su - {user} -c 'cat {pub_path}'")
|
|
1065
1008
|
if not process_exited:
|
|
1066
1009
|
proc.terminate()
|
|
1067
1010
|
try:
|
|
@@ -1101,7 +1044,7 @@ def _portacode_connect_and_read_key(vmid: int, user: str, timeout_s: int = 10) -
|
|
|
1101
1044
|
except subprocess.TimeoutExpired:
|
|
1102
1045
|
proc.kill()
|
|
1103
1046
|
|
|
1104
|
-
key_res = _run_pct(vmid,
|
|
1047
|
+
key_res = _run_pct(vmid, f"su - {user} -c 'cat {pub_path}'")
|
|
1105
1048
|
return {
|
|
1106
1049
|
"ok": True,
|
|
1107
1050
|
"public_key": key_res["stdout"].strip(),
|
|
@@ -1194,16 +1137,7 @@ def _bootstrap_portacode(
|
|
|
1194
1137
|
total_steps: Optional[int] = None,
|
|
1195
1138
|
default_public_key: Optional[str] = None,
|
|
1196
1139
|
) -> Tuple[str, List[Dict[str, Any]]]:
|
|
1197
|
-
if steps is not None
|
|
1198
|
-
actual_steps = steps
|
|
1199
|
-
else:
|
|
1200
|
-
detected_manager = _detect_package_manager(vmid)
|
|
1201
|
-
actual_steps = _build_bootstrap_steps(
|
|
1202
|
-
user,
|
|
1203
|
-
password,
|
|
1204
|
-
ssh_key,
|
|
1205
|
-
package_manager=detected_manager,
|
|
1206
|
-
)
|
|
1140
|
+
actual_steps = steps if steps is not None else _build_bootstrap_steps(user, password, ssh_key)
|
|
1207
1141
|
results, ok = _run_setup_steps(
|
|
1208
1142
|
vmid,
|
|
1209
1143
|
actual_steps,
|
|
@@ -1227,15 +1161,6 @@ def _bootstrap_portacode(
|
|
|
1227
1161
|
else:
|
|
1228
1162
|
command_text = str(command)
|
|
1229
1163
|
command_suffix = f" command={command_text}" if command_text else ""
|
|
1230
|
-
stdout = details.get("stdout")
|
|
1231
|
-
stderr = details.get("stderr")
|
|
1232
|
-
if stdout or stderr:
|
|
1233
|
-
logger.debug(
|
|
1234
|
-
"Bootstrap command output%s%s%s",
|
|
1235
|
-
f" stdout={stdout!r}" if stdout else "",
|
|
1236
|
-
" " if stdout and stderr else "",
|
|
1237
|
-
f"stderr={stderr!r}" if stderr else "",
|
|
1238
|
-
)
|
|
1239
1164
|
if summary:
|
|
1240
1165
|
logger.warning(
|
|
1241
1166
|
"Portacode bootstrap failure summary=%s%s%s",
|
|
@@ -1243,15 +1168,10 @@ def _bootstrap_portacode(
|
|
|
1243
1168
|
f" history_len={len(history)}" if history else "",
|
|
1244
1169
|
f" command={command_text}" if command_text else "",
|
|
1245
1170
|
)
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
f" stderr={stderr!r}" if stderr else "",
|
|
1251
|
-
)
|
|
1252
|
-
raise RuntimeError(
|
|
1253
|
-
f"Portacode bootstrap steps failed: {summary}{history_snippet}{command_suffix}"
|
|
1254
|
-
)
|
|
1171
|
+
raise RuntimeError(
|
|
1172
|
+
f"Portacode bootstrap steps failed: {summary}{history_snippet}{command_suffix}"
|
|
1173
|
+
)
|
|
1174
|
+
raise RuntimeError("Portacode bootstrap steps failed.")
|
|
1255
1175
|
key_step = next((entry for entry in results if entry.get("name") == "portacode_connect"), None)
|
|
1256
1176
|
public_key = key_step.get("public_key") if key_step else default_public_key
|
|
1257
1177
|
if not public_key:
|
|
@@ -1259,6 +1179,12 @@ def _bootstrap_portacode(
|
|
|
1259
1179
|
return public_key, results
|
|
1260
1180
|
|
|
1261
1181
|
|
|
1182
|
+
def _build_cloudflare_snapshot(cloudflare_config: Dict[str, Any] | None) -> Dict[str, Any]:
|
|
1183
|
+
if not cloudflare_config:
|
|
1184
|
+
return {"configured": False}
|
|
1185
|
+
return {"configured": bool(cloudflare_config.get("api_token"))}
|
|
1186
|
+
|
|
1187
|
+
|
|
1262
1188
|
def build_snapshot(config: Dict[str, Any]) -> Dict[str, Any]:
|
|
1263
1189
|
network = config.get("network", {})
|
|
1264
1190
|
base_network = {
|
|
@@ -1266,9 +1192,13 @@ def build_snapshot(config: Dict[str, Any]) -> Dict[str, Any]:
|
|
|
1266
1192
|
"message": network.get("message"),
|
|
1267
1193
|
"bridge": network.get("bridge", DEFAULT_BRIDGE),
|
|
1268
1194
|
}
|
|
1195
|
+
cloudflare_snapshot = _build_cloudflare_snapshot(config.get("cloudflare"))
|
|
1269
1196
|
if not config:
|
|
1270
|
-
return {
|
|
1271
|
-
|
|
1197
|
+
return {
|
|
1198
|
+
"configured": False,
|
|
1199
|
+
"network": base_network,
|
|
1200
|
+
"cloudflare": cloudflare_snapshot,
|
|
1201
|
+
}
|
|
1272
1202
|
return {
|
|
1273
1203
|
"configured": True,
|
|
1274
1204
|
"host": config.get("host"),
|
|
@@ -1279,18 +1209,54 @@ def build_snapshot(config: Dict[str, Any]) -> Dict[str, Any]:
|
|
|
1279
1209
|
"templates": config.get("templates") or [],
|
|
1280
1210
|
"last_verified": config.get("last_verified"),
|
|
1281
1211
|
"network": base_network,
|
|
1212
|
+
"cloudflare": cloudflare_snapshot,
|
|
1282
1213
|
}
|
|
1283
1214
|
|
|
1284
1215
|
|
|
1285
|
-
def
|
|
1216
|
+
def _resolve_proxmox_credentials(
|
|
1217
|
+
token_identifier: Optional[str],
|
|
1218
|
+
token_value: Optional[str],
|
|
1219
|
+
existing: Dict[str, Any],
|
|
1220
|
+
) -> Tuple[str, str, str]:
|
|
1221
|
+
if token_identifier:
|
|
1222
|
+
if not token_value:
|
|
1223
|
+
raise ValueError("token_value is required when providing a new token_identifier")
|
|
1224
|
+
user, token_name = _parse_token(token_identifier)
|
|
1225
|
+
return user, token_name, token_value
|
|
1226
|
+
if existing and existing.get("user") and existing.get("token_name") and existing.get("token_value"):
|
|
1227
|
+
return existing["user"], existing["token_name"], existing["token_value"]
|
|
1228
|
+
raise ValueError("Proxmox token identifier and value are required when no existing configuration is available")
|
|
1229
|
+
|
|
1230
|
+
|
|
1231
|
+
def _build_cloudflare_config(existing: Dict[str, Any], api_token: Optional[str]) -> Dict[str, Any]:
|
|
1232
|
+
cloudflare: Dict[str, Any] = dict(existing.get("cloudflare", {}) or {})
|
|
1233
|
+
if api_token:
|
|
1234
|
+
cloudflare["api_token"] = api_token
|
|
1235
|
+
if cloudflare.get("api_token"):
|
|
1236
|
+
cloudflare["configured"] = True
|
|
1237
|
+
elif "configured" in cloudflare:
|
|
1238
|
+
cloudflare.pop("configured", None)
|
|
1239
|
+
return cloudflare
|
|
1240
|
+
|
|
1241
|
+
|
|
1242
|
+
def configure_infrastructure(
|
|
1243
|
+
token_identifier: Optional[str] = None,
|
|
1244
|
+
token_value: Optional[str] = None,
|
|
1245
|
+
verify_ssl: Optional[bool] = None,
|
|
1246
|
+
cloudflare_api_token: Optional[str] = None,
|
|
1247
|
+
) -> Dict[str, Any]:
|
|
1286
1248
|
ProxmoxAPI = _ensure_proxmoxer()
|
|
1287
|
-
|
|
1249
|
+
existing = _load_config()
|
|
1250
|
+
user, token_name, resolved_token_value = _resolve_proxmox_credentials(
|
|
1251
|
+
token_identifier, token_value, existing
|
|
1252
|
+
)
|
|
1253
|
+
actual_verify_ssl = verify_ssl if verify_ssl is not None else existing.get("verify_ssl", False)
|
|
1288
1254
|
client = ProxmoxAPI(
|
|
1289
1255
|
DEFAULT_HOST,
|
|
1290
1256
|
user=user,
|
|
1291
1257
|
token_name=token_name,
|
|
1292
|
-
token_value=
|
|
1293
|
-
verify_ssl=
|
|
1258
|
+
token_value=resolved_token_value,
|
|
1259
|
+
verify_ssl=actual_verify_ssl,
|
|
1294
1260
|
timeout=30,
|
|
1295
1261
|
)
|
|
1296
1262
|
node = _pick_node(client)
|
|
@@ -1298,32 +1264,36 @@ def configure_infrastructure(token_identifier: str, token_value: str, verify_ssl
|
|
|
1298
1264
|
storages = client.nodes(node).storage.get()
|
|
1299
1265
|
default_storage = _pick_storage(storages)
|
|
1300
1266
|
templates = _list_templates(client, node, storages)
|
|
1301
|
-
network
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1267
|
+
network = dict(existing.get("network", {}) or {})
|
|
1268
|
+
_ensure_cloudflared_installed()
|
|
1269
|
+
if not network.get("applied"):
|
|
1270
|
+
try:
|
|
1271
|
+
network = _ensure_bridge()
|
|
1272
|
+
# Wait for network convergence before validating connectivity
|
|
1273
|
+
time.sleep(2)
|
|
1274
|
+
if not _verify_connectivity():
|
|
1275
|
+
raise RuntimeError("Connectivity check failed; bridge reverted")
|
|
1276
|
+
network["health"] = "healthy"
|
|
1277
|
+
except Exception as exc:
|
|
1278
|
+
logger.warning("Bridge setup failed; reverting previous changes: %s", exc)
|
|
1279
|
+
_revert_bridge()
|
|
1280
|
+
raise
|
|
1313
1281
|
config = {
|
|
1314
1282
|
"host": DEFAULT_HOST,
|
|
1315
1283
|
"node": node,
|
|
1316
1284
|
"user": user,
|
|
1317
1285
|
"token_name": token_name,
|
|
1318
|
-
"token_value":
|
|
1319
|
-
"verify_ssl":
|
|
1286
|
+
"token_value": resolved_token_value,
|
|
1287
|
+
"verify_ssl": actual_verify_ssl,
|
|
1320
1288
|
"default_storage": default_storage,
|
|
1321
|
-
"last_verified": datetime.utcnow().isoformat() + "Z",
|
|
1322
1289
|
"templates": templates,
|
|
1323
|
-
"
|
|
1290
|
+
"last_verified": datetime.utcnow().isoformat() + "Z",
|
|
1324
1291
|
"network": network,
|
|
1325
1292
|
"node_status": status,
|
|
1326
1293
|
}
|
|
1294
|
+
cloudflare = _build_cloudflare_config(existing, cloudflare_api_token)
|
|
1295
|
+
if cloudflare:
|
|
1296
|
+
config["cloudflare"] = cloudflare
|
|
1327
1297
|
_save_config(config)
|
|
1328
1298
|
snapshot = build_snapshot(config)
|
|
1329
1299
|
snapshot["node_status"] = status
|
|
@@ -1374,7 +1344,7 @@ def _instantiate_container(proxmox: Any, node: str, payload: Dict[str, Any]) ->
|
|
|
1374
1344
|
memory=int(payload["ram_mib"]),
|
|
1375
1345
|
swap=int(payload.get("swap_mb", 0)),
|
|
1376
1346
|
cores=max(int(payload.get("cores", 1)), 1),
|
|
1377
|
-
|
|
1347
|
+
cpuunits=int(payload.get("cpuunits", 256)),
|
|
1378
1348
|
net0=payload["net0"],
|
|
1379
1349
|
unprivileged=int(payload.get("unprivileged", 1)),
|
|
1380
1350
|
description=payload.get("description", MANAGED_MARKER),
|
|
@@ -1382,13 +1352,6 @@ def _instantiate_container(proxmox: Any, node: str, payload: Dict[str, Any]) ->
|
|
|
1382
1352
|
ssh_public_keys=payload.get("ssh_public_key") or None,
|
|
1383
1353
|
)
|
|
1384
1354
|
status, elapsed = _wait_for_task(proxmox, node, upid)
|
|
1385
|
-
exitstatus = (status or {}).get("exitstatus")
|
|
1386
|
-
if exitstatus and exitstatus.upper() != "OK":
|
|
1387
|
-
msg = status.get("status") or "unknown error"
|
|
1388
|
-
details = status.get("error") or status.get("errmsg") or status.get("description") or status
|
|
1389
|
-
raise RuntimeError(
|
|
1390
|
-
f"Container creation task failed ({exitstatus}): {msg} details={details}"
|
|
1391
|
-
)
|
|
1392
1355
|
return vmid, elapsed
|
|
1393
1356
|
except ResourceException as exc:
|
|
1394
1357
|
raise RuntimeError(f"Failed to create container: {exc}") from exc
|
|
@@ -1404,24 +1367,16 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1404
1367
|
def execute(self, message: Dict[str, Any]) -> Dict[str, Any]:
|
|
1405
1368
|
logger.info("create_proxmox_container command received")
|
|
1406
1369
|
request_id = message.get("request_id")
|
|
1407
|
-
|
|
1408
|
-
device_id = str(raw_device_id or "").strip()
|
|
1409
|
-
if not device_id:
|
|
1410
|
-
raise ValueError("device_id is required to create a container")
|
|
1370
|
+
device_id = message.get("device_id")
|
|
1411
1371
|
device_public_key = (message.get("device_public_key") or "").strip()
|
|
1412
1372
|
device_private_key = (message.get("device_private_key") or "").strip()
|
|
1413
1373
|
has_device_keypair = bool(device_public_key and device_private_key)
|
|
1414
|
-
config_guess = _load_config()
|
|
1415
|
-
template_candidates = config_guess.get("templates") or []
|
|
1416
|
-
template_hint = (message.get("template") or (template_candidates[0] if template_candidates else "")).strip()
|
|
1417
|
-
package_manager = _guess_package_manager_from_template(template_hint)
|
|
1418
1374
|
bootstrap_user, bootstrap_password, bootstrap_ssh_key = _get_provisioning_user_info(message)
|
|
1419
1375
|
bootstrap_steps = _build_bootstrap_steps(
|
|
1420
1376
|
bootstrap_user,
|
|
1421
1377
|
bootstrap_password,
|
|
1422
1378
|
bootstrap_ssh_key,
|
|
1423
1379
|
include_portacode_connect=not has_device_keypair,
|
|
1424
|
-
package_manager=package_manager,
|
|
1425
1380
|
)
|
|
1426
1381
|
total_steps = 3 + len(bootstrap_steps) + 2
|
|
1427
1382
|
current_step_index = 1
|
|
@@ -1444,7 +1399,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1444
1399
|
message=start_message,
|
|
1445
1400
|
phase="lifecycle",
|
|
1446
1401
|
request_id=request_id,
|
|
1447
|
-
on_behalf_of_device=device_id,
|
|
1448
1402
|
)
|
|
1449
1403
|
try:
|
|
1450
1404
|
result = action()
|
|
@@ -1460,7 +1414,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1460
1414
|
phase="lifecycle",
|
|
1461
1415
|
request_id=request_id,
|
|
1462
1416
|
details={"error": str(exc)},
|
|
1463
|
-
on_behalf_of_device=device_id,
|
|
1464
1417
|
)
|
|
1465
1418
|
raise
|
|
1466
1419
|
_emit_progress_event(
|
|
@@ -1473,7 +1426,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1473
1426
|
message=success_message,
|
|
1474
1427
|
phase="lifecycle",
|
|
1475
1428
|
request_id=request_id,
|
|
1476
|
-
on_behalf_of_device=device_id,
|
|
1477
1429
|
)
|
|
1478
1430
|
current_step_index += 1
|
|
1479
1431
|
return result
|
|
@@ -1500,8 +1452,7 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1500
1452
|
proxmox = _connect_proxmox(config)
|
|
1501
1453
|
node = config.get("node") or DEFAULT_NODE_NAME
|
|
1502
1454
|
payload = _build_container_payload(message, config)
|
|
1503
|
-
payload["
|
|
1504
|
-
payload["cores"] = int(max(math.ceil(payload["cpus"]), 1))
|
|
1455
|
+
payload["cpuunits"] = max(int(payload["cpus"] * 1024), 10)
|
|
1505
1456
|
payload["memory"] = int(payload["ram_mib"])
|
|
1506
1457
|
payload["node"] = node
|
|
1507
1458
|
logger.debug(
|
|
@@ -1516,7 +1467,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1516
1467
|
payload["vmid"] = vmid
|
|
1517
1468
|
payload["created_at"] = datetime.utcnow().isoformat() + "Z"
|
|
1518
1469
|
payload["status"] = "creating"
|
|
1519
|
-
payload["device_id"] = device_id
|
|
1520
1470
|
_write_container_record(vmid, payload)
|
|
1521
1471
|
return proxmox, node, vmid, payload
|
|
1522
1472
|
|
|
@@ -1577,7 +1527,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1577
1527
|
phase="bootstrap",
|
|
1578
1528
|
request_id=request_id,
|
|
1579
1529
|
details=details or None,
|
|
1580
|
-
on_behalf_of_device=device_id,
|
|
1581
1530
|
)
|
|
1582
1531
|
|
|
1583
1532
|
public_key, steps = _bootstrap_portacode(
|
|
@@ -1621,7 +1570,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1621
1570
|
message="Notifying the server of the new device…",
|
|
1622
1571
|
phase="service",
|
|
1623
1572
|
request_id=request_id,
|
|
1624
|
-
on_behalf_of_device=device_id,
|
|
1625
1573
|
)
|
|
1626
1574
|
_emit_progress_event(
|
|
1627
1575
|
self,
|
|
@@ -1633,7 +1581,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1633
1581
|
message="Authentication metadata recorded.",
|
|
1634
1582
|
phase="service",
|
|
1635
1583
|
request_id=request_id,
|
|
1636
|
-
on_behalf_of_device=device_id,
|
|
1637
1584
|
)
|
|
1638
1585
|
|
|
1639
1586
|
install_step = service_start_index + 1
|
|
@@ -1648,10 +1595,9 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1648
1595
|
message="Running sudo portacode service install…",
|
|
1649
1596
|
phase="service",
|
|
1650
1597
|
request_id=request_id,
|
|
1651
|
-
on_behalf_of_device=device_id,
|
|
1652
1598
|
)
|
|
1653
1599
|
|
|
1654
|
-
cmd =
|
|
1600
|
+
cmd = f"su - {payload['username']} -c 'sudo -S portacode service install'"
|
|
1655
1601
|
res = _run_pct(vmid, cmd, input_text=payload["password"] + "\n")
|
|
1656
1602
|
|
|
1657
1603
|
if res["returncode"] != 0:
|
|
@@ -1669,7 +1615,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1669
1615
|
"stderr": res.get("stderr"),
|
|
1670
1616
|
"stdout": res.get("stdout"),
|
|
1671
1617
|
},
|
|
1672
|
-
on_behalf_of_device=device_id,
|
|
1673
1618
|
)
|
|
1674
1619
|
raise RuntimeError(res.get("stderr") or res.get("stdout") or "Service install failed")
|
|
1675
1620
|
|
|
@@ -1683,7 +1628,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1683
1628
|
message="Portacode service install finished.",
|
|
1684
1629
|
phase="service",
|
|
1685
1630
|
request_id=request_id,
|
|
1686
|
-
on_behalf_of_device=device_id,
|
|
1687
1631
|
)
|
|
1688
1632
|
|
|
1689
1633
|
logger.info("create_proxmox_container: portacode service install completed inside ct %s", vmid)
|
|
@@ -1707,7 +1651,6 @@ class CreateProxmoxContainerHandler(SyncHandler):
|
|
|
1707
1651
|
},
|
|
1708
1652
|
"setup_steps": steps,
|
|
1709
1653
|
"device_id": device_id,
|
|
1710
|
-
"on_behalf_of_device": device_id,
|
|
1711
1654
|
"service_installed": service_installed,
|
|
1712
1655
|
}
|
|
1713
1656
|
|
|
@@ -1733,9 +1676,6 @@ class StartPortacodeServiceHandler(SyncHandler):
|
|
|
1733
1676
|
password = record.get("password")
|
|
1734
1677
|
if not user or not password:
|
|
1735
1678
|
raise RuntimeError("Container credentials unavailable")
|
|
1736
|
-
on_behalf_of_device = record.get("device_id")
|
|
1737
|
-
if on_behalf_of_device:
|
|
1738
|
-
on_behalf_of_device = str(on_behalf_of_device)
|
|
1739
1679
|
|
|
1740
1680
|
start_index = int(message.get("step_index", 1))
|
|
1741
1681
|
total_steps = int(message.get("total_steps", start_index + 2))
|
|
@@ -1753,7 +1693,6 @@ class StartPortacodeServiceHandler(SyncHandler):
|
|
|
1753
1693
|
message="Notifying the server of the new device…",
|
|
1754
1694
|
phase="service",
|
|
1755
1695
|
request_id=request_id,
|
|
1756
|
-
on_behalf_of_device=on_behalf_of_device,
|
|
1757
1696
|
)
|
|
1758
1697
|
_emit_progress_event(
|
|
1759
1698
|
self,
|
|
@@ -1765,7 +1704,6 @@ class StartPortacodeServiceHandler(SyncHandler):
|
|
|
1765
1704
|
message="Authentication metadata recorded.",
|
|
1766
1705
|
phase="service",
|
|
1767
1706
|
request_id=request_id,
|
|
1768
|
-
on_behalf_of_device=on_behalf_of_device,
|
|
1769
1707
|
)
|
|
1770
1708
|
|
|
1771
1709
|
install_step = start_index + 1
|
|
@@ -1780,10 +1718,9 @@ class StartPortacodeServiceHandler(SyncHandler):
|
|
|
1780
1718
|
message="Running sudo portacode service install…",
|
|
1781
1719
|
phase="service",
|
|
1782
1720
|
request_id=request_id,
|
|
1783
|
-
on_behalf_of_device=on_behalf_of_device,
|
|
1784
1721
|
)
|
|
1785
1722
|
|
|
1786
|
-
cmd =
|
|
1723
|
+
cmd = f"su - {user} -c 'sudo -S portacode service install'"
|
|
1787
1724
|
res = _run_pct(vmid, cmd, input_text=password + "\n")
|
|
1788
1725
|
|
|
1789
1726
|
if res["returncode"] != 0:
|
|
@@ -1801,7 +1738,6 @@ class StartPortacodeServiceHandler(SyncHandler):
|
|
|
1801
1738
|
"stderr": res.get("stderr"),
|
|
1802
1739
|
"stdout": res.get("stdout"),
|
|
1803
1740
|
},
|
|
1804
|
-
on_behalf_of_device=on_behalf_of_device,
|
|
1805
1741
|
)
|
|
1806
1742
|
raise RuntimeError(res.get("stderr") or res.get("stdout") or "Service install failed")
|
|
1807
1743
|
|
|
@@ -1815,7 +1751,6 @@ class StartPortacodeServiceHandler(SyncHandler):
|
|
|
1815
1751
|
message="Portacode service install finished.",
|
|
1816
1752
|
phase="service",
|
|
1817
1753
|
request_id=request_id,
|
|
1818
|
-
on_behalf_of_device=on_behalf_of_device,
|
|
1819
1754
|
)
|
|
1820
1755
|
|
|
1821
1756
|
return {
|
|
@@ -1842,6 +1777,10 @@ class StartProxmoxContainerHandler(SyncHandler):
|
|
|
1842
1777
|
|
|
1843
1778
|
status, elapsed = _start_container(proxmox, node, vmid)
|
|
1844
1779
|
_update_container_record(vmid, {"status": "running"})
|
|
1780
|
+
try:
|
|
1781
|
+
_ensure_container_tunnel_running(proxmox, node, vmid)
|
|
1782
|
+
except Exception as exc:
|
|
1783
|
+
raise RuntimeError(f"Failed to start Cloudflare tunnel for container {vmid}: {exc}") from exc
|
|
1845
1784
|
|
|
1846
1785
|
infra = get_infra_snapshot()
|
|
1847
1786
|
return {
|
|
@@ -1871,6 +1810,7 @@ class StopProxmoxContainerHandler(SyncHandler):
|
|
|
1871
1810
|
_ensure_container_managed(proxmox, node, vmid)
|
|
1872
1811
|
|
|
1873
1812
|
status, elapsed = _stop_container(proxmox, node, vmid)
|
|
1813
|
+
_stop_container_tunnel(vmid)
|
|
1874
1814
|
final_status = status.get("status") or "stopped"
|
|
1875
1815
|
_update_container_record(vmid, {"status": final_status})
|
|
1876
1816
|
|
|
@@ -1906,8 +1846,13 @@ class RemoveProxmoxContainerHandler(SyncHandler):
|
|
|
1906
1846
|
node = _get_node_from_config(config)
|
|
1907
1847
|
_ensure_container_managed(proxmox, node, vmid)
|
|
1908
1848
|
|
|
1849
|
+
_stop_container_tunnel(vmid)
|
|
1909
1850
|
stop_status, stop_elapsed = _stop_container(proxmox, node, vmid)
|
|
1910
1851
|
delete_status, delete_elapsed = _delete_container(proxmox, node, vmid)
|
|
1852
|
+
try:
|
|
1853
|
+
_update_container_tunnel(vmid, None)
|
|
1854
|
+
except FileNotFoundError:
|
|
1855
|
+
pass
|
|
1911
1856
|
_remove_container_record(vmid)
|
|
1912
1857
|
|
|
1913
1858
|
infra = get_infra_snapshot()
|
|
@@ -1926,6 +1871,107 @@ class RemoveProxmoxContainerHandler(SyncHandler):
|
|
|
1926
1871
|
}
|
|
1927
1872
|
|
|
1928
1873
|
|
|
1874
|
+
class CreateCloudflareTunnelHandler(SyncHandler):
    """Create a Cloudflare tunnel for a container."""

    @property
    def command_name(self) -> str:
        return "create_cloudflare_tunnel"

    def execute(self, message: Dict[str, Any]) -> Dict[str, Any]:
        """Validate the request, then launch a tunnel for a running container."""
        ctid = _parse_ctid(message)

        # Validate the requested mapping before touching Proxmox at all.
        port = int(message.get("container_port") or 0)
        if port <= 0:
            raise ValueError("container_port is required and must be greater than zero.")
        public_hostname = (message.get("cloudflare_url") or message.get("hostname") or "").strip()
        if not public_hostname:
            raise ValueError("cloudflare_url is required.")
        scheme = (message.get("protocol") or "http").strip().lower()
        if scheme not in {"http", "https", "tcp"}:
            raise ValueError("protocol must be one of http, https, or tcp.")

        infra = _ensure_infra_configured()
        _ensure_cloudflare_token(infra)
        proxmox = _connect_proxmox(infra)
        node = _get_node_from_config(infra)
        _ensure_container_managed(proxmox, node, ctid)

        # A tunnel can only forward into a container that is actually up.
        current = proxmox.nodes(node).lxc(ctid).status.current.get()
        if current.get("status") != "running":
            raise RuntimeError("Container must be running to create a tunnel.")

        spec = {
            "container_port": port,
            "url": public_hostname,
            "protocol": scheme,
        }
        created = _launch_container_tunnel(proxmox, node, ctid, spec)
        return {
            "event": "cloudflare_tunnel_created",
            "ctid": str(ctid),
            "success": True,
            "message": f"Created Cloudflare tunnel for container {ctid}.",
            "tunnel": created,
        }
|
|
1913
|
+
|
|
1914
|
+
|
|
1915
|
+
class UpdateCloudflareTunnelHandler(SyncHandler):
    """Update an existing Cloudflare tunnel for a container."""

    @property
    def command_name(self) -> str:
        return "update_cloudflare_tunnel"

    def execute(self, message: Dict[str, Any]) -> Dict[str, Any]:
        """Re-launch the container's tunnel with (partially) updated settings."""
        ctid = _parse_ctid(message)
        infra = _ensure_infra_configured()
        _ensure_cloudflare_token(infra)
        proxmox = _connect_proxmox(infra)
        node = _get_node_from_config(infra)
        _ensure_container_managed(proxmox, node, ctid)

        existing = _read_container_record(ctid).get("tunnel")
        if not existing:
            raise RuntimeError("No Cloudflare tunnel configured for this container.")

        # For every field the caller omits, fall back to the stored settings.
        port = int(message.get("container_port") or existing.get("container_port") or 0)
        if port <= 0:
            raise ValueError("container_port must be greater than zero.")
        public_hostname = (message.get("cloudflare_url") or existing.get("url") or "").strip()
        if not public_hostname:
            raise ValueError("cloudflare_url is required.")
        scheme = (message.get("protocol") or existing.get("protocol") or "http").strip().lower()
        if scheme not in {"http", "https", "tcp"}:
            raise ValueError("protocol must be one of http, https, or tcp.")

        spec = {
            "container_port": port,
            "url": public_hostname,
            "protocol": scheme,
        }
        relaunched = _launch_container_tunnel(proxmox, node, ctid, spec)
        return {
            "event": "cloudflare_tunnel_updated",
            "ctid": str(ctid),
            "success": True,
            "message": f"Updated Cloudflare tunnel for container {ctid}.",
            "tunnel": relaunched,
        }
|
|
1955
|
+
|
|
1956
|
+
|
|
1957
|
+
class RemoveCloudflareTunnelHandler(SyncHandler):
    """Remove any Cloudflare tunnel associated with a container."""

    @property
    def command_name(self) -> str:
        return "remove_cloudflare_tunnel"

    def execute(self, message: Dict[str, Any]) -> Dict[str, Any]:
        """Tear down whatever tunnel state is recorded for the container."""
        ctid = _parse_ctid(message)
        _remove_container_tunnel_state(ctid)
        return {
            "event": "cloudflare_tunnel_removed",
            "ctid": str(ctid),
            "success": True,
            "message": f"Removed Cloudflare tunnel state for container {ctid}.",
        }
|
|
1973
|
+
|
|
1974
|
+
|
|
1929
1975
|
class ConfigureProxmoxInfraHandler(SyncHandler):
|
|
1930
1976
|
@property
|
|
1931
1977
|
def command_name(self) -> str:
|
|
@@ -1934,10 +1980,13 @@ class ConfigureProxmoxInfraHandler(SyncHandler):
|
|
|
1934
1980
|
def execute(self, message: Dict[str, Any]) -> Dict[str, Any]:
|
|
1935
1981
|
token_identifier = message.get("token_identifier")
|
|
1936
1982
|
token_value = message.get("token_value")
|
|
1937
|
-
verify_ssl =
|
|
1938
|
-
|
|
1939
|
-
|
|
1940
|
-
|
|
1983
|
+
verify_ssl = message.get("verify_ssl")
|
|
1984
|
+
snapshot = configure_infrastructure(
|
|
1985
|
+
token_identifier=token_identifier,
|
|
1986
|
+
token_value=token_value,
|
|
1987
|
+
verify_ssl=verify_ssl,
|
|
1988
|
+
cloudflare_api_token=message.get("cloudflare_api_token"),
|
|
1989
|
+
)
|
|
1941
1990
|
return {
|
|
1942
1991
|
"event": "proxmox_infra_configured",
|
|
1943
1992
|
"success": True,
|