more-compute 0.1.3__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- frontend/app/globals.css +322 -77
- frontend/app/layout.tsx +98 -82
- frontend/components/Cell.tsx +234 -95
- frontend/components/Notebook.tsx +430 -199
- frontend/components/{AddCellButton.tsx → cell/AddCellButton.tsx} +0 -2
- frontend/components/cell/MonacoCell.tsx +726 -0
- frontend/components/layout/ConnectionBanner.tsx +41 -0
- frontend/components/{Sidebar.tsx → layout/Sidebar.tsx} +16 -11
- frontend/components/modals/ConfirmModal.tsx +154 -0
- frontend/components/modals/SuccessModal.tsx +140 -0
- frontend/components/output/MarkdownRenderer.tsx +116 -0
- frontend/components/popups/ComputePopup.tsx +674 -365
- frontend/components/popups/MetricsPopup.tsx +11 -7
- frontend/components/popups/SettingsPopup.tsx +11 -13
- frontend/contexts/PodWebSocketContext.tsx +247 -0
- frontend/eslint.config.mjs +11 -0
- frontend/lib/monaco-themes.ts +160 -0
- frontend/lib/settings.ts +128 -26
- frontend/lib/themes.json +9973 -0
- frontend/lib/websocket-native.ts +19 -8
- frontend/lib/websocket.ts +59 -11
- frontend/next.config.ts +8 -0
- frontend/package-lock.json +1705 -3
- frontend/package.json +8 -1
- frontend/styling_README.md +18 -0
- kernel_run.py +161 -43
- more_compute-0.2.0.dist-info/METADATA +126 -0
- more_compute-0.2.0.dist-info/RECORD +100 -0
- morecompute/__version__.py +1 -0
- morecompute/execution/executor.py +31 -20
- morecompute/execution/worker.py +68 -7
- morecompute/models/__init__.py +31 -0
- morecompute/models/api_models.py +197 -0
- morecompute/notebook.py +50 -7
- morecompute/server.py +574 -94
- morecompute/services/data_manager.py +379 -0
- morecompute/services/lsp_service.py +335 -0
- morecompute/services/pod_manager.py +122 -20
- morecompute/services/pod_monitor.py +138 -0
- morecompute/services/prime_intellect.py +87 -63
- morecompute/utils/config_util.py +59 -0
- morecompute/utils/special_commands.py +11 -5
- morecompute/utils/zmq_util.py +51 -0
- frontend/components/MarkdownRenderer.tsx +0 -84
- frontend/components/popups/PythonPopup.tsx +0 -292
- more_compute-0.1.3.dist-info/METADATA +0 -173
- more_compute-0.1.3.dist-info/RECORD +0 -85
- /frontend/components/{CellButton.tsx → cell/CellButton.tsx} +0 -0
- /frontend/components/{ErrorModal.tsx → modals/ErrorModal.tsx} +0 -0
- /frontend/components/{CellOutput.tsx → output/CellOutput.tsx} +0 -0
- /frontend/components/{ErrorDisplay.tsx → output/ErrorDisplay.tsx} +0 -0
- {more_compute-0.1.3.dist-info → more_compute-0.2.0.dist-info}/WHEEL +0 -0
- {more_compute-0.1.3.dist-info → more_compute-0.2.0.dist-info}/entry_points.txt +0 -0
- {more_compute-0.1.3.dist-info → more_compute-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {more_compute-0.1.3.dist-info → more_compute-0.2.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
"""Service for monitoring GPU pod status updates."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import sys
|
|
5
|
+
from typing import Callable, Awaitable
|
|
6
|
+
from cachetools import TTLCache
|
|
7
|
+
|
|
8
|
+
from .prime_intellect import PrimeIntellectService
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
PodUpdateCallback = Callable[[dict], Awaitable[None]]
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class PodMonitor:
    """Monitors GPU pod status and broadcasts updates via an async callback.

    Polls the Prime Intellect API for a pod's status on a fixed interval,
    clears the shared pod cache so subsequent reads fetch fresh data, and
    forwards a ``pod_status_update`` payload to the supplied callback.
    Monitoring stops automatically once the pod reaches a terminal state
    (ERROR/TERMINATED) or becomes fully ready (ACTIVE with an SSH connection).
    """

    POLL_INTERVAL_SECONDS = 5
    # Bug fix: previously the loop retried forever on persistent API errors
    # (e.g. a deleted pod returning 404), leaving an orphaned task. Give up
    # after this many back-to-back failures (~1 minute at 5s interval).
    MAX_CONSECUTIVE_ERRORS = 12

    def __init__(
        self,
        prime_intellect: "PrimeIntellectService",
        pod_cache: "TTLCache",
        update_callback: "PodUpdateCallback"
    ):
        """
        Initialize pod monitor.

        Args:
            prime_intellect: Prime Intellect API service
            pod_cache: Cache to clear on updates (only ``.clear()`` is used)
            update_callback: Async callback for broadcasting updates
        """
        self.pi_service = prime_intellect
        self.pod_cache = pod_cache
        self.update_callback = update_callback
        # One monitoring task per pod id; entries self-remove on completion.
        self.monitoring_tasks: dict[str, asyncio.Task] = {}

    async def start_monitoring(self, pod_id: str) -> None:
        """
        Start monitoring a pod's status.

        Args:
            pod_id: ID of pod to monitor
        """
        # Don't start duplicate monitors for the same pod
        if pod_id in self.monitoring_tasks:
            print(f"[POD MONITOR] Already monitoring pod {pod_id}", file=sys.stderr, flush=True)
            return

        task = asyncio.create_task(self._monitor_loop(pod_id))
        self.monitoring_tasks[pod_id] = task
        print(f"[POD MONITOR] Started monitoring pod {pod_id}", file=sys.stderr, flush=True)

    async def stop_monitoring(self, pod_id: str) -> None:
        """
        Stop monitoring a pod.

        Cancels the task (if still running) and waits for it to finish.

        Args:
            pod_id: ID of pod to stop monitoring
        """
        task = self.monitoring_tasks.pop(pod_id, None)
        if task and not task.done():
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
        print(f"[POD MONITOR] Stopped monitoring pod {pod_id}", file=sys.stderr, flush=True)

    async def _monitor_loop(self, pod_id: str) -> None:
        """
        Main monitoring loop for a pod.

        Args:
            pod_id: ID of pod to monitor
        """
        consecutive_errors = 0
        try:
            while True:
                try:
                    # Fetch current pod status
                    pod = await self.pi_service.get_pod(pod_id)
                    consecutive_errors = 0

                    print(
                        f"[POD MONITOR] Pod {pod_id} status: {pod.status}",
                        file=sys.stderr,
                        flush=True
                    )

                    # Clear cache to force fresh data
                    self.pod_cache.clear()

                    # Broadcast update
                    await self.update_callback({
                        "type": "pod_status_update",
                        "data": {
                            "pod_id": pod_id,
                            "name": pod.name,
                            "status": pod.status,
                            "ssh_connection": pod.sshConnection,
                            "ip": pod.ip,
                            "gpu_name": pod.gpuName,
                            "price_hr": pod.priceHr
                        }
                    })

                    # Stop monitoring if ERROR or TERMINATED
                    if pod.status in {"ERROR", "TERMINATED"}:
                        print(
                            f"[POD MONITOR] Pod {pod_id} reached terminal state: {pod.status}",
                            file=sys.stderr,
                            flush=True
                        )
                        break

                    # If ACTIVE and has SSH connection, pod is fully ready - stop monitoring
                    if pod.status == "ACTIVE" and pod.sshConnection:
                        print(
                            f"[POD MONITOR] Pod {pod_id} is ACTIVE with SSH connection: {pod.sshConnection}",
                            file=sys.stderr,
                            flush=True
                        )
                        break

                    # Wait before next check
                    await asyncio.sleep(self.POLL_INTERVAL_SECONDS)

                except Exception as e:
                    consecutive_errors += 1
                    print(
                        f"[POD MONITOR] Error checking pod {pod_id}: {e}",
                        file=sys.stderr,
                        flush=True
                    )
                    # Give up after repeated failures: the pod is likely gone
                    # or the API is persistently unreachable.
                    if consecutive_errors >= self.MAX_CONSECUTIVE_ERRORS:
                        break
                    await asyncio.sleep(self.POLL_INTERVAL_SECONDS)

        finally:
            # Clean up regardless of how the loop exited (break or cancel)
            self.monitoring_tasks.pop(pod_id, None)
            print(f"[POD MONITOR] Stopped monitoring pod {pod_id}", file=sys.stderr, flush=True)
|
@@ -1,69 +1,14 @@
|
|
|
1
|
-
from pydantic import BaseModel
|
|
2
|
-
from datetime import datetime
|
|
3
1
|
import httpx
|
|
4
2
|
from fastapi import HTTPException
|
|
5
3
|
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
gpuType: str
|
|
15
|
-
socket: str
|
|
16
|
-
gpuCount: int = 1
|
|
17
|
-
|
|
18
|
-
# Optional
|
|
19
|
-
diskSize: int | None = None
|
|
20
|
-
vcpus: int | None = None
|
|
21
|
-
memory: int | None = None
|
|
22
|
-
maxPrice: float | None = None
|
|
23
|
-
image: str | None = None
|
|
24
|
-
customTemplateId: str | None = None
|
|
25
|
-
dataCenterId: str | None = None
|
|
26
|
-
country: str | None = None
|
|
27
|
-
security: str | None = None
|
|
28
|
-
envVars: list[EnvVar] | None = None
|
|
29
|
-
jupyterPassword: str | None = None
|
|
30
|
-
autoRestart: bool | None = None
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
class ProviderConfig(BaseModel):
|
|
34
|
-
type: str = "runpod"
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
class TeamConfig(BaseModel):
|
|
38
|
-
teamId: str | None = None
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
class CreatePodRequest(BaseModel):
|
|
42
|
-
pod: PodConfig
|
|
43
|
-
provider: ProviderConfig
|
|
44
|
-
team: TeamConfig | None = None
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
class PodResponse(BaseModel):
|
|
48
|
-
id: str
|
|
49
|
-
userId: str
|
|
50
|
-
teamId: str | None
|
|
51
|
-
name: str
|
|
52
|
-
status: str
|
|
53
|
-
gpuName: str
|
|
54
|
-
gpuCount: int
|
|
55
|
-
priceHr: float
|
|
56
|
-
sshConnection: str | None
|
|
57
|
-
ip: str | None
|
|
58
|
-
createdAt: datetime
|
|
59
|
-
updatedAt: datetime
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
class AvailabilityQuery(BaseModel):
|
|
63
|
-
regions: list[str] | None = None
|
|
64
|
-
gpu_count: int | None = None
|
|
65
|
-
gpu_type: str | None = None
|
|
66
|
-
security: str | None = None
|
|
4
|
+
from ..models.api_models import (
|
|
5
|
+
CreatePodRequest,
|
|
6
|
+
CreateDiskRequest,
|
|
7
|
+
PodResponse,
|
|
8
|
+
DiskResponse,
|
|
9
|
+
)
|
|
10
|
+
|
|
11
|
+
# Helper methods for https://docs.primeintellect.ai/api-reference/
|
|
67
12
|
|
|
68
13
|
|
|
69
14
|
class PrimeIntellectService:
|
|
@@ -314,3 +259,82 @@ class PrimeIntellectService:
|
|
|
314
259
|
Dict with confirmation
|
|
315
260
|
"""
|
|
316
261
|
return await self._make_request("PATCH", f"/ssh-keys/{key_id}/primary")
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
async def get_disks(
|
|
265
|
+
self,
|
|
266
|
+
limit: int = 100,
|
|
267
|
+
offset: int = 0,
|
|
268
|
+
) -> dict[str, object]:
|
|
269
|
+
"""
|
|
270
|
+
Get list of all disks
|
|
271
|
+
|
|
272
|
+
args:
|
|
273
|
+
limit : max number of results (default : 100)
|
|
274
|
+
offset : for Pagination
|
|
275
|
+
|
|
276
|
+
returns:
|
|
277
|
+
dict with:
|
|
278
|
+
- data: list of disk objects
|
|
279
|
+
- total_count : total number of disks
|
|
280
|
+
- offset : current offset
|
|
281
|
+
- limit: current limit
|
|
282
|
+
"""
|
|
283
|
+
params : dict[str, str | int | float | list[str]] = {"limit":limit, "offset":offset}
|
|
284
|
+
return await self._make_request("GET", "/disks/", params=params)
|
|
285
|
+
|
|
286
|
+
async def create_disks(self, disk_request :CreateDiskRequest) -> DiskResponse:
|
|
287
|
+
"""
|
|
288
|
+
Create new persistent disk
|
|
289
|
+
|
|
290
|
+
Args:
|
|
291
|
+
disk_reqeust: disk configuration request
|
|
292
|
+
|
|
293
|
+
Returns:
|
|
294
|
+
Disk response with disk details
|
|
295
|
+
|
|
296
|
+
"""
|
|
297
|
+
import sys
|
|
298
|
+
payload = disk_request.model_dump(exclude_none=True)
|
|
299
|
+
print(f"[PI SERVICE] Creating disk with payload: + {payload}", file=sys.stderr, flush=True)
|
|
300
|
+
response = await self._make_request("POST", "/disks/", json_data=payload)
|
|
301
|
+
return DiskResponse.model_validate(response)
|
|
302
|
+
|
|
303
|
+
async def get_disk(self, disk_id:str) -> DiskResponse:
|
|
304
|
+
"""
|
|
305
|
+
Get details of specific disk
|
|
306
|
+
|
|
307
|
+
args:
|
|
308
|
+
disk_id: disk identifier
|
|
309
|
+
|
|
310
|
+
returns:
|
|
311
|
+
DiskResponse
|
|
312
|
+
"""
|
|
313
|
+
response = await self._make_request("GET", f"/disks/{disk_id}")
|
|
314
|
+
return DiskResponse.model_validate(response)
|
|
315
|
+
|
|
316
|
+
async def update_disk(self, disk_id:str, name:str) -> dict[str,object]:
|
|
317
|
+
"""
|
|
318
|
+
update disk name
|
|
319
|
+
|
|
320
|
+
args:
|
|
321
|
+
disk_id: disk identifier
|
|
322
|
+
name: new name of disk
|
|
323
|
+
|
|
324
|
+
returns:
|
|
325
|
+
dict with update confirmation
|
|
326
|
+
"""
|
|
327
|
+
data: dict[str, object] = {"name":name}
|
|
328
|
+
return await self._make_request("PATCH", f"/disks/{disk_id}", json_data=data)
|
|
329
|
+
|
|
330
|
+
async def delete_disk(self, disk_id: str) -> dict[str, object]:
|
|
331
|
+
"""
|
|
332
|
+
Delete a disk.
|
|
333
|
+
|
|
334
|
+
Args:
|
|
335
|
+
disk_id: The disk identifier
|
|
336
|
+
|
|
337
|
+
Returns:
|
|
338
|
+
Dict with deletion confirmation
|
|
339
|
+
"""
|
|
340
|
+
return await self._make_request("DELETE", f"/disks/{disk_id}")
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
"""Configuration utilities for managing API keys and environment variables."""
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def load_api_key_from_env(env_var: str, env_file_path: Path | None = None) -> str | None:
|
|
8
|
+
"""
|
|
9
|
+
Load API key from environment variable or .env file.
|
|
10
|
+
|
|
11
|
+
Args:
|
|
12
|
+
env_var: Environment variable name to check
|
|
13
|
+
env_file_path: Path to .env file (optional)
|
|
14
|
+
|
|
15
|
+
Returns:
|
|
16
|
+
API key string or None if not found
|
|
17
|
+
"""
|
|
18
|
+
api_key = os.getenv(env_var)
|
|
19
|
+
if api_key:
|
|
20
|
+
return api_key
|
|
21
|
+
|
|
22
|
+
if env_file_path and env_file_path.exists():
|
|
23
|
+
try:
|
|
24
|
+
with env_file_path.open("r", encoding="utf-8") as f:
|
|
25
|
+
for line in f:
|
|
26
|
+
line = line.strip()
|
|
27
|
+
if line.startswith(f"{env_var}="):
|
|
28
|
+
return line.split("=", 1)[1].strip().strip('"').strip("'")
|
|
29
|
+
except Exception:
|
|
30
|
+
pass
|
|
31
|
+
|
|
32
|
+
return None
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def save_api_key_to_env(env_var: str, api_key: str, env_file_path: Path) -> None:
    """
    Save API key to .env file, replacing existing value if present.

    Args:
        env_var: Environment variable name
        api_key: API key value to save
        env_file_path: Path to .env file

    Raises:
        ValueError: If API key is empty
        IOError: If file cannot be written
    """
    if not api_key.strip():
        raise ValueError("API key cannot be empty")

    existing_lines: list[str] = []
    if env_file_path.exists():
        with env_file_path.open("r", encoding="utf-8") as f:
            existing_lines = f.readlines()

    # Drop any previous assignment of this variable.
    new_lines = [line for line in existing_lines if not line.strip().startswith(f"{env_var}=")]
    # Bug fix: if the file's last line had no trailing newline, the new entry
    # would be fused onto it ("OTHER=1KEY=val") - terminate that line first.
    if new_lines and not new_lines[-1].endswith("\n"):
        new_lines[-1] += "\n"
    new_lines.append(f"{env_var}={api_key}\n")
    with env_file_path.open("w", encoding="utf-8") as f:
        f.writelines(new_lines)
|
|
@@ -122,11 +122,17 @@ class AsyncSpecialCommandHandler:
|
|
|
122
122
|
# Check if command failed
|
|
123
123
|
if return_code != 0:
|
|
124
124
|
result["status"] = "error"
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
125
|
+
# Only add generic error if we don't already have detailed stderr output
|
|
126
|
+
# The detailed stderr is already in result["outputs"] from streaming
|
|
127
|
+
has_stderr = any(o.get("name") == "stderr" and o.get("text", "").strip()
|
|
128
|
+
for o in result.get("outputs", []))
|
|
129
|
+
if not has_stderr:
|
|
130
|
+
# No detailed error output, add generic error
|
|
131
|
+
result["error"] = {
|
|
132
|
+
"ename": "ShellCommandError",
|
|
133
|
+
"evalue": f"Command failed with return code {return_code}",
|
|
134
|
+
"traceback": [f"Shell command failed: {command}"]
|
|
135
|
+
}
|
|
130
136
|
|
|
131
137
|
except Exception as e:
|
|
132
138
|
result["status"] = "error"
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""Utilities for ZMQ socket management and reconnection."""
|
|
2
|
+
|
|
3
|
+
import os
from typing import Any

import zmq
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def reconnect_zmq_sockets(
    executor: Any,
    cmd_addr: str | None = None,
    pub_addr: str | None = None,
    is_remote: bool = False
) -> None:
    """
    Reconnect ZMQ executor sockets to new addresses.

    Closes the executor's REQ/SUB sockets and recreates them against the
    given (or default local) addresses.

    Note: the parameter was previously annotated ``executor: any``, which
    refers to the builtin function, not ``typing.Any`` - fixed here.

    Args:
        executor: Executor instance with ZMQ sockets
        cmd_addr: Command socket address (defaults to local)
        pub_addr: Publish socket address (defaults to local)
        is_remote: True if connecting to remote worker, False for local
    """
    # Use provided addresses or fall back to environment/local defaults
    final_cmd_addr = cmd_addr or os.getenv('MC_ZMQ_CMD_ADDR', 'tcp://127.0.0.1:5555')
    final_pub_addr = pub_addr or os.getenv('MC_ZMQ_PUB_ADDR', 'tcp://127.0.0.1:5556')

    # Update executor addresses
    executor.cmd_addr = final_cmd_addr
    executor.pub_addr = final_pub_addr
    executor.is_remote = is_remote

    # Reconnect command socket (REQ); close(0) discards pending messages
    executor.req.close(0)  # type: ignore[reportAttributeAccessIssue]
    executor.req = executor.ctx.socket(zmq.REQ)  # type: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
    executor.req.connect(executor.cmd_addr)  # type: ignore[reportAttributeAccessIssue]

    # Reconnect publish socket (SUB), subscribed to all topics
    executor.sub.close(0)  # type: ignore[reportAttributeAccessIssue]
    executor.sub = executor.ctx.socket(zmq.SUB)  # type: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
    executor.sub.connect(executor.pub_addr)  # type: ignore[reportAttributeAccessIssue]
    executor.sub.setsockopt_string(zmq.SUBSCRIBE, '')  # type: ignore[reportAttributeAccessIssue]
+
|
|
42
|
+
|
|
43
|
+
def reset_to_local_zmq(executor: Any) -> None:
    """
    Reset executor to local ZMQ addresses.

    Marks the executor as local and reconnects both sockets using the
    default (environment or loopback) addresses.

    Args:
        executor: Executor instance to reset
    """
    executor.is_remote = False
    reconnect_zmq_sockets(executor)
|
|
@@ -1,84 +0,0 @@
|
|
|
1
|
-
'use client';
|
|
2
|
-
|
|
3
|
-
import React from 'react';
|
|
4
|
-
|
|
5
|
-
interface MarkdownRendererProps {
|
|
6
|
-
source: string;
|
|
7
|
-
onClick?: () => void;
|
|
8
|
-
}
|
|
9
|
-
|
|
10
|
-
const MarkdownRenderer: React.FC<MarkdownRendererProps> = ({ source, onClick }) => {
|
|
11
|
-
const escapeHtml = (text: string) => {
|
|
12
|
-
const div = document.createElement('div');
|
|
13
|
-
div.textContent = text;
|
|
14
|
-
return div.innerHTML;
|
|
15
|
-
};
|
|
16
|
-
|
|
17
|
-
const renderMarkdown = (text: string) => {
|
|
18
|
-
let html = text;
|
|
19
|
-
|
|
20
|
-
// Code blocks (must be processed first)
|
|
21
|
-
html = html.replace(/```([\s\S]*?)```/g, (match, code) => {
|
|
22
|
-
return `<pre><code>${escapeHtml(code.trim())}</code></pre>`;
|
|
23
|
-
});
|
|
24
|
-
|
|
25
|
-
// Language-specific code blocks
|
|
26
|
-
html = html.replace(/```(\w+)\n([\s\S]*?)```/g, (match, lang, code) => {
|
|
27
|
-
return `<pre><code class="language-${lang}">${escapeHtml(code.trim())}</code></pre>`;
|
|
28
|
-
});
|
|
29
|
-
|
|
30
|
-
// Headers
|
|
31
|
-
html = html.replace(/^### (.*$)/gim, '<h3>$1</h3>');
|
|
32
|
-
html = html.replace(/^## (.*$)/gim, '<h2>$1</h2>');
|
|
33
|
-
html = html.replace(/^# (.*$)/gim, '<h1>$1</h1>');
|
|
34
|
-
|
|
35
|
-
// Bold and Italic
|
|
36
|
-
html = html.replace(/\*\*\*([^*]+)\*\*\*/g, '<strong><em>$1</em></strong>');
|
|
37
|
-
html = html.replace(/\*\*([^*]+)\*\*/g, '<strong>$1</strong>');
|
|
38
|
-
html = html.replace(/\*([^*]+)\*/g, '<em>$1</em>');
|
|
39
|
-
|
|
40
|
-
// Strikethrough
|
|
41
|
-
html = html.replace(/~~([^~]+)~~/g, '<del>$1</del>');
|
|
42
|
-
|
|
43
|
-
// Links
|
|
44
|
-
html = html.replace(/\[([^\]]+)\]\(([^)]+)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>');
|
|
45
|
-
|
|
46
|
-
// Unordered lists
|
|
47
|
-
html = html.replace(/^\s*[-*+] (.+)$/gim, (match, item) => `<li>${item}</li>`);
|
|
48
|
-
html = html.replace(/(<li>[\s\S]*?<\/li>)/g, '<ul>$1</ul>').replace(/<\/ul>\s*<ul>/g, '');
|
|
49
|
-
|
|
50
|
-
// Ordered lists
|
|
51
|
-
html = html.replace(/^\s*\d+\. (.+)$/gim, (match, item) => `<ol><li>${item}</li></ol>`).replace(/<\/ol>\s*<ol>/g, '');
|
|
52
|
-
|
|
53
|
-
// Inline code
|
|
54
|
-
html = html.replace(/`([^`]+)`/g, '<code>$1</code>');
|
|
55
|
-
|
|
56
|
-
// Blockquotes
|
|
57
|
-
html = html.replace(/^> (.+)$/gim, '<blockquote>$1</blockquote>');
|
|
58
|
-
|
|
59
|
-
// Handle HTML br tags first - convert them to proper line breaks
|
|
60
|
-
html = html.replace(/<br\s*\/?>/gi, '\n');
|
|
61
|
-
|
|
62
|
-
// Line breaks and paragraphs - handle paragraph separation properly
|
|
63
|
-
// Split by double newlines to create paragraphs, but preserve single line breaks within paragraphs
|
|
64
|
-
html = html.replace(/\n\s*\n/g, '</p><p>'); // Paragraph breaks (double newlines with optional whitespace)
|
|
65
|
-
html = html.replace(/\n(?!<\/p>)/g, '<br>'); // Line breaks within paragraphs (but not paragraph breaks)
|
|
66
|
-
|
|
67
|
-
// Ensure content is wrapped in paragraph tags if not already in a block element
|
|
68
|
-
if (!html.match(/^<(h[1-6]|ul|ol|pre|blockquote|hr|p)/) && html.trim()) {
|
|
69
|
-
html = '<p>' + html + '</p>';
|
|
70
|
-
}
|
|
71
|
-
|
|
72
|
-
return html;
|
|
73
|
-
};
|
|
74
|
-
|
|
75
|
-
return (
|
|
76
|
-
<div
|
|
77
|
-
className="markdown-rendered"
|
|
78
|
-
onClick={onClick}
|
|
79
|
-
dangerouslySetInnerHTML={{ __html: renderMarkdown(source) }}
|
|
80
|
-
/>
|
|
81
|
-
);
|
|
82
|
-
};
|
|
83
|
-
|
|
84
|
-
export default MarkdownRenderer;
|