procler-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. procler/__init__.py +3 -0
  2. procler/__main__.py +6 -0
  3. procler/api/__init__.py +5 -0
  4. procler/api/app.py +261 -0
  5. procler/api/deps.py +21 -0
  6. procler/api/routes/__init__.py +5 -0
  7. procler/api/routes/config.py +290 -0
  8. procler/api/routes/groups.py +62 -0
  9. procler/api/routes/logs.py +43 -0
  10. procler/api/routes/processes.py +185 -0
  11. procler/api/routes/recipes.py +69 -0
  12. procler/api/routes/snippets.py +134 -0
  13. procler/api/routes/ws.py +459 -0
  14. procler/cli.py +1478 -0
  15. procler/config/__init__.py +65 -0
  16. procler/config/changelog.py +148 -0
  17. procler/config/loader.py +256 -0
  18. procler/config/schema.py +315 -0
  19. procler/core/__init__.py +54 -0
  20. procler/core/context_base.py +117 -0
  21. procler/core/context_docker.py +384 -0
  22. procler/core/context_local.py +287 -0
  23. procler/core/daemon_detector.py +325 -0
  24. procler/core/events.py +74 -0
  25. procler/core/groups.py +419 -0
  26. procler/core/health.py +280 -0
  27. procler/core/log_tailer.py +262 -0
  28. procler/core/process_manager.py +1277 -0
  29. procler/core/recipes.py +330 -0
  30. procler/core/snippets.py +231 -0
  31. procler/core/variable_substitution.py +65 -0
  32. procler/db.py +96 -0
  33. procler/logging.py +41 -0
  34. procler/models.py +130 -0
  35. procler/py.typed +0 -0
  36. procler/settings.py +29 -0
  37. procler/static/assets/AboutView-BwZnsfpW.js +4 -0
  38. procler/static/assets/AboutView-UHbxWXcS.css +1 -0
  39. procler/static/assets/Code-HTS-H1S6.js +74 -0
  40. procler/static/assets/ConfigView-CGJcmp9G.css +1 -0
  41. procler/static/assets/ConfigView-aVtbRDf8.js +1 -0
  42. procler/static/assets/DashboardView-C5jw9Nsd.css +1 -0
  43. procler/static/assets/DashboardView-Dab7Cu9v.js +1 -0
  44. procler/static/assets/DataTable-z39TOAa4.js +746 -0
  45. procler/static/assets/DescriptionsItem-B2E8YbqJ.js +74 -0
  46. procler/static/assets/Divider-Dk-6aD2Y.js +42 -0
  47. procler/static/assets/Empty-MuygEHZM.js +24 -0
  48. procler/static/assets/Grid-CZ9QVKAT.js +1 -0
  49. procler/static/assets/GroupsView-BALG7i1X.js +1 -0
  50. procler/static/assets/GroupsView-gXAI1CVC.css +1 -0
  51. procler/static/assets/Input-e0xaxoWE.js +259 -0
  52. procler/static/assets/PhArrowsClockwise.vue-DqDg31az.js +1 -0
  53. procler/static/assets/PhCheckCircle.vue-Fwj9sh9m.js +1 -0
  54. procler/static/assets/PhEye.vue-JcPHciC2.js +1 -0
  55. procler/static/assets/PhPlay.vue-CZm7Gy3u.js +1 -0
  56. procler/static/assets/PhPlus.vue-yTWqKlSh.js +1 -0
  57. procler/static/assets/PhStop.vue-DxsqwIki.js +1 -0
  58. procler/static/assets/PhTrash.vue-DcqQbN1_.js +125 -0
  59. procler/static/assets/PhXCircle.vue-BXWmrabV.js +1 -0
  60. procler/static/assets/ProcessDetailView-DDbtIWq9.css +1 -0
  61. procler/static/assets/ProcessDetailView-DPtdNV-q.js +1 -0
  62. procler/static/assets/ProcessesView-B3a6Umur.js +1 -0
  63. procler/static/assets/ProcessesView-goLmghbJ.css +1 -0
  64. procler/static/assets/RecipesView-D2VxdneD.js +166 -0
  65. procler/static/assets/RecipesView-DXnFDCK4.css +1 -0
  66. procler/static/assets/Select-BBR17AHq.js +317 -0
  67. procler/static/assets/SnippetsView-B3a9q3AI.css +1 -0
  68. procler/static/assets/SnippetsView-DBCB2yGq.js +1 -0
  69. procler/static/assets/Spin-BXTjvFUk.js +90 -0
  70. procler/static/assets/Tag-Bh_qV63A.js +71 -0
  71. procler/static/assets/changelog-KkTT4H9-.js +1 -0
  72. procler/static/assets/groups-Zu-_v8ey.js +1 -0
  73. procler/static/assets/index-BsN-YMXq.css +1 -0
  74. procler/static/assets/index-BzW1XhyH.js +1282 -0
  75. procler/static/assets/procler-DOrSB1Vj.js +1 -0
  76. procler/static/assets/recipes-1w5SseGb.js +1 -0
  77. procler/static/index.html +17 -0
  78. procler/static/procler.png +0 -0
  79. procler-0.2.0.dist-info/METADATA +545 -0
  80. procler-0.2.0.dist-info/RECORD +83 -0
  81. procler-0.2.0.dist-info/WHEEL +4 -0
  82. procler-0.2.0.dist-info/entry_points.txt +2 -0
  83. procler-0.2.0.dist-info/licenses/LICENSE +21 -0
procler/core/context_docker.py
@@ -0,0 +1,384 @@
+ """Docker container execution context using docker-py SDK."""
+
+ import asyncio
+ import re
+ import time
+ from collections.abc import AsyncIterator, Callable
+
+ try:
+     import docker
+     from docker.errors import APIError, NotFound
+
+     DOCKER_AVAILABLE = True
+ except ImportError:
+     DOCKER_AVAILABLE = False
+
+ from .context_base import ExecResult, ExecutionContext, ProcessHandle
+
+ # Docker container name pattern: alphanumeric, underscore, dash, dot
+ # Must start with alphanumeric
+ CONTAINER_NAME_PATTERN = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
+
+
+ def validate_container_name(name: str) -> bool:
+     """Validate container name matches Docker naming rules."""
+     if not name or len(name) > 255:
+         return False
+     return bool(CONTAINER_NAME_PATTERN.match(name))
+
+
+ class DockerContext(ExecutionContext):
+     """Execute processes inside Docker containers using docker-py SDK."""
+
+     def __init__(self):
+         if not DOCKER_AVAILABLE:
+             raise RuntimeError("Docker SDK not available. Install with: pip install docker")
+         self._client = docker.from_env()
+         self._exec_instances: dict[int, tuple] = {}  # pid -> (container, exec_id)
+         self._stream_tasks: dict[int, asyncio.Task] = {}  # pid -> stream output task
+
+     @property
+     def context_type(self) -> str:
+         return "docker"
+
+     def _get_container(self, container_name: str):
+         """Get a container by name or ID."""
+         if not validate_container_name(container_name):
+             raise ValueError(f"Invalid container name: '{container_name}'")
+         try:
+             return self._client.containers.get(container_name)
+         except NotFound:
+             raise ValueError(f"Container '{container_name}' not found")
+
+     def _is_container_running(self, container_name: str) -> bool:
+         """Check if a container is running."""
+         try:
+             container = self._get_container(container_name)
+             return container.status == "running"
+         except ValueError:
+             return False
+
+     def check_container_available(self, container_name: str) -> tuple[bool, str | None]:
+         """Pre-flight check if container is available and running.
+
+         Returns (is_available, error_message).
+         """
+         if not validate_container_name(container_name):
+             return False, f"Invalid container name: '{container_name}'"
+         try:
+             container = self._client.containers.get(container_name)
+             if container.status != "running":
+                 return False, f"Container '{container_name}' is not running (status: {container.status})"
+             return True, None
+         except NotFound:
+             return False, f"Container '{container_name}' not found"
+         except Exception as e:
+             return False, f"Error checking container '{container_name}': {e}"
+
+     def list_containers(self, running_only: bool = True) -> list[dict]:
+         """List available containers."""
+         containers = self._client.containers.list(all=not running_only)
+         return [
+             {
+                 "id": c.short_id,
+                 "name": c.name,
+                 "status": c.status,
+                 "image": c.image.tags[0] if c.image.tags else c.image.short_id,
+             }
+             for c in containers
+         ]
+
+     async def start_process(
+         self,
+         command: str,
+         cwd: str | None = None,
+         env: dict[str, str] | None = None,
+         on_stdout: Callable[[str], None] | None = None,
+         on_stderr: Callable[[str], None] | None = None,
+         on_exit: Callable[[int], None] | None = None,
+         container_name: str | None = None,
+     ) -> ProcessHandle:
+         """
+         Start a long-running process inside a Docker container.
+
+         Note: Docker exec doesn't support background processes the same way
+         as local subprocesses. This implementation runs the command and
+         streams output, but the "PID" is actually a unique identifier
+         for the exec instance, not a real container PID.
+         """
+         if not container_name:
+             raise ValueError("container_name is required for Docker context")
+
+         container = self._get_container(container_name)
+
+         if container.status != "running":
+             raise RuntimeError(f"Container '{container_name}' is not running (status: {container.status})")
+
+         # Build environment variables list
+         env_list = [f"{k}={v}" for k, v in (env or {}).items()]
+
+         # Create exec instance
+         exec_result = container.client.api.exec_create(
+             container.id,
+             command,
+             workdir=cwd,
+             environment=env_list if env_list else None,
+             stdout=True,
+             stderr=True,
+             tty=False,
+         )
+         exec_id = exec_result["Id"]
+
+         # Start exec and get stream handle
+         try:
+             output = container.client.api.exec_start(exec_id, stream=True, demux=True)
+         except Exception as e:
+             raise RuntimeError(f"Failed to start exec in container '{container_name}': {e}") from e
+
+         def resolve_exec_pid() -> int:
+             """Resolve the real PID for this exec session inside the container."""
+             try:
+                 inspect = container.client.api.exec_inspect(exec_id)
+                 pid = inspect.get("Pid")
+                 if isinstance(pid, int) and pid > 0:
+                     return pid
+             except Exception:
+                 return 0
+             return 0
+
+         exec_pid = resolve_exec_pid()
+         if exec_pid <= 0:
+             # Wait briefly for PID to appear
+             for _ in range(10):
+                 await asyncio.sleep(0.1)
+                 exec_pid = resolve_exec_pid()
+                 if exec_pid > 0:
+                     break
+
+         # Fall back to a pseudo pid if Docker doesn't report one
+         if exec_pid <= 0:
+             import random
+
+             exec_pid = random.randint(100000, 999999)
+
+         self._exec_instances[exec_pid] = (container, exec_id)
+
+         # Start streaming output in background
+         async def stream_output():
+             try:
+                 for stdout_chunk, stderr_chunk in output:
+                     if stdout_chunk:
+                         for line in stdout_chunk.decode("utf-8", errors="replace").splitlines():
+                             if on_stdout:
+                                 on_stdout(line)
+                     if stderr_chunk:
+                         for line in stderr_chunk.decode("utf-8", errors="replace").splitlines():
+                             if on_stderr:
+                                 on_stderr(line)
+
+                 # Get exit code
+                 inspect = container.client.api.exec_inspect(exec_id)
+                 exit_code = inspect.get("ExitCode", 0)
+
+                 if on_exit:
+                     on_exit(exit_code)
+
+             except Exception as e:
+                 if on_stderr:
+                     on_stderr(f"Error streaming output: {e}")
+                 if on_exit:
+                     on_exit(-1)
+             finally:
+                 # Cleanup exec instance and task tracking
+                 self._exec_instances.pop(exec_pid, None)
+                 self._stream_tasks.pop(exec_pid, None)
+
+         # Run in background and track the task
+         task = asyncio.create_task(stream_output())
+         self._stream_tasks[exec_pid] = task
+
+         return ProcessHandle(pid=exec_pid, context_type=self.context_type)
+
+     async def stop_process(self, handle: ProcessHandle, timeout: float = 10.0) -> int:
+         """
+         Stop a running process in a Docker container.
+
+         Note: Docker exec instances cannot be stopped directly. We can only
+         wait for them to complete or kill the container (which is destructive).
+         """
+         if handle.pid not in self._exec_instances:
+             # Cleanup any orphaned task
+             task = self._stream_tasks.pop(handle.pid, None)
+             if task and not task.done():
+                 task.cancel()
+                 try:
+                     await task
+                 except asyncio.CancelledError:
+                     pass
+             return 0  # Already done
+
+         container, exec_id = self._exec_instances[handle.pid]
+
+         # Check if still running
+         try:
+             inspect = container.client.api.exec_inspect(exec_id)
+             if not inspect.get("Running", False):
+                 exit_code = inspect.get("ExitCode", 0)
+                 self._exec_instances.pop(handle.pid, None)
+                 # Cancel stream task
+                 task = self._stream_tasks.pop(handle.pid, None)
+                 if task and not task.done():
+                     task.cancel()
+                     try:
+                         await task
+                     except asyncio.CancelledError:
+                         pass
+                 return exit_code
+         except Exception:
+             pass
+
+         # Wait for completion (can't really kill exec)
+         start_time = time.monotonic()
+         while time.monotonic() - start_time < timeout:
+             await asyncio.sleep(0.1)
+             try:
+                 inspect = container.client.api.exec_inspect(exec_id)
+                 if not inspect.get("Running", False):
+                     exit_code = inspect.get("ExitCode", 0)
+                     self._exec_instances.pop(handle.pid, None)
+                     task = self._stream_tasks.pop(handle.pid, None)
+                     if task and not task.done():
+                         task.cancel()
+                         try:
+                             await task
+                         except asyncio.CancelledError:
+                             pass
+                     return exit_code
+             except Exception:
+                 break
+
+         # Timeout - process may still be running, cleanup anyway
+         self._exec_instances.pop(handle.pid, None)
+         task = self._stream_tasks.pop(handle.pid, None)
+         if task and not task.done():
+             task.cancel()
+             try:
+                 await task
+             except asyncio.CancelledError:
+                 pass
+         return -1
+
+     async def is_running(self, handle: ProcessHandle) -> bool:
+         """Check if a process is still running."""
+         if handle.pid not in self._exec_instances:
+             return False
+
+         container, exec_id = self._exec_instances[handle.pid]
+
+         try:
+             inspect = container.client.api.exec_inspect(exec_id)
+             return inspect.get("Running", False)
+         except Exception:
+             return False
+
+     async def exec_command(
+         self,
+         command: str,
+         cwd: str | None = None,
+         env: dict[str, str] | None = None,
+         timeout: float | None = None,
+         container_name: str | None = None,
+     ) -> ExecResult:
+         """Execute a one-shot command inside a Docker container."""
+         if not container_name:
+             raise ValueError("container_name is required for Docker context")
+
+         container = self._get_container(container_name)
+
+         if container.status != "running":
+             return ExecResult(
+                 exit_code=-1,
+                 stdout="",
+                 stderr=f"Container '{container_name}' is not running (status: {container.status})",
+             )
+
+         # Build environment variables list
+         env_list = [f"{k}={v}" for k, v in (env or {}).items()]
+
+         try:
+             # Run command with exec
+             exit_code, output = container.exec_run(
+                 command,
+                 workdir=cwd,
+                 environment=env_list if env_list else None,
+                 demux=True,
+             )
+
+             stdout = ""
+             stderr = ""
+
+             if output:
+                 stdout_bytes, stderr_bytes = output
+                 if stdout_bytes:
+                     stdout = stdout_bytes.decode("utf-8", errors="replace")
+                 if stderr_bytes:
+                     stderr = stderr_bytes.decode("utf-8", errors="replace")
+
+             return ExecResult(
+                 exit_code=exit_code,
+                 stdout=stdout,
+                 stderr=stderr,
+             )
+
+         except APIError as e:
+             return ExecResult(
+                 exit_code=-1,
+                 stdout="",
+                 stderr=f"Docker API error: {e}",
+             )
+         except Exception as e:
+             return ExecResult(
+                 exit_code=-1,
+                 stdout="",
+                 stderr=f"Error executing command: {e}",
+             )
+
+     async def stream_logs(
+         self,
+         handle: ProcessHandle,
+         follow: bool = True,
+     ) -> AsyncIterator[tuple[str, str]]:
+         """
+         Stream logs from a running process in a Docker container.
+
+         Note: For Docker exec, logs are captured during exec_start.
+         This method is a placeholder for compatibility.
+         """
+         # Docker exec logs are streamed during execution, not after
+         # For container logs, use container.logs()
+         return
+         yield  # Make this a generator
+
+
+ # Global singleton for Docker context
+ _docker_context: DockerContext | None = None
+
+
+ def get_docker_context() -> DockerContext:
+     """Get the global DockerContext instance."""
+     global _docker_context
+     if _docker_context is None:
+         _docker_context = DockerContext()
+     return _docker_context
+
+
+ def is_docker_available() -> bool:
+     """Check if Docker is available."""
+     if not DOCKER_AVAILABLE:
+         return False
+     try:
+         client = docker.from_env()
+         client.ping()
+         return True
+     except Exception:
+         return False
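For orientation, here is a minimal sketch of how a caller might drive the DockerContext added above. The container name "my-app", the commands, and the callback bodies are illustrative assumptions, not part of the package; only the function and method names come from the module shown.

```python
import asyncio

from procler.core.context_docker import get_docker_context, is_docker_available


async def main() -> None:
    # Assumes a container named "my-app" is already running on this host.
    if not is_docker_available():
        raise SystemExit("Docker daemon not reachable")

    ctx = get_docker_context()
    ok, err = ctx.check_container_available("my-app")
    if not ok:
        raise SystemExit(err)

    # One-shot command: returns ExecResult(exit_code, stdout, stderr).
    result = await ctx.exec_command("ls /app", container_name="my-app")
    print(result.exit_code, result.stdout)

    # Long-running process: output is delivered through callbacks, and the
    # returned handle carries the exec PID (or a pseudo PID if Docker does
    # not report one).
    handle = await ctx.start_process(
        "sh -c 'while true; do date; sleep 1; done'",
        on_stdout=print,
        container_name="my-app",
    )
    await asyncio.sleep(3)

    # Docker exec instances cannot be killed directly; stop_process waits up
    # to `timeout` seconds and returns -1 if the command is still running.
    await ctx.stop_process(handle, timeout=5.0)


asyncio.run(main())
```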
procler/core/context_local.py
@@ -0,0 +1,287 @@
+ """Local subprocess execution context."""
+
+ import asyncio
+ import os
+ from collections.abc import AsyncIterator, Callable
+
+ from .context_base import ExecResult, ExecutionContext, ProcessHandle
+
+
+ class ManagedProcess:
+     """Internal wrapper for a managed asyncio subprocess."""
+
+     def __init__(
+         self,
+         process: asyncio.subprocess.Process,
+         stdout_task: asyncio.Task | None = None,
+         stderr_task: asyncio.Task | None = None,
+     ):
+         self.process = process
+         self.stdout_task = stdout_task
+         self.stderr_task = stderr_task
+         self._stdout_lines: list[str] = []
+         self._stderr_lines: list[str] = []
+
+     @property
+     def pid(self) -> int:
+         return self.process.pid
+
+     def is_running(self) -> bool:
+         return self.process.returncode is None
+
+     async def wait(self) -> int:
+         """Wait for process to complete and return exit code."""
+         return await self.process.wait()
+
+     def terminate(self) -> None:
+         """Send SIGTERM to the process."""
+         if self.is_running():
+             self.process.terminate()
+
+     def kill(self) -> None:
+         """Send SIGKILL to the process."""
+         if self.is_running():
+             self.process.kill()
+
+     async def cancel_io_tasks(self) -> None:
+         """Cancel the stdout/stderr reading tasks."""
+         for task in [self.stdout_task, self.stderr_task]:
+             if task and not task.done():
+                 task.cancel()
+                 try:
+                     await task
+                 except asyncio.CancelledError:
+                     pass
+
+
+ class LocalContext(ExecutionContext):
+     """Execute processes as local subprocesses using asyncio."""
+
+     def __init__(self):
+         self._processes: dict[int, ManagedProcess] = {}
+
+     @property
+     def context_type(self) -> str:
+         return "local"
+
+     async def start_process(
+         self,
+         command: str,
+         cwd: str | None = None,
+         env: dict[str, str] | None = None,
+         on_stdout: Callable[[str], None] | None = None,
+         on_stderr: Callable[[str], None] | None = None,
+         on_exit: Callable[[int], None] | None = None,
+     ) -> ProcessHandle:
+         """Start a long-running local subprocess."""
+         # Merge environment
+         process_env = os.environ.copy()
+         if env:
+             process_env.update(env)
+
+         # Start the process
+         process = await asyncio.create_subprocess_shell(
+             command,
+             stdout=asyncio.subprocess.PIPE,
+             stderr=asyncio.subprocess.PIPE,
+             cwd=cwd,
+             env=process_env,
+             start_new_session=True,  # Allows killing the whole process group
+         )
+
+         managed = ManagedProcess(process)
+
+         # Start tasks to read stdout/stderr
+         if process.stdout:
+             managed.stdout_task = asyncio.create_task(self._read_stream(process.stdout, "stdout", managed, on_stdout))
+
+         if process.stderr:
+             managed.stderr_task = asyncio.create_task(self._read_stream(process.stderr, "stderr", managed, on_stderr))
+
+         # Start a task to monitor process exit
+         if on_exit:
+             asyncio.create_task(self._monitor_exit(managed, on_exit))
+
+         self._processes[process.pid] = managed
+
+         return ProcessHandle(pid=process.pid, context_type=self.context_type)
+
+     async def _read_stream(
+         self,
+         stream: asyncio.StreamReader,
+         stream_type: str,
+         managed: ManagedProcess,
+         callback: Callable[[str], None] | None,
+         read_timeout: float = 60.0,
+     ) -> None:
+         """Read lines from a stream and invoke callback.
+
+         Args:
+             stream: The stream to read from
+             stream_type: "stdout" or "stderr"
+             managed: The managed process instance
+             callback: Optional callback for each line
+             read_timeout: Timeout for each readline() call (default 60s)
+         """
+         try:
+             while True:
+                 try:
+                     # Use timeout to prevent hanging forever on blocked streams
+                     line = await asyncio.wait_for(stream.readline(), timeout=read_timeout)
+                 except TimeoutError:
+                     # Check if process is still running
+                     if not managed.is_running():
+                         break
+                     # Process still running but no output - continue waiting
+                     continue
+
+                 if not line:
+                     break
+                 decoded = line.decode("utf-8", errors="replace").rstrip("\n\r")
+                 if stream_type == "stdout":
+                     managed._stdout_lines.append(decoded)
+                 else:
+                     managed._stderr_lines.append(decoded)
+                 if callback:
+                     callback(decoded)
+         except asyncio.CancelledError:
+             pass
+
+     async def _monitor_exit(
+         self,
+         managed: ManagedProcess,
+         callback: Callable[[int], None],
+     ) -> None:
+         """Monitor process exit and invoke callback."""
+         exit_code = await managed.wait()
+         callback(exit_code)
+
+     async def stop_process(self, handle: ProcessHandle, timeout: float = 10.0) -> int:
+         """Stop a running process gracefully, then forcefully if needed."""
+         managed = self._processes.get(handle.pid)
+         if not managed:
+             return -1
+
+         try:
+             if not managed.is_running():
+                 exit_code = managed.process.returncode or 0
+                 return exit_code
+
+             # Try graceful termination first
+             managed.terminate()
+
+             try:
+                 exit_code = await asyncio.wait_for(managed.wait(), timeout=timeout)
+             except TimeoutError:
+                 # Force kill if graceful shutdown times out
+                 managed.kill()
+                 exit_code = await managed.wait()
+
+             return exit_code
+         finally:
+             # Always cleanup IO tasks and remove from tracking dict
+             try:
+                 await managed.cancel_io_tasks()
+             except Exception:
+                 pass  # Best effort cleanup
+             self._processes.pop(handle.pid, None)
+
+     async def is_running(self, handle: ProcessHandle) -> bool:
+         """Check if a process is still running."""
+         managed = self._processes.get(handle.pid)
+         if not managed:
+             return False
+         return managed.is_running()
+
+     async def exec_command(
+         self,
+         command: str,
+         cwd: str | None = None,
+         env: dict[str, str] | None = None,
+         timeout: float | None = None,
+     ) -> ExecResult:
+         """Execute a one-shot command and wait for completion."""
+         process_env = os.environ.copy()
+         if env:
+             process_env.update(env)
+
+         process = await asyncio.create_subprocess_shell(
+             command,
+             stdout=asyncio.subprocess.PIPE,
+             stderr=asyncio.subprocess.PIPE,
+             cwd=cwd,
+             env=process_env,
+         )
+
+         try:
+             stdout, stderr = await asyncio.wait_for(
+                 process.communicate(),
+                 timeout=timeout,
+             )
+         except TimeoutError:
+             process.kill()
+             await process.wait()
+             return ExecResult(
+                 exit_code=-1,
+                 stdout="",
+                 stderr=f"Command timed out after {timeout} seconds",
+             )
+
+         return ExecResult(
+             exit_code=process.returncode or 0,
+             stdout=stdout.decode("utf-8", errors="replace"),
+             stderr=stderr.decode("utf-8", errors="replace"),
+         )
+
+     async def stream_logs(
+         self,
+         handle: ProcessHandle,
+         follow: bool = True,
+     ) -> AsyncIterator[tuple[str, str]]:
+         """Stream logs from a running process."""
+         managed = self._processes.get(handle.pid)
+         if not managed:
+             return
+
+         # First yield any buffered lines
+         for line in managed._stdout_lines:
+             yield ("stdout", line)
+         for line in managed._stderr_lines:
+             yield ("stderr", line)
+
+         if not follow:
+             return
+
+         # For follow mode, we'd need a more sophisticated approach
+         # with queues. For now, just poll the buffer.
+         last_stdout_idx = len(managed._stdout_lines)
+         last_stderr_idx = len(managed._stderr_lines)
+
+         while managed.is_running():
+             await asyncio.sleep(0.1)
+
+             # Check for new stdout lines
+             while last_stdout_idx < len(managed._stdout_lines):
+                 yield ("stdout", managed._stdout_lines[last_stdout_idx])
+                 last_stdout_idx += 1
+
+             # Check for new stderr lines
+             while last_stderr_idx < len(managed._stderr_lines):
+                 yield ("stderr", managed._stderr_lines[last_stderr_idx])
+                 last_stderr_idx += 1
+
+     def get_managed_process(self, pid: int) -> ManagedProcess | None:
+         """Get a managed process by PID (for internal use)."""
+         return self._processes.get(pid)
+
+
+ # Global singleton for the local context
+ _local_context: LocalContext | None = None
+
+
+ def get_local_context() -> LocalContext:
+     """Get the global LocalContext instance."""
+     global _local_context
+     if _local_context is None:
+         _local_context = LocalContext()
+     return _local_context
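And a similar minimal sketch for the LocalContext above. The shell commands and the lambda callbacks are illustrative assumptions; the method names, signatures, and the (stream, line) tuples yielded by stream_logs come from the module shown.

```python
import asyncio

from procler.core.context_local import get_local_context


async def main() -> None:
    ctx = get_local_context()

    # One-shot command with a timeout; ExecResult carries exit_code/stdout/stderr.
    result = await ctx.exec_command("echo hello", timeout=5.0)
    print(result.exit_code, result.stdout.strip())

    # Long-running subprocess; lines are delivered via callbacks as they are read.
    handle = await ctx.start_process(
        "sh -c 'for i in 1 2 3; do echo tick $i; sleep 1; done'",
        on_stdout=lambda line: print("OUT:", line),
        on_exit=lambda code: print("exited with", code),
    )

    # Replay buffered output, then poll for new lines until the process exits.
    async for stream, line in ctx.stream_logs(handle, follow=True):
        print(stream, line)

    # Graceful SIGTERM, escalating to SIGKILL after the timeout; a no-op here
    # if the process has already finished.
    await ctx.stop_process(handle, timeout=5.0)


asyncio.run(main())
```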