procler-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- procler/__init__.py +3 -0
- procler/__main__.py +6 -0
- procler/api/__init__.py +5 -0
- procler/api/app.py +261 -0
- procler/api/deps.py +21 -0
- procler/api/routes/__init__.py +5 -0
- procler/api/routes/config.py +290 -0
- procler/api/routes/groups.py +62 -0
- procler/api/routes/logs.py +43 -0
- procler/api/routes/processes.py +185 -0
- procler/api/routes/recipes.py +69 -0
- procler/api/routes/snippets.py +134 -0
- procler/api/routes/ws.py +459 -0
- procler/cli.py +1478 -0
- procler/config/__init__.py +65 -0
- procler/config/changelog.py +148 -0
- procler/config/loader.py +256 -0
- procler/config/schema.py +315 -0
- procler/core/__init__.py +54 -0
- procler/core/context_base.py +117 -0
- procler/core/context_docker.py +384 -0
- procler/core/context_local.py +287 -0
- procler/core/daemon_detector.py +325 -0
- procler/core/events.py +74 -0
- procler/core/groups.py +419 -0
- procler/core/health.py +280 -0
- procler/core/log_tailer.py +262 -0
- procler/core/process_manager.py +1277 -0
- procler/core/recipes.py +330 -0
- procler/core/snippets.py +231 -0
- procler/core/variable_substitution.py +65 -0
- procler/db.py +96 -0
- procler/logging.py +41 -0
- procler/models.py +130 -0
- procler/py.typed +0 -0
- procler/settings.py +29 -0
- procler/static/assets/AboutView-BwZnsfpW.js +4 -0
- procler/static/assets/AboutView-UHbxWXcS.css +1 -0
- procler/static/assets/Code-HTS-H1S6.js +74 -0
- procler/static/assets/ConfigView-CGJcmp9G.css +1 -0
- procler/static/assets/ConfigView-aVtbRDf8.js +1 -0
- procler/static/assets/DashboardView-C5jw9Nsd.css +1 -0
- procler/static/assets/DashboardView-Dab7Cu9v.js +1 -0
- procler/static/assets/DataTable-z39TOAa4.js +746 -0
- procler/static/assets/DescriptionsItem-B2E8YbqJ.js +74 -0
- procler/static/assets/Divider-Dk-6aD2Y.js +42 -0
- procler/static/assets/Empty-MuygEHZM.js +24 -0
- procler/static/assets/Grid-CZ9QVKAT.js +1 -0
- procler/static/assets/GroupsView-BALG7i1X.js +1 -0
- procler/static/assets/GroupsView-gXAI1CVC.css +1 -0
- procler/static/assets/Input-e0xaxoWE.js +259 -0
- procler/static/assets/PhArrowsClockwise.vue-DqDg31az.js +1 -0
- procler/static/assets/PhCheckCircle.vue-Fwj9sh9m.js +1 -0
- procler/static/assets/PhEye.vue-JcPHciC2.js +1 -0
- procler/static/assets/PhPlay.vue-CZm7Gy3u.js +1 -0
- procler/static/assets/PhPlus.vue-yTWqKlSh.js +1 -0
- procler/static/assets/PhStop.vue-DxsqwIki.js +1 -0
- procler/static/assets/PhTrash.vue-DcqQbN1_.js +125 -0
- procler/static/assets/PhXCircle.vue-BXWmrabV.js +1 -0
- procler/static/assets/ProcessDetailView-DDbtIWq9.css +1 -0
- procler/static/assets/ProcessDetailView-DPtdNV-q.js +1 -0
- procler/static/assets/ProcessesView-B3a6Umur.js +1 -0
- procler/static/assets/ProcessesView-goLmghbJ.css +1 -0
- procler/static/assets/RecipesView-D2VxdneD.js +166 -0
- procler/static/assets/RecipesView-DXnFDCK4.css +1 -0
- procler/static/assets/Select-BBR17AHq.js +317 -0
- procler/static/assets/SnippetsView-B3a9q3AI.css +1 -0
- procler/static/assets/SnippetsView-DBCB2yGq.js +1 -0
- procler/static/assets/Spin-BXTjvFUk.js +90 -0
- procler/static/assets/Tag-Bh_qV63A.js +71 -0
- procler/static/assets/changelog-KkTT4H9-.js +1 -0
- procler/static/assets/groups-Zu-_v8ey.js +1 -0
- procler/static/assets/index-BsN-YMXq.css +1 -0
- procler/static/assets/index-BzW1XhyH.js +1282 -0
- procler/static/assets/procler-DOrSB1Vj.js +1 -0
- procler/static/assets/recipes-1w5SseGb.js +1 -0
- procler/static/index.html +17 -0
- procler/static/procler.png +0 -0
- procler-0.2.0.dist-info/METADATA +545 -0
- procler-0.2.0.dist-info/RECORD +83 -0
- procler-0.2.0.dist-info/WHEEL +4 -0
- procler-0.2.0.dist-info/entry_points.txt +2 -0
- procler-0.2.0.dist-info/licenses/LICENSE +21 -0
procler/core/daemon_detector.py
ADDED
@@ -0,0 +1,325 @@
+"""Daemon detection module for tracking forking daemons.
+
+This module provides functionality to detect and track daemon processes
+that fork to background after starting. It supports two detection methods:
+1. Pidfile-based detection (daemon writes PID to file)
+2. Process name pattern matching (grep ps aux output)
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import shlex
+import time
+from dataclasses import dataclass
+
+logger = logging.getLogger(__name__)
+
+
+def _quote_user(user: str | int | None) -> str:
+    """Quote user parameter for shell commands."""
+    if user is None:
+        return "1000"
+    return shlex.quote(str(user))
+
+
+@dataclass
+class ProcessInfo:
+    """Information about a detected process."""
+
+    pid: int
+    command: str
+    user: str | None = None
+
+
+class DaemonDetector:
+    """Detects and tracks daemon processes after fork.
+
+    This class provides methods to find daemon PIDs using either pidfile
+    reading or process name pattern matching. It works with both local
+    processes and processes running inside Docker containers.
+    """
+
+    async def find_daemon_pid(
+        self,
+        pattern: str | None = None,
+        pidfile: str | None = None,
+        container: str | None = None,
+        user: str | int | None = None,
+    ) -> int | None:
+        """Find daemon PID using pidfile or pattern matching.
+
+        Tries pidfile first if specified, falls back to pattern matching.
+
+        Args:
+            pattern: Process name pattern to grep for (e.g., "msgd")
+            pidfile: Path to pidfile containing daemon PID
+            container: Docker container name (None for local processes)
+            user: User to run as in container (e.g., 1000 or "product")
+
+        Returns:
+            The daemon's PID if found, None otherwise
+        """
+        # Try pidfile first if specified
+        if pidfile:
+            pid = await self._read_pidfile(pidfile, container, user)
+            if pid:
+                logger.debug(f"Found daemon PID {pid} from pidfile {pidfile}")
+                return pid
+            logger.debug(f"Pidfile {pidfile} not found or empty, trying pattern")
+
+        # Fall back to pattern matching
+        if pattern:
+            pid = await self._find_by_pattern(pattern, container, user)
+            if pid:
+                logger.debug(f"Found daemon PID {pid} by pattern '{pattern}'")
+                return pid
+
+        logger.debug("Daemon not found by any method")
+        return None
+
+    async def wait_for_fork(
+        self,
+        pattern: str,
+        container: str | None = None,
+        user: str | int | None = None,
+        timeout: float = 5.0,
+        poll_interval: float = 0.2,
+    ) -> int | None:
+        """Wait for daemon to fork from parent process.
+
+        Polls for the daemon process until it appears or timeout.
+        Use this after starting a daemon that forks to background.
+
+        Args:
+            pattern: Process name pattern to grep for
+            container: Docker container name (None for local processes)
+            user: User to run as in container
+            timeout: Maximum time to wait in seconds
+            poll_interval: Time between polls in seconds
+
+        Returns:
+            The daemon's PID if found within timeout, None otherwise
+        """
+        start_time = time.monotonic()
+        attempts = 0
+
+        while (time.monotonic() - start_time) < timeout:
+            attempts += 1
+            pid = await self._find_by_pattern(pattern, container, user)
+            if pid:
+                logger.debug(f"Found forked daemon PID {pid} after {attempts} attempts")
+                return pid
+            await asyncio.sleep(poll_interval)
+
+        logger.warning(f"Daemon with pattern '{pattern}' not found after {timeout}s " f"({attempts} attempts)")
+        return None
+
+    async def is_pid_running(
+        self,
+        pid: int,
+        container: str | None = None,
+        user: str | int | None = None,
+    ) -> bool:
+        """Check if a PID is currently running.
+
+        Args:
+            pid: Process ID to check
+            container: Docker container name (None for local processes)
+            user: User to run as in container
+
+        Returns:
+            True if process is running, False otherwise
+        """
+        if container:
+            cmd = f"docker exec -u {_quote_user(user)} {shlex.quote(container)} ps -p {pid}"
+        else:
+            cmd = f"ps -p {pid}"
+
+        try:
+            proc = await asyncio.create_subprocess_shell(
+                cmd,
+                stdout=asyncio.subprocess.PIPE,
+                stderr=asyncio.subprocess.PIPE,
+            )
+            await proc.communicate()
+            return proc.returncode == 0
+        except Exception as e:
+            logger.error(f"Error checking PID {pid}: {e}")
+            return False
+
+    async def list_processes(
+        self,
+        container: str | None = None,
+        user: str | int | None = None,
+    ) -> list[ProcessInfo]:
+        """List all processes (for debugging/inspection).
+
+        Args:
+            container: Docker container name (None for local processes)
+            user: User to run as in container
+
+        Returns:
+            List of ProcessInfo objects for all running processes
+        """
+        if container:
+            cmd = f"docker exec -u {_quote_user(user)} {shlex.quote(container)} ps aux"
+        else:
+            cmd = "ps aux"
+
+        try:
+            proc = await asyncio.create_subprocess_shell(
+                cmd,
+                stdout=asyncio.subprocess.PIPE,
+                stderr=asyncio.subprocess.PIPE,
+            )
+            stdout, _ = await proc.communicate()
+
+            processes = []
+            for line in stdout.decode().strip().split("\n")[1:]:  # Skip header
+                parts = line.split(None, 10)  # Split into max 11 parts
+                if len(parts) >= 11:
+                    processes.append(
+                        ProcessInfo(
+                            pid=int(parts[1]),
+                            command=parts[10],
+                            user=parts[0],
+                        )
+                    )
+            return processes
+        except Exception as e:
+            logger.error(f"Error listing processes: {e}")
+            return []
+
+    async def _read_pidfile(
+        self,
+        pidfile: str,
+        container: str | None = None,
+        user: str | int | None = None,
+    ) -> int | None:
+        """Read PID from a pidfile.
+
+        Args:
+            pidfile: Path to the pidfile
+            container: Docker container name (None for local processes)
+            user: User to run as in container
+
+        Returns:
+            The PID from the file, or None if file doesn't exist/is invalid
+        """
+        # Validate pidfile path - reject path traversal attempts
+        if ".." in pidfile or pidfile.startswith("/etc/") or pidfile.startswith("/root/"):
+            logger.warning(f"Suspicious pidfile path rejected: {pidfile}")
+            return None
+
+        if container:
+            cmd = f"docker exec -u {_quote_user(user)} {shlex.quote(container)} cat {shlex.quote(pidfile)}"
+        else:
+            cmd = f"cat {shlex.quote(pidfile)}"
+
+        try:
+            proc = await asyncio.create_subprocess_shell(
+                cmd,
+                stdout=asyncio.subprocess.PIPE,
+                stderr=asyncio.subprocess.PIPE,
+            )
+            stdout, _ = await proc.communicate()
+
+            if proc.returncode == 0:
+                pid_str = stdout.decode().strip()
+                if pid_str.isdigit():
+                    pid = int(pid_str)
+                    # Verify PID is actually running
+                    if await self.is_pid_running(pid, container, user):
+                        return pid
+                    logger.debug(f"PID {pid} from pidfile is not running")
+            return None
+        except Exception as e:
+            logger.error(f"Error reading pidfile {pidfile}: {e}")
+            return None
+
+    async def _find_by_pattern(
+        self,
+        pattern: str,
+        container: str | None = None,
+        user: str | int | None = None,
+    ) -> int | None:
+        """Find process PID by pattern matching.
+
+        Uses ps aux | grep to find processes matching the pattern.
+        Takes the first (oldest) match if multiple exist.
+
+        Args:
+            pattern: Process name pattern to grep for
+            container: Docker container name (None for local processes)
+            user: User to run as in container
+
+        Returns:
+            The PID of the matching process, or None if not found
+        """
+        # Use bracket trick to avoid matching grep itself
+        # e.g., "msgd" becomes "[m]sgd"
+        safe_pattern = self._make_grep_pattern(pattern)
+
+        # Escape pattern for shell - use shlex.quote and strip outer quotes for grep
+        # since we're already inside quotes
+        escaped_pattern = shlex.quote(safe_pattern)[1:-1]  # Remove outer quotes added by shlex
+
+        if container:
+            docker_cmd = f"docker exec -u {_quote_user(user)} {shlex.quote(container)}"
+            cmd = f"{docker_cmd} bash -c \"ps aux | grep '{escaped_pattern}'\""
+        else:
+            cmd = f"ps aux | grep '{escaped_pattern}'"
+
+        try:
+            proc = await asyncio.create_subprocess_shell(
+                cmd,
+                stdout=asyncio.subprocess.PIPE,
+                stderr=asyncio.subprocess.PIPE,
+            )
+            stdout, _ = await proc.communicate()
+
+            if proc.returncode == 0 and stdout:
+                # Take the first line (oldest process)
+                lines = stdout.decode().strip().split("\n")
+                if lines and lines[0]:
+                    parts = lines[0].split()
+                    if len(parts) >= 2:
+                        try:
+                            return int(parts[1])  # PID is second column
+                        except ValueError:
+                            pass
+            return None
+        except Exception as e:
+            logger.error(f"Error finding process by pattern '{pattern}': {e}")
+            return None
+
+    def _make_grep_pattern(self, pattern: str) -> str:
+        """Convert pattern to grep-safe format using bracket trick.
+
+        The bracket trick prevents grep from matching its own process.
+        e.g., "msgd" becomes "[m]sgd"
+
+        Args:
+            pattern: Original pattern string
+
+        Returns:
+            Pattern with first character in brackets
+        """
+        if not pattern:
+            return pattern
+        # Put first character in brackets to avoid self-match
+        return f"[{pattern[0]}]{pattern[1:]}"
+
+
+# Singleton instance
+_detector: DaemonDetector | None = None
+
+
+def get_daemon_detector() -> DaemonDetector:
+    """Get the singleton DaemonDetector instance."""
+    global _detector
+    if _detector is None:
+        _detector = DaemonDetector()
+    return _detector
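To make the new module easier to review, here is a minimal usage sketch; it is not part of the package. It exercises the detector API shown in the diff above (get_daemon_detector, wait_for_fork, find_daemon_pid, is_pid_running). The "nginx" pattern and the pidfile path are hypothetical placeholder values; note that internally _find_by_pattern rewrites a pattern such as "nginx" to "[n]ginx" so the grep it runs does not match itself.

# Usage sketch (illustrative only, not shipped in the package). Assumes the
# daemon_detector API reconstructed above; "nginx" and the pidfile path are
# hypothetical placeholder values.
import asyncio

from procler.core.daemon_detector import get_daemon_detector


async def main() -> None:
    detector = get_daemon_detector()

    # Poll until a process matching the pattern appears, or give up after 10s.
    pid = await detector.wait_for_fork("nginx", timeout=10.0)
    if pid is None:
        print("daemon did not appear within the timeout")
        return
    print(f"daemon forked to PID {pid}")

    # Later lookups can combine both methods: the pidfile is tried first,
    # the ps/grep pattern match is the fallback.
    pid = await detector.find_daemon_pid(
        pattern="nginx",
        pidfile="/var/run/nginx.pid",  # hypothetical path
    )
    if pid is not None and await detector.is_pid_running(pid):
        print(f"daemon still running as PID {pid}")


if __name__ == "__main__":
    asyncio.run(main())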
procler/core/events.py
ADDED
@@ -0,0 +1,74 @@
+"""Event system for broadcasting status and log updates."""
+
+import asyncio
+import logging
+from collections.abc import Callable, Coroutine
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+# Type for async event handlers
+EventHandler = Callable[[dict[str, Any]], Coroutine[Any, Any, None]]
+
+
+class EventBus:
+    """Simple event bus for broadcasting events to subscribers."""
+
+    def __init__(self):
+        self._handlers: dict[str, list[EventHandler]] = {}
+
+    def subscribe(self, event_type: str, handler: EventHandler) -> None:
+        """Subscribe to an event type."""
+        if event_type not in self._handlers:
+            self._handlers[event_type] = []
+        self._handlers[event_type].append(handler)
+
+    def unsubscribe(self, event_type: str, handler: EventHandler) -> None:
+        """Unsubscribe from an event type."""
+        if event_type in self._handlers:
+            try:
+                self._handlers[event_type].remove(handler)
+            except ValueError:
+                pass
+
+    async def emit(self, event_type: str, data: dict[str, Any]) -> None:
+        """Emit an event to all subscribers."""
+        handlers = self._handlers.get(event_type, [])
+        for handler in handlers:
+            try:
+                await handler(data)
+            except Exception as e:
+                # Log but don't let one handler break others
+                logger.debug(f"Event handler error for {event_type}: {e}")
+
+    def emit_sync(self, event_type: str, data: dict[str, Any]) -> None:
+        """Emit an event synchronously (creates task if in async context)."""
+        try:
+            loop = asyncio.get_running_loop()
+            loop.create_task(self.emit(event_type, data))
+        except RuntimeError:
+            # No running event loop, skip
+            pass
+
+
+# Event types
+EVENT_STATUS_CHANGE = "status_change"
+EVENT_LOG_ENTRY = "log_entry"
+EVENT_RECIPE_STEP = "recipe_step"  # Recipe execution progress
+
+# Global event bus
+_event_bus: EventBus | None = None
+
+
+def get_event_bus() -> EventBus:
+    """Get the global EventBus instance."""
+    global _event_bus
+    if _event_bus is None:
+        _event_bus = EventBus()
+    return _event_bus
+
+
+def reset_event_bus() -> None:
+    """Reset the global EventBus (for testing)."""
+    global _event_bus
+    _event_bus = None
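Likewise, a brief sketch (not part of the package) of how the event bus above is meant to be consumed. The handler function and the payload keys "name" and "status" are invented for the example; the imported names come from the module shown in the diff.

# Usage sketch (illustrative only). Assumes the events API reconstructed
# above; the handler and payload keys are hypothetical.
import asyncio

from procler.core.events import EVENT_STATUS_CHANGE, get_event_bus


async def on_status_change(data: dict) -> None:
    # Handlers receive the raw event payload dict.
    print(f"process {data.get('name')} is now {data.get('status')}")


async def main() -> None:
    bus = get_event_bus()
    bus.subscribe(EVENT_STATUS_CHANGE, on_status_change)

    # emit awaits each subscriber in turn; a handler that raises is logged
    # and does not prevent the remaining handlers from running.
    await bus.emit(EVENT_STATUS_CHANGE, {"name": "web", "status": "running"})

    # emit_sync schedules the same broadcast as a task when an event loop is
    # already running, and silently does nothing otherwise.
    bus.emit_sync(EVENT_STATUS_CHANGE, {"name": "web", "status": "stopped"})
    await asyncio.sleep(0)  # yield once so the scheduled task can run

    bus.unsubscribe(EVENT_STATUS_CHANGE, on_status_change)


if __name__ == "__main__":
    asyncio.run(main())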