procler-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- procler/__init__.py +3 -0
- procler/__main__.py +6 -0
- procler/api/__init__.py +5 -0
- procler/api/app.py +261 -0
- procler/api/deps.py +21 -0
- procler/api/routes/__init__.py +5 -0
- procler/api/routes/config.py +290 -0
- procler/api/routes/groups.py +62 -0
- procler/api/routes/logs.py +43 -0
- procler/api/routes/processes.py +185 -0
- procler/api/routes/recipes.py +69 -0
- procler/api/routes/snippets.py +134 -0
- procler/api/routes/ws.py +459 -0
- procler/cli.py +1478 -0
- procler/config/__init__.py +65 -0
- procler/config/changelog.py +148 -0
- procler/config/loader.py +256 -0
- procler/config/schema.py +315 -0
- procler/core/__init__.py +54 -0
- procler/core/context_base.py +117 -0
- procler/core/context_docker.py +384 -0
- procler/core/context_local.py +287 -0
- procler/core/daemon_detector.py +325 -0
- procler/core/events.py +74 -0
- procler/core/groups.py +419 -0
- procler/core/health.py +280 -0
- procler/core/log_tailer.py +262 -0
- procler/core/process_manager.py +1277 -0
- procler/core/recipes.py +330 -0
- procler/core/snippets.py +231 -0
- procler/core/variable_substitution.py +65 -0
- procler/db.py +96 -0
- procler/logging.py +41 -0
- procler/models.py +130 -0
- procler/py.typed +0 -0
- procler/settings.py +29 -0
- procler/static/assets/AboutView-BwZnsfpW.js +4 -0
- procler/static/assets/AboutView-UHbxWXcS.css +1 -0
- procler/static/assets/Code-HTS-H1S6.js +74 -0
- procler/static/assets/ConfigView-CGJcmp9G.css +1 -0
- procler/static/assets/ConfigView-aVtbRDf8.js +1 -0
- procler/static/assets/DashboardView-C5jw9Nsd.css +1 -0
- procler/static/assets/DashboardView-Dab7Cu9v.js +1 -0
- procler/static/assets/DataTable-z39TOAa4.js +746 -0
- procler/static/assets/DescriptionsItem-B2E8YbqJ.js +74 -0
- procler/static/assets/Divider-Dk-6aD2Y.js +42 -0
- procler/static/assets/Empty-MuygEHZM.js +24 -0
- procler/static/assets/Grid-CZ9QVKAT.js +1 -0
- procler/static/assets/GroupsView-BALG7i1X.js +1 -0
- procler/static/assets/GroupsView-gXAI1CVC.css +1 -0
- procler/static/assets/Input-e0xaxoWE.js +259 -0
- procler/static/assets/PhArrowsClockwise.vue-DqDg31az.js +1 -0
- procler/static/assets/PhCheckCircle.vue-Fwj9sh9m.js +1 -0
- procler/static/assets/PhEye.vue-JcPHciC2.js +1 -0
- procler/static/assets/PhPlay.vue-CZm7Gy3u.js +1 -0
- procler/static/assets/PhPlus.vue-yTWqKlSh.js +1 -0
- procler/static/assets/PhStop.vue-DxsqwIki.js +1 -0
- procler/static/assets/PhTrash.vue-DcqQbN1_.js +125 -0
- procler/static/assets/PhXCircle.vue-BXWmrabV.js +1 -0
- procler/static/assets/ProcessDetailView-DDbtIWq9.css +1 -0
- procler/static/assets/ProcessDetailView-DPtdNV-q.js +1 -0
- procler/static/assets/ProcessesView-B3a6Umur.js +1 -0
- procler/static/assets/ProcessesView-goLmghbJ.css +1 -0
- procler/static/assets/RecipesView-D2VxdneD.js +166 -0
- procler/static/assets/RecipesView-DXnFDCK4.css +1 -0
- procler/static/assets/Select-BBR17AHq.js +317 -0
- procler/static/assets/SnippetsView-B3a9q3AI.css +1 -0
- procler/static/assets/SnippetsView-DBCB2yGq.js +1 -0
- procler/static/assets/Spin-BXTjvFUk.js +90 -0
- procler/static/assets/Tag-Bh_qV63A.js +71 -0
- procler/static/assets/changelog-KkTT4H9-.js +1 -0
- procler/static/assets/groups-Zu-_v8ey.js +1 -0
- procler/static/assets/index-BsN-YMXq.css +1 -0
- procler/static/assets/index-BzW1XhyH.js +1282 -0
- procler/static/assets/procler-DOrSB1Vj.js +1 -0
- procler/static/assets/recipes-1w5SseGb.js +1 -0
- procler/static/index.html +17 -0
- procler/static/procler.png +0 -0
- procler-0.2.0.dist-info/METADATA +545 -0
- procler-0.2.0.dist-info/RECORD +83 -0
- procler-0.2.0.dist-info/WHEEL +4 -0
- procler-0.2.0.dist-info/entry_points.txt +2 -0
- procler-0.2.0.dist-info/licenses/LICENSE +21 -0
procler/core/log_tailer.py
@@ -0,0 +1,262 @@
"""Log file tailer for live streaming of daemon process logs."""

import asyncio
import logging
from datetime import datetime
from pathlib import Path

from ..models import Process
from .events import EVENT_LOG_ENTRY, get_event_bus
from .variable_substitution import substitute_vars_from_config

logger = logging.getLogger(__name__)

# Polling interval for file changes (seconds)
POLL_INTERVAL = 0.5

# Maximum lines to read per poll (prevents memory issues)
MAX_LINES_PER_POLL = 1000


class LogFileTailer:
    """Tails log files and emits events for new lines.

    This enables live log streaming for daemon processes that write to log files
    instead of having their stdout/stderr captured directly.
    """

    def __init__(self):
        # process_id -> tail task
        self._watchers: dict[int, asyncio.Task] = {}
        # process_id -> last file position (bytes)
        self._positions: dict[int, int] = {}

    async def start_tailing(self, process: Process) -> bool:
        """Start tailing a process's log file.

        Args:
            process: The process to tail logs for

        Returns:
            True if tailing started, False if not applicable
        """
        process_id = process._id
        log_file = getattr(process, "log_file", None)

        if not log_file:
            logger.debug(f"Process {process.name} has no log_file, skipping tail")
            return False

        # Already tailing this process
        if process_id in self._watchers:
            logger.debug(f"Already tailing process {process.name}")
            return True

        # Determine if we need to tail from container or locally
        # Use daemon_container if set, otherwise fall back to container_name for docker context
        raw_container = getattr(process, "daemon_container", None) or (
            getattr(process, "container_name", None) if getattr(process, "context_type", "local") == "docker" else None
        )
        container = substitute_vars_from_config(raw_container) if raw_container else None

        # Start at end of file (don't replay history - that's what logs() is for)
        if container:
            initial_size = await self._get_container_file_size(container, log_file)
        else:
            initial_size = self._get_local_file_size(log_file)

        self._positions[process_id] = initial_size

        # Create tail task
        if container:
            task = asyncio.create_task(self._tail_container_file(process_id, process.name, container, log_file))
        else:
            task = asyncio.create_task(self._tail_local_file(process_id, process.name, log_file))

        self._watchers[process_id] = task
        logger.info(f"Started tailing logs for {process.name} ({log_file})")
        return True

    async def stop_tailing(self, process_id: int) -> None:
        """Stop tailing a process's log file.

        Args:
            process_id: The process ID to stop tailing
        """
        task = self._watchers.pop(process_id, None)
        if task:
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
            logger.debug(f"Stopped tailing logs for process {process_id}")

        self._positions.pop(process_id, None)

    def is_tailing(self, process_id: int) -> bool:
        """Check if we're currently tailing a process."""
        return process_id in self._watchers

    async def stop_all(self) -> None:
        """Stop all active tailers."""
        for process_id in list(self._watchers.keys()):
            await self.stop_tailing(process_id)

    def _get_local_file_size(self, file_path: str) -> int:
        """Get the current size of a local file."""
        try:
            return Path(file_path).stat().st_size
        except OSError:
            return 0

    async def _get_container_file_size(self, container: str, file_path: str) -> int:
        """Get the current size of a file inside a Docker container."""
        try:
            proc = await asyncio.create_subprocess_exec(
                "docker",
                "exec",
                container,
                "stat",
                "-c",
                "%s",
                file_path,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            stdout, _ = await proc.communicate()
            if proc.returncode == 0:
                return int(stdout.decode().strip())
        except (ValueError, OSError):
            pass
        return 0

    async def _tail_local_file(self, process_id: int, process_name: str, file_path: str) -> None:
        """Tail a local log file using polling."""
        logger.debug(f"Starting local file tail for {process_name}: {file_path}")

        while True:
            try:
                await asyncio.sleep(POLL_INTERVAL)

                current_size = self._get_local_file_size(file_path)
                last_position = self._positions.get(process_id, 0)

                # File was truncated (e.g., log rotation)
                if current_size < last_position:
                    logger.debug(f"Log file {file_path} was truncated, resetting position")
                    last_position = 0

                # No new content
                if current_size <= last_position:
                    continue

                # Read new content
                try:
                    with open(file_path, "rb") as f:
                        f.seek(last_position)
                        new_content = f.read(current_size - last_position)
                        self._positions[process_id] = f.tell()
                except OSError as e:
                    logger.debug(f"Error reading {file_path}: {e}")
                    continue

                # Emit each new line
                await self._emit_lines(process_id, new_content)

            except asyncio.CancelledError:
                raise
            except Exception as e:
                logger.debug(f"Error tailing {file_path}: {e}")
                await asyncio.sleep(POLL_INTERVAL)

    async def _tail_container_file(self, process_id: int, process_name: str, container: str, file_path: str) -> None:
        """Tail a log file inside a Docker container using polling."""
        logger.debug(f"Starting container file tail for {process_name}: {container}:{file_path}")

        while True:
            try:
                await asyncio.sleep(POLL_INTERVAL)

                current_size = await self._get_container_file_size(container, file_path)
                last_position = self._positions.get(process_id, 0)

                # File was truncated
                if current_size < last_position:
                    logger.debug(f"Log file {file_path} in {container} was truncated")
                    last_position = 0

                # No new content
                if current_size <= last_position:
                    continue

                # Read new content from container
                # Using tail -c +N reads from byte N onwards
                try:
                    proc = await asyncio.create_subprocess_exec(
                        "docker",
                        "exec",
                        container,
                        "tail",
                        "-c",
                        f"+{last_position + 1}",  # tail uses 1-based offset
                        file_path,
                        stdout=asyncio.subprocess.PIPE,
                        stderr=asyncio.subprocess.PIPE,
                    )
                    stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=10.0)
                    if proc.returncode == 0 and stdout:
                        self._positions[process_id] = current_size
                        await self._emit_lines(process_id, stdout)
                except TimeoutError:
                    logger.debug(f"Timeout reading from {container}:{file_path}")
                except OSError as e:
                    logger.debug(f"Error reading from {container}:{file_path}: {e}")

            except asyncio.CancelledError:
                raise
            except Exception as e:
                logger.debug(f"Error tailing {container}:{file_path}: {e}")
                await asyncio.sleep(POLL_INTERVAL)

    async def _emit_lines(self, process_id: int, content: bytes) -> None:
        """Emit log entry events for each line in content."""
        try:
            text = content.decode("utf-8", errors="replace")
        except Exception:
            return

        lines = text.splitlines()
        event_bus = get_event_bus()
        timestamp = datetime.now().isoformat()

        line_count = 0
        for line in lines:
            if not line:
                continue
            if line_count >= MAX_LINES_PER_POLL:
                logger.debug(f"Hit max lines per poll ({MAX_LINES_PER_POLL})")
                break

            event_bus.emit_sync(
                EVENT_LOG_ENTRY,
                {
                    "process_id": process_id,
                    "stream": "stdout",
                    "line": line,
                    "timestamp": timestamp,
                },
            )
            line_count += 1


# Global singleton
_tailer: LogFileTailer | None = None


def get_log_tailer() -> LogFileTailer:
    """Get the global LogFileTailer instance."""
    global _tailer
    if _tailer is None:
        _tailer = LogFileTailer()
    return _tailer
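
For orientation, a minimal sketch of the tailer's lifecycle follows. start_tailing() only reads _id, name, and a handful of optional attributes (log_file, daemon_container, context_type, container_name) from the process object, so a duck-typed stand-in works here; the real Process model lives in procler/models.py, whose constructor this section does not show, and StubProcess below is hypothetical.

    import asyncio
    from dataclasses import dataclass

    from procler.core.log_tailer import get_log_tailer


    @dataclass
    class StubProcess:
        # Duck-typed stand-in: only the attributes start_tailing() reads.
        _id: int
        name: str
        log_file: str | None = None          # local path to poll
        daemon_container: str | None = None  # set to poll inside a container instead


    async def main() -> None:
        tailer = get_log_tailer()
        proc = StubProcess(_id=1, name="worker", log_file="/var/log/worker.log")

        # Tailing begins at the current end of file; history replay is
        # deliberately out of scope ("that's what logs() is for").
        assert await tailer.start_tailing(proc)
        assert tailer.is_tailing(proc._id)

        await asyncio.sleep(5)   # new lines are emitted as EVENT_LOG_ENTRY events meanwhile
        await tailer.stop_all()  # cancels every active tail task


    asyncio.run(main())

In the package itself this wiring presumably lives alongside process_manager.py; the sketch only exercises the public surface of this module.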
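On the consuming side, _emit_lines() publishes one EVENT_LOG_ENTRY per non-empty line with exactly the four keys shown below. How handlers are registered is defined by the bus in procler/core/events.py, which this diff section does not show, so the registration step is omitted; handle_log_entry is a hypothetical handler name.

    def handle_log_entry(payload: dict) -> None:
        # Keys match what _emit_lines() emits; "stream" is always "stdout"
        # for tailed files, since a log file does not distinguish stderr.
        print(f'[{payload["timestamp"]}] pid={payload["process_id"]} '
              f'{payload["stream"]}: {payload["line"]}')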
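One detail worth calling out: the container branch leans on coreutils tail -c +N, which prints starting from byte N counted from 1, which is why _tail_container_file() passes last_position + 1. A quick local check of that off-by-one (pure illustration; assumes a POSIX tail on PATH):

    import os
    import subprocess
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"0123456789")

    # After consuming 5 bytes, the next read starts at 1-based offset +6.
    out = subprocess.run(["tail", "-c", "+6", f.name], capture_output=True, check=True).stdout
    assert out == b"56789"
    os.unlink(f.name)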