meerschaum-2.2.7-py3-none-any.whl → meerschaum-2.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- meerschaum/__init__.py +6 -1
- meerschaum/__main__.py +0 -5
- meerschaum/_internal/arguments/__init__.py +1 -1
- meerschaum/_internal/arguments/_parse_arguments.py +72 -6
- meerschaum/_internal/arguments/_parser.py +45 -15
- meerschaum/_internal/docs/index.py +265 -8
- meerschaum/_internal/entry.py +154 -24
- meerschaum/_internal/shell/Shell.py +264 -77
- meerschaum/actions/__init__.py +29 -17
- meerschaum/actions/api.py +12 -12
- meerschaum/actions/attach.py +113 -0
- meerschaum/actions/copy.py +68 -41
- meerschaum/actions/delete.py +112 -50
- meerschaum/actions/edit.py +3 -3
- meerschaum/actions/install.py +40 -32
- meerschaum/actions/pause.py +44 -27
- meerschaum/actions/restart.py +107 -0
- meerschaum/actions/show.py +130 -159
- meerschaum/actions/start.py +161 -100
- meerschaum/actions/stop.py +78 -42
- meerschaum/api/_events.py +25 -1
- meerschaum/api/_oauth2.py +2 -0
- meerschaum/api/_websockets.py +2 -2
- meerschaum/api/dash/callbacks/jobs.py +36 -44
- meerschaum/api/dash/jobs.py +89 -78
- meerschaum/api/routes/__init__.py +1 -0
- meerschaum/api/routes/_actions.py +148 -17
- meerschaum/api/routes/_jobs.py +407 -0
- meerschaum/api/routes/_pipes.py +5 -5
- meerschaum/config/_default.py +1 -0
- meerschaum/config/_jobs.py +1 -1
- meerschaum/config/_paths.py +7 -0
- meerschaum/config/_shell.py +8 -3
- meerschaum/config/_version.py +1 -1
- meerschaum/config/static/__init__.py +17 -0
- meerschaum/connectors/Connector.py +13 -7
- meerschaum/connectors/__init__.py +28 -15
- meerschaum/connectors/api/APIConnector.py +27 -1
- meerschaum/connectors/api/_actions.py +71 -6
- meerschaum/connectors/api/_jobs.py +368 -0
- meerschaum/connectors/api/_pipes.py +85 -84
- meerschaum/connectors/parse.py +27 -15
- meerschaum/core/Pipe/_bootstrap.py +16 -8
- meerschaum/jobs/_Executor.py +69 -0
- meerschaum/jobs/_Job.py +899 -0
- meerschaum/jobs/__init__.py +396 -0
- meerschaum/jobs/systemd.py +694 -0
- meerschaum/plugins/__init__.py +97 -12
- meerschaum/utils/daemon/Daemon.py +276 -30
- meerschaum/utils/daemon/FileDescriptorInterceptor.py +5 -5
- meerschaum/utils/daemon/RotatingFile.py +14 -7
- meerschaum/utils/daemon/StdinFile.py +121 -0
- meerschaum/utils/daemon/__init__.py +15 -7
- meerschaum/utils/daemon/_names.py +15 -13
- meerschaum/utils/formatting/__init__.py +2 -1
- meerschaum/utils/formatting/_jobs.py +115 -62
- meerschaum/utils/formatting/_shell.py +6 -0
- meerschaum/utils/misc.py +41 -22
- meerschaum/utils/packages/_packages.py +9 -6
- meerschaum/utils/process.py +9 -9
- meerschaum/utils/prompt.py +16 -8
- meerschaum/utils/venv/__init__.py +2 -2
- {meerschaum-2.2.7.dist-info → meerschaum-2.3.0.dist-info}/METADATA +22 -25
- {meerschaum-2.2.7.dist-info → meerschaum-2.3.0.dist-info}/RECORD +70 -61
- {meerschaum-2.2.7.dist-info → meerschaum-2.3.0.dist-info}/WHEEL +1 -1
- {meerschaum-2.2.7.dist-info → meerschaum-2.3.0.dist-info}/LICENSE +0 -0
- {meerschaum-2.2.7.dist-info → meerschaum-2.3.0.dist-info}/NOTICE +0 -0
- {meerschaum-2.2.7.dist-info → meerschaum-2.3.0.dist-info}/entry_points.txt +0 -0
- {meerschaum-2.2.7.dist-info → meerschaum-2.3.0.dist-info}/top_level.txt +0 -0
- {meerschaum-2.2.7.dist-info → meerschaum-2.3.0.dist-info}/zip-safe +0 -0
meerschaum/jobs/_Job.py
ADDED
@@ -0,0 +1,899 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8

"""
Define the Meerschaum abstraction atop daemons.
"""

from __future__ import annotations

import shlex
import asyncio
import threading
import json
import pathlib
import os
import sys
import traceback
from functools import partial
from datetime import datetime, timezone

import meerschaum as mrsm
from meerschaum.utils.typing import (
    List, Optional, Union, SuccessTuple, Any, Dict, Callable, TYPE_CHECKING,
)
from meerschaum._internal.entry import entry
from meerschaum.utils.warnings import warn
from meerschaum.config.paths import LOGS_RESOURCES_PATH
from meerschaum.config import get_config
from meerschaum.config.static import STATIC_CONFIG

if TYPE_CHECKING:
    from meerschaum.jobs._Executor import Executor

BANNED_CHARS: List[str] = [
    ',', ';', "'", '"',
]
RESTART_FLAGS: List[str] = [
    '-s',
    '--restart',
    '--loop',
    '--schedule',
    '--cron',
]

class StopMonitoringLogs(Exception):
    """
    Raise this exception to stop the logs monitoring.
    """


class Job:
    """
    Manage a `meerschaum.utils.daemon.Daemon`, locally or remotely via the API.
    """

    def __init__(
        self,
        name: str,
        sysargs: Union[List[str], str, None] = None,
        executor_keys: Optional[str] = None,
        _properties: Optional[Dict[str, Any]] = None,
        _rotating_log = None,
        _stdin_file = None,
        _status_hook: Optional[Callable[[], str]] = None,
        _result_hook: Optional[Callable[[], SuccessTuple]] = None,
        _externally_managed: bool = False,
    ):
        """
        Create a new job to manage a `meerschaum.utils.daemon.Daemon`.

        Parameters
        ----------
        name: str
            The name of the job to be created.
            This will also be used as the Daemon ID.

        sysargs: Union[List[str], str, None], default None
            The sysargs of the command to be executed, e.g. 'start api'.

        executor_keys: Optional[str], default None
            If provided, execute the job remotely on an API instance, e.g. 'api:main'.

        _properties: Optional[Dict[str, Any]], default None
            If provided, use this to patch the daemon's properties.
        """
        from meerschaum.utils.daemon import Daemon
        for char in BANNED_CHARS:
            if char in name:
                raise ValueError(f"Invalid name: ({char}) is not allowed.")

        if isinstance(sysargs, str):
            sysargs = shlex.split(sysargs)

        and_key = STATIC_CONFIG['system']['arguments']['and_key']
        escaped_and_key = STATIC_CONFIG['system']['arguments']['escaped_and_key']
        if sysargs:
            sysargs = [
                (arg if arg != escaped_and_key else and_key)
                for arg in sysargs
            ]

        ### NOTE: 'local' and 'systemd' executors are being coalesced.
        if executor_keys is None:
            from meerschaum.jobs import get_executor_keys_from_context
            executor_keys = get_executor_keys_from_context()

        self.executor_keys = executor_keys
        self.name = name
        try:
            self._daemon = (
                Daemon(daemon_id=name)
                if executor_keys == 'local'
                else None
            )
        except Exception:
            self._daemon = None

        ### Handle any injected dependencies.
        if _rotating_log is not None:
            self._rotating_log = _rotating_log
            if self._daemon is not None:
                self._daemon._rotating_log = _rotating_log

        if _stdin_file is not None:
            self._stdin_file = _stdin_file
            if self._daemon is not None:
                self._daemon._stdin_file = _stdin_file
                self._daemon._blocking_stdin_file_path = _stdin_file.blocking_file_path

        if _status_hook is not None:
            self._status_hook = _status_hook

        if _result_hook is not None:
            self._result_hook = _result_hook

        self._externally_managed = _externally_managed
        self._properties_patch = _properties or {}
        if _externally_managed:
            self._properties_patch.update({'externally_managed': _externally_managed})

        daemon_sysargs = (
            self._daemon.properties.get('target', {}).get('args', [None])[0]
            if self._daemon is not None
            else None
        )

        if daemon_sysargs and sysargs and daemon_sysargs != sysargs:
            warn("Given sysargs differ from existing sysargs.")

        self._sysargs = [
            arg
            for arg in (daemon_sysargs or sysargs or [])
            if arg not in ('-d', '--daemon')
        ]
        for restart_flag in RESTART_FLAGS:
            if restart_flag in self._sysargs:
                self._properties_patch.update({'restart': True})
                break

    @staticmethod
    def from_pid(pid: int, executor_keys: Optional[str] = None) -> Job:
        """
        Build a `Job` from the PID of a running Meerschaum process.

        Parameters
        ----------
        pid: int
            The PID of the process.

        executor_keys: Optional[str], default None
            The executor keys to assign to the job.
        """
        from meerschaum.config.paths import DAEMON_RESOURCES_PATH

        psutil = mrsm.attempt_import('psutil')
        try:
            process = psutil.Process(pid)
        except psutil.NoSuchProcess as e:
            warn(f"Process with PID {pid} does not exist.", stack=False)
            raise e

        command_args = process.cmdline()
        is_daemon = command_args[1] == '-c'

        if is_daemon:
            daemon_id = command_args[-1].split('daemon_id=')[-1].split(')')[0].replace("'", '')
            root_dir = process.environ().get(STATIC_CONFIG['environment']['root'], None)
            if root_dir is None:
                from meerschaum.config.paths import ROOT_DIR_PATH
                root_dir = ROOT_DIR_PATH
            jobs_dir = root_dir / DAEMON_RESOURCES_PATH.name
            daemon_dir = jobs_dir / daemon_id
            pid_file = daemon_dir / 'process.pid'
            properties_path = daemon_dir / 'properties.json'
            pickle_path = daemon_dir / 'pickle.pkl'

            if pid_file.exists():
                with open(pid_file, 'r', encoding='utf-8') as f:
                    daemon_pid = int(f.read())

                if pid != daemon_pid:
                    raise EnvironmentError(f"Differing PIDs: {pid=}, {daemon_pid=}")
            else:
                raise EnvironmentError(f"Is job '{daemon_id}' running?")

            return Job(daemon_id, executor_keys=executor_keys)

        from meerschaum._internal.arguments._parse_arguments import parse_arguments
        from meerschaum.utils.daemon import get_new_daemon_name

        mrsm_ix = 0
        for i, arg in enumerate(command_args):
            if 'mrsm' in arg or 'meerschaum' in arg.lower():
                mrsm_ix = i
                break

        sysargs = command_args[mrsm_ix+1:]
        kwargs = parse_arguments(sysargs)
        name = kwargs.get('name', get_new_daemon_name())
        return Job(name, sysargs, executor_keys=executor_keys)

    def start(self, debug: bool = False) -> SuccessTuple:
        """
        Start the job's daemon.
        """
        if self.executor is not None:
            if not self.exists(debug=debug):
                return self.executor.create_job(self.name, self.sysargs, debug=debug)
            return self.executor.start_job(self.name, debug=debug)

        if self.is_running():
            return True, f"{self} is already running."

        success, msg = self.daemon.run(
            keep_daemon_output=True,
            allow_dirty_run=True,
        )
        if not success:
            return success, msg

        return success, f"Started {self}."

    def stop(self, timeout_seconds: Optional[int] = None, debug: bool = False) -> SuccessTuple:
        """
        Stop the job's daemon.
        """
        if self.executor is not None:
            return self.executor.stop_job(self.name, debug=debug)

        if self.daemon.status == 'stopped':
            if not self.restart:
                return True, f"{self} is not running."
            elif self.stop_time is not None:
                return True, f"{self} will not restart until manually started."

        quit_success, quit_msg = self.daemon.quit(timeout=timeout_seconds)
        if quit_success:
            return quit_success, f"Stopped {self}."

        warn(
            f"Failed to gracefully quit {self}.",
            stack=False,
        )
        kill_success, kill_msg = self.daemon.kill(timeout=timeout_seconds)
        if not kill_success:
            return kill_success, kill_msg

        return kill_success, f"Killed {self}."

    def pause(self, timeout_seconds: Optional[int] = None, debug: bool = False) -> SuccessTuple:
        """
        Pause the job's daemon.
        """
        if self.executor is not None:
            return self.executor.pause_job(self.name, debug=debug)

        pause_success, pause_msg = self.daemon.pause(timeout=timeout_seconds)
        if not pause_success:
            return pause_success, pause_msg

        return pause_success, f"Paused {self}."

    def delete(self, debug: bool = False) -> SuccessTuple:
        """
        Delete the job and its daemon.
        """
        if self.executor is not None:
            return self.executor.delete_job(self.name, debug=debug)

        if self.is_running():
            stop_success, stop_msg = self.stop()
            if not stop_success:
                return stop_success, stop_msg

        cleanup_success, cleanup_msg = self.daemon.cleanup()
        if not cleanup_success:
            return cleanup_success, cleanup_msg

        return cleanup_success, f"Deleted {self}."

    def is_running(self) -> bool:
        """
        Determine whether the job's daemon is running.
        """
        return self.status == 'running'

    def exists(self, debug: bool = False) -> bool:
        """
        Determine whether the job exists.
        """
        if self.executor is not None:
            return self.executor.get_job_exists(self.name, debug=debug)

        return self.daemon.path.exists()

    def get_logs(self) -> Union[str, None]:
        """
        Return the output text of the job's daemon.
        """
        if self.executor is not None:
            return self.executor.get_logs(self.name)

        return self.daemon.log_text

    def monitor_logs(
        self,
        callback_function: Callable[[str], None] = partial(print, end=''),
        input_callback_function: Optional[Callable[[], str]] = None,
        stop_callback_function: Optional[Callable[[SuccessTuple], None]] = None,
        stop_event: Optional[asyncio.Event] = None,
        stop_on_exit: bool = False,
        strip_timestamps: bool = False,
        accept_input: bool = True,
        debug: bool = False,
    ):
        """
        Monitor the job's log files and execute a callback on new lines.

        Parameters
        ----------
        callback_function: Callable[[str], None], default partial(print, end='')
            The callback to execute as new data comes in.
            Defaults to printing the output directly to `stdout`.

        input_callback_function: Optional[Callable[[], str]], default None
            If provided, execute this callback when the daemon is blocking on stdin.
            Defaults to `sys.stdin.readline()`.

        stop_callback_function: Optional[Callable[[SuccessTuple]], str], default None
            If provided, execute this callback when the daemon stops.
            The job's SuccessTuple will be passed to the callback.

        stop_event: Optional[asyncio.Event], default None
            If provided, stop monitoring when this event is set.
            You may instead raise `meerschaum.jobs.StopMonitoringLogs`
            from within `callback_function` to stop monitoring.

        stop_on_exit: bool, default False
            If `True`, stop monitoring when the job stops.

        strip_timestamps: bool, default False
            If `True`, remove leading timestamps from lines.

        accept_input: bool, default True
            If `True`, accept input when the daemon blocks on stdin.
        """
        def default_input_callback_function():
            return sys.stdin.readline()

        if input_callback_function is None:
            input_callback_function = default_input_callback_function

        if self.executor is not None:
            self.executor.monitor_logs(
                self.name,
                callback_function,
                input_callback_function=input_callback_function,
                stop_callback_function=stop_callback_function,
                stop_on_exit=stop_on_exit,
                accept_input=accept_input,
                strip_timestamps=strip_timestamps,
                debug=debug,
            )
            return

        monitor_logs_coroutine = self.monitor_logs_async(
            callback_function=callback_function,
            input_callback_function=input_callback_function,
            stop_callback_function=stop_callback_function,
            stop_event=stop_event,
            stop_on_exit=stop_on_exit,
            strip_timestamps=strip_timestamps,
            accept_input=accept_input,
        )
        return asyncio.run(monitor_logs_coroutine)


    async def monitor_logs_async(
        self,
        callback_function: Callable[[str], None] = partial(print, end='', flush=True),
        input_callback_function: Optional[Callable[[], str]] = None,
        stop_callback_function: Optional[Callable[[SuccessTuple], None]] = None,
        stop_event: Optional[asyncio.Event] = None,
        stop_on_exit: bool = False,
        strip_timestamps: bool = False,
        accept_input: bool = True,
        _logs_path: Optional[pathlib.Path] = None,
        _log = None,
        _stdin_file = None,
        debug: bool = False,
    ):
        """
        Monitor the job's log files and await a callback on new lines.

        Parameters
        ----------
        callback_function: Callable[[str], None], default partial(print, end='')
            The callback to execute as new data comes in.
            Defaults to printing the output directly to `stdout`.

        input_callback_function: Optional[Callable[[], str]], default None
            If provided, execute this callback when the daemon is blocking on stdin.
            Defaults to `sys.stdin.readline()`.

        stop_callback_function: Optional[Callable[[SuccessTuple]], str], default None
            If provided, execute this callback when the daemon stops.
            The job's SuccessTuple will be passed to the callback.

        stop_event: Optional[asyncio.Event], default None
            If provided, stop monitoring when this event is set.
            You may instead raise `meerschaum.jobs.StopMonitoringLogs`
            from within `callback_function` to stop monitoring.

        stop_on_exit: bool, default False
            If `True`, stop monitoring when the job stops.

        strip_timestamps: bool, default False
            If `True`, remove leading timestamps from lines.

        accept_input: bool, default True
            If `True`, accept input when the daemon blocks on stdin.
        """
        def default_input_callback_function():
            return sys.stdin.readline()

        if input_callback_function is None:
            input_callback_function = default_input_callback_function

        if self.executor is not None:
            await self.executor.monitor_logs_async(
                self.name,
                callback_function,
                input_callback_function=input_callback_function,
                stop_callback_function=stop_callback_function,
                stop_on_exit=stop_on_exit,
                accept_input=accept_input,
                debug=debug,
            )
            return

        from meerschaum.utils.formatting._jobs import strip_timestamp_from_line

        events = {
            'user': stop_event,
            'stopped': asyncio.Event(),
        }
        combined_event = asyncio.Event()
        emitted_text = False
        stdin_file = _stdin_file if _stdin_file is not None else self.daemon.stdin_file

        async def check_job_status():
            nonlocal emitted_text
            stopped_event = events.get('stopped', None)
            if stopped_event is None:
                return

            sleep_time = 0.1
            while sleep_time < 60:
                if self.status == 'stopped':
                    if not emitted_text:
                        await asyncio.sleep(sleep_time)
                        sleep_time = round(sleep_time * 1.1, 2)
                        continue

                    if stop_callback_function is not None:
                        try:
                            if asyncio.iscoroutinefunction(stop_callback_function):
                                await stop_callback_function(self.result)
                            else:
                                stop_callback_function(self.result)
                        except Exception:
                            warn(traceback.format_exc())

                    if stop_on_exit:
                        events['stopped'].set()

                    break
                await asyncio.sleep(0.1)

        async def check_blocking_on_input():
            while True:
                if not emitted_text or not self.is_blocking_on_stdin():
                    try:
                        await asyncio.sleep(0.1)
                    except asyncio.exceptions.CancelledError:
                        break
                    continue

                if not self.is_running():
                    break

                await emit_latest_lines()

                try:
                    print('', end='', flush=True)
                    if asyncio.iscoroutinefunction(input_callback_function):
                        data = await input_callback_function()
                    else:
                        data = input_callback_function()
                except KeyboardInterrupt:
                    break
                if not data.endswith('\n'):
                    data += '\n'

                stdin_file.write(data)
                await asyncio.sleep(0.1)

        async def combine_events():
            event_tasks = [
                asyncio.create_task(event.wait())
                for event in events.values()
                if event is not None
            ]
            if not event_tasks:
                return

            try:
                done, pending = await asyncio.wait(
                    event_tasks,
                    return_when=asyncio.FIRST_COMPLETED,
                )
                for task in pending:
                    task.cancel()
            except asyncio.exceptions.CancelledError:
                pass
            finally:
                combined_event.set()

        check_job_status_task = asyncio.create_task(check_job_status())
        check_blocking_on_input_task = asyncio.create_task(check_blocking_on_input())
        combine_events_task = asyncio.create_task(combine_events())

        log = _log if _log is not None else self.daemon.rotating_log
        lines_to_show = get_config('jobs', 'logs', 'lines_to_show')

        async def emit_latest_lines():
            nonlocal emitted_text
            lines = log.readlines()
            for line in lines[(-1 * lines_to_show):]:
                if stop_event is not None and stop_event.is_set():
                    return

                if strip_timestamps:
                    line = strip_timestamp_from_line(line)

                try:
                    if asyncio.iscoroutinefunction(callback_function):
                        await callback_function(line)
                    else:
                        callback_function(line)
                    emitted_text = True
                except StopMonitoringLogs:
                    return
                except Exception:
                    warn(f"Error in logs callback:\n{traceback.format_exc()}")

        await emit_latest_lines()

        tasks = (
            [check_job_status_task]
            + ([check_blocking_on_input_task] if accept_input else [])
            + [combine_events_task]
        )
        try:
            _ = asyncio.gather(*tasks, return_exceptions=True)
        except Exception:
            warn(f"Failed to run async checks:\n{traceback.format_exc()}")

        watchfiles = mrsm.attempt_import('watchfiles')
        async for changes in watchfiles.awatch(
            _logs_path or LOGS_RESOURCES_PATH,
            stop_event=combined_event,
        ):
            for change in changes:
                file_path_str = change[1]
                file_path = pathlib.Path(file_path_str)
                latest_subfile_path = log.get_latest_subfile_path()
                if latest_subfile_path != file_path:
                    continue

                await emit_latest_lines()

        await emit_latest_lines()

    def is_blocking_on_stdin(self, debug: bool = False) -> bool:
        """
        Return whether a job's daemon is blocking on stdin.
        """
        if self.executor is not None:
            return self.executor.get_job_is_blocking_on_stdin(self.name, debug=debug)

        return self.is_running() and self.daemon.blocking_stdin_file_path.exists()

    def write_stdin(self, data):
        """
        Write to a job's daemon's `stdin`.
        """
        self.daemon.stdin_file.write(data)

    @property
    def executor(self) -> Union[Executor, None]:
        """
        If the job is remote, return the connector to the remote API instance.
        """
        return (
            mrsm.get_connector(self.executor_keys)
            if self.executor_keys != 'local'
            else None
        )

    @property
    def status(self) -> str:
        """
        Return the running status of the job's daemon.
        """
        if '_status_hook' in self.__dict__:
            return self._status_hook()

        if self.executor is not None:
            return self.executor.get_job_status(self.name)

        return self.daemon.status

    @property
    def pid(self) -> Union[int, None]:
        """
        Return the PID of the job's dameon.
        """
        if self.executor is not None:
            return self.executor.get_job_metadata(self.name).get('daemon', {}).get('pid', None)

        return self.daemon.pid

    @property
    def restart(self) -> bool:
        """
        Return whether to restart a stopped job.
        """
        if self.executor is not None:
            return self.executor.get_job_metadata(self.name).get('restart', False)

        return self.daemon.properties.get('restart', False)

    @property
    def result(self) -> SuccessTuple:
        """
        Return the `SuccessTuple` when the job has terminated.
        """
        if self.is_running():
            return True, f"{self} is running."

        if '_result_hook' in self.__dict__:
            return self._result_hook()

        if self.executor is not None:
            return (
                self.executor.get_job_metadata(self.name)
                .get('result', (False, "No result available."))
            )

        _result = self.daemon.properties.get('result', None)
        if _result is None:
            return False, "No result available."

        return tuple(_result)

    @property
    def sysargs(self) -> List[str]:
        """
        Return the sysargs to use for the Daemon.
        """
        if self._sysargs:
            return self._sysargs

        if self.executor is not None:
            return self.executor.get_job_metadata(self.name).get('sysargs', [])

        target_args = self.daemon.target_args
        if target_args is None:
            return []
        self._sysargs = target_args[0] if len(target_args) > 0 else []
        return self._sysargs

    @property
    def daemon(self) -> 'Daemon':
        """
        Return the daemon which this job manages.
        """
        from meerschaum.utils.daemon import Daemon
        if self._daemon is not None and self.executor is None and self._sysargs:
            return self._daemon

        remote_properties = (
            {}
            if self.executor is None
            else self.executor.get_job_properties(self.name)
        )
        properties = {**remote_properties, **self._properties_patch}

        self._daemon = Daemon(
            target=entry,
            target_args=[self._sysargs],
            target_kw={},
            daemon_id=self.name,
            label=shlex.join(self._sysargs),
            properties=properties,
        )
        if '_rotating_log' in self.__dict__:
            self._daemon._rotating_log = self._rotating_log

        if '_stdin_file' in self.__dict__:
            self._daemon._stdin_file = self._stdin_file
            self._daemon._blocking_stdin_file_path = self._stdin_file.blocking_file_path

        return self._daemon

    @property
    def began(self) -> Union[datetime, None]:
        """
        The datetime when the job began running.
        """
        if self.executor is not None:
            began_str = self.executor.get_job_began(self.name)
            if began_str is None:
                return None
            return (
                datetime.fromisoformat(began_str)
                .astimezone(timezone.utc)
                .replace(tzinfo=None)
            )

        began_str = self.daemon.properties.get('process', {}).get('began', None)
        if began_str is None:
            return None

        return datetime.fromisoformat(began_str)

    @property
    def ended(self) -> Union[datetime, None]:
        """
        The datetime when the job stopped running.
        """
        if self.executor is not None:
            ended_str = self.executor.get_job_ended(self.name)
            if ended_str is None:
                return None
            return (
                datetime.fromisoformat(ended_str)
                .astimezone(timezone.utc)
                .replace(tzinfo=None)
            )

        ended_str = self.daemon.properties.get('process', {}).get('ended', None)
        if ended_str is None:
            return None

        return datetime.fromisoformat(ended_str)

    @property
    def paused(self) -> Union[datetime, None]:
        """
        The datetime when the job was suspended while running.
        """
        if self.executor is not None:
            paused_str = self.executor.get_job_paused(self.name)
            if paused_str is None:
                return None
            return (
                datetime.fromisoformat(paused_str)
                .astimezone(timezone.utc)
                .replace(tzinfo=None)
            )

        paused_str = self.daemon.properties.get('process', {}).get('paused', None)
        if paused_str is None:
            return None

        return datetime.fromisoformat(paused_str)

    @property
    def stop_time(self) -> Union[datetime, None]:
        """
        Return the timestamp when the job was manually stopped.
        """
        if self.executor is not None:
            return self.executor.get_job_stop_time(self.name)

        if not self.daemon.stop_path.exists():
            return None

        stop_data = self.daemon._read_stop_file()
        if not stop_data:
            return None

        stop_time_str = stop_data.get('stop_time', None)
        if not stop_time_str:
            warn(f"Could not read stop time for {self}.")
            return None

        return datetime.fromisoformat(stop_time_str)

    @property
    def hidden(self) -> bool:
        """
        Return a bool indicating whether this job should be displayed.
        """
        return (
            self.name.startswith('_')
            or self.name.startswith('.')
            or self._is_externally_managed
        )

    def check_restart(self) -> SuccessTuple:
        """
        If `restart` is `True` and the daemon is not running,
        restart the job.
        Do not restart if the job was manually stopped.
        """
        if self.is_running():
            return True, f"{self} is running."

        if not self.restart:
            return True, f"{self} does not need to be restarted."

        if self.stop_time is not None:
            return True, f"{self} was manually stopped."

        return self.start()

    @property
    def label(self) -> str:
        """
        Return the job's Daemon label (joined sysargs).
        """
        return shlex.join(self.sysargs).replace(' + ', '\n+ ')

    @property
    def _externally_managed_file(self) -> pathlib.Path:
        """
        Return the path to the externally managed file.
        """
        return self.daemon.path / '.externally-managed'

    def _set_externally_managed(self):
        """
        Set this job as externally managed.
        """
        self._externally_managed = True
        try:
            self._externally_managed_file.parent.mkdir(exist_ok=True, parents=True)
            self._externally_managed_file.touch()
        except Exception as e:
            warn(e)

    @property
    def _is_externally_managed(self) -> bool:
        """
        Return whether this job is externally managed.
        """
        return self.executor_keys in (None, 'local') and (
            self._externally_managed or self._externally_managed_file.exists()
        )

    def __str__(self) -> str:
        sysargs = self.sysargs
        sysargs_str = shlex.join(sysargs) if sysargs else ''
        job_str = f'Job("{self.name}"'
        if sysargs_str:
            job_str += f', "{sysargs_str}"'

        job_str += ')'
        return job_str

    def __repr__(self) -> str:
        return str(self)

    def __hash__(self) -> int:
        return hash(self.name)