meerschaum 2.2.6__py3-none-any.whl → 2.3.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. meerschaum/__init__.py +4 -1
  2. meerschaum/__main__.py +10 -5
  3. meerschaum/_internal/arguments/_parser.py +44 -15
  4. meerschaum/_internal/entry.py +35 -14
  5. meerschaum/_internal/shell/Shell.py +155 -53
  6. meerschaum/_internal/shell/updates.py +175 -0
  7. meerschaum/actions/api.py +12 -12
  8. meerschaum/actions/attach.py +95 -0
  9. meerschaum/actions/delete.py +35 -26
  10. meerschaum/actions/register.py +19 -5
  11. meerschaum/actions/show.py +119 -148
  12. meerschaum/actions/start.py +85 -75
  13. meerschaum/actions/stop.py +68 -39
  14. meerschaum/actions/sync.py +3 -3
  15. meerschaum/actions/upgrade.py +28 -36
  16. meerschaum/api/_events.py +18 -1
  17. meerschaum/api/_oauth2.py +2 -0
  18. meerschaum/api/_websockets.py +2 -2
  19. meerschaum/api/dash/jobs.py +5 -2
  20. meerschaum/api/routes/__init__.py +1 -0
  21. meerschaum/api/routes/_actions.py +122 -44
  22. meerschaum/api/routes/_jobs.py +340 -0
  23. meerschaum/api/routes/_pipes.py +25 -25
  24. meerschaum/config/_default.py +1 -0
  25. meerschaum/config/_formatting.py +1 -0
  26. meerschaum/config/_paths.py +5 -0
  27. meerschaum/config/_shell.py +84 -67
  28. meerschaum/config/_version.py +1 -1
  29. meerschaum/config/static/__init__.py +9 -0
  30. meerschaum/connectors/__init__.py +9 -11
  31. meerschaum/connectors/api/APIConnector.py +18 -1
  32. meerschaum/connectors/api/_actions.py +60 -71
  33. meerschaum/connectors/api/_jobs.py +260 -0
  34. meerschaum/connectors/api/_misc.py +1 -1
  35. meerschaum/connectors/api/_request.py +13 -9
  36. meerschaum/connectors/parse.py +23 -7
  37. meerschaum/core/Pipe/_sync.py +3 -0
  38. meerschaum/plugins/__init__.py +89 -5
  39. meerschaum/utils/daemon/Daemon.py +333 -149
  40. meerschaum/utils/daemon/FileDescriptorInterceptor.py +19 -10
  41. meerschaum/utils/daemon/RotatingFile.py +18 -7
  42. meerschaum/utils/daemon/StdinFile.py +110 -0
  43. meerschaum/utils/daemon/__init__.py +40 -27
  44. meerschaum/utils/formatting/__init__.py +83 -37
  45. meerschaum/utils/formatting/_jobs.py +118 -51
  46. meerschaum/utils/formatting/_shell.py +6 -0
  47. meerschaum/utils/jobs/_Job.py +684 -0
  48. meerschaum/utils/jobs/__init__.py +245 -0
  49. meerschaum/utils/misc.py +18 -17
  50. meerschaum/utils/packages/__init__.py +21 -15
  51. meerschaum/utils/packages/_packages.py +2 -2
  52. meerschaum/utils/prompt.py +20 -7
  53. meerschaum/utils/schedule.py +21 -15
  54. {meerschaum-2.2.6.dist-info → meerschaum-2.3.0.dev1.dist-info}/METADATA +9 -9
  55. {meerschaum-2.2.6.dist-info → meerschaum-2.3.0.dev1.dist-info}/RECORD +61 -54
  56. {meerschaum-2.2.6.dist-info → meerschaum-2.3.0.dev1.dist-info}/WHEEL +1 -1
  57. {meerschaum-2.2.6.dist-info → meerschaum-2.3.0.dev1.dist-info}/LICENSE +0 -0
  58. {meerschaum-2.2.6.dist-info → meerschaum-2.3.0.dev1.dist-info}/NOTICE +0 -0
  59. {meerschaum-2.2.6.dist-info → meerschaum-2.3.0.dev1.dist-info}/entry_points.txt +0 -0
  60. {meerschaum-2.2.6.dist-info → meerschaum-2.3.0.dev1.dist-info}/top_level.txt +0 -0
  61. {meerschaum-2.2.6.dist-info → meerschaum-2.3.0.dev1.dist-info}/zip-safe +0 -0
meerschaum/utils/jobs/_Job.py
@@ -0,0 +1,684 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+# vim:fenc=utf-8
+
+"""
+Define the Meerschaum abstraction atop daemons.
+"""
+
+import shlex
+import asyncio
+import threading
+import json
+import pathlib
+import os
+import sys
+import traceback
+from functools import partial
+from datetime import datetime, timezone
+
+import meerschaum as mrsm
+from meerschaum.utils.typing import List, Optional, Union, SuccessTuple, Any, Dict, Callable
+from meerschaum._internal.entry import entry
+from meerschaum.utils.warnings import warn
+from meerschaum.config.paths import LOGS_RESOURCES_PATH
+from meerschaum.config import get_config
+
+BANNED_CHARS: List[str] = [
+    ',', ';', "'", '"',
+]
+RESTART_FLAGS: List[str] = [
+    '-s',
+    '--restart',
+    '--loop',
+    '--schedule',
+    '--cron',
+]
+
+class StopMonitoringLogs(Exception):
+    """
+    Raise this exception to stop the logs monitoring.
+    """
+
+
+class Job:
+    """
+    Manage a `meerschaum.utils.daemon.Daemon`, locally or remotely via the API.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        sysargs: Union[List[str], str, None] = None,
+        executor_keys: Optional[str] = None,
+        _properties: Optional[Dict[str, Any]] = None,
+    ):
+        """
+        Create a new job to manage a `meerschaum.utils.daemon.Daemon`.
+
+        Parameters
+        ----------
+        name: str
+            The name of the job to be created.
+            This will also be used as the Daemon ID.
+
+        sysargs: Union[List[str], str, None], default None
+            The sysargs of the command to be executed, e.g. 'start api'.
+
+        executor_keys: Optional[str], default None
+            If provided, execute the job remotely on an API instance, e.g. 'api:main'.
+
+        _properties: Optional[Dict[str, Any]], default None
+            If provided, use this to patch the daemon's properties.
+        """
+        from meerschaum.utils.daemon import Daemon
+        for char in BANNED_CHARS:
+            if char in name:
+                raise ValueError(f"Invalid name: ({char}) is not allowed.")
+
+        if isinstance(sysargs, str):
+            sysargs = shlex.split(sysargs)
+
+        if executor_keys == 'local':
+            executor_keys = None
+        self.executor_keys = executor_keys
+        self.name = name
+        try:
+            self._daemon = (
+                Daemon(daemon_id=name)
+                if executor_keys is None
+                else None
+            )
+        except Exception:
+            self._daemon = None
+
+        self._properties_patch = _properties or {}
+
+        daemon_sysargs = (
+            self._daemon.properties.get('target', {}).get('args', [None])[0]
+            if self._daemon is not None
+            else None
+        )
+
+        if daemon_sysargs and sysargs and daemon_sysargs != sysargs:
+            warn("Given sysargs differ from existing sysargs.")
+
+        self._sysargs = [
+            arg
+            for arg in (daemon_sysargs or sysargs or [])
+            if arg not in ('-d', '--daemon')
+        ]
+        for restart_flag in RESTART_FLAGS:
+            if restart_flag in self._sysargs:
+                self._properties_patch.update({'restart': True})
+                break
+
+    def start(self, debug: bool = False) -> SuccessTuple:
+        """
+        Start the job's daemon.
+        """
+        if self.executor is not None:
+            if not self.exists(debug=debug):
+                return self.executor.create_job(self.name, self.sysargs, debug=debug)
+            return self.executor.start_job(self.name, debug=debug)
+
+        if self.is_running():
+            return True, f"{self} is already running."
+
+        success, msg = self.daemon.run(
+            keep_daemon_output=True,
+            allow_dirty_run=True,
+        )
+        if not success:
+            return success, msg
+
+        return success, f"Started {self}."
+
+    def stop(self, timeout_seconds: Optional[int] = None, debug: bool = False) -> SuccessTuple:
+        """
+        Stop the job's daemon.
+        """
+        if self.executor is not None:
+            return self.executor.stop_job(self.name, debug=debug)
+
+        if self.daemon.status == 'stopped':
+            if not self.restart:
+                return True, f"{self} is not running."
+
+        quit_success, quit_msg = self.daemon.quit(timeout=timeout_seconds)
+        if quit_success:
+            return quit_success, f"Stopped {self}."
+
+        warn(
+            f"Failed to gracefully quit {self}.",
+            stack=False,
+        )
+        kill_success, kill_msg = self.daemon.kill(timeout=timeout_seconds)
+        if not kill_success:
+            return kill_success, kill_msg
+
+        return kill_success, f"Killed {self}."
+
+    def pause(self, timeout_seconds: Optional[int] = None, debug: bool = False) -> SuccessTuple:
+        """
+        Pause the job's daemon.
+        """
+        if self.executor is not None:
+            return self.executor.pause_job(self.name, debug=debug)
+
+        pause_success, pause_msg = self.daemon.pause(timeout=timeout_seconds)
+        if not pause_success:
+            return pause_success, pause_msg
+
+        return pause_success, f"Paused {self}."
+
+    def delete(self, debug: bool = False) -> SuccessTuple:
+        """
+        Delete the job and its daemon.
+        """
+        if self.executor is not None:
+            return self.executor.delete_job(self.name, debug=debug)
+
+        if self.is_running():
+            stop_success, stop_msg = self.stop()
+            if not stop_success:
+                return stop_success, stop_msg
+
+        cleanup_success, cleanup_msg = self.daemon.cleanup()
+        if not cleanup_success:
+            return cleanup_success, cleanup_msg
+
+        return cleanup_success, f"Deleted {self}."
+
+    def is_running(self) -> bool:
+        """
+        Determine whether the job's daemon is running.
+        """
+        return self.status == 'running'
+
+    def exists(self, debug: bool = False) -> bool:
+        """
+        Determine whether the job exists.
+        """
+        if self.executor is not None:
+            return self.executor.get_job_exists(self.name, debug=debug)
+
+        return self.daemon.path.exists()
+
+    def get_logs(self) -> Union[str, None]:
+        """
+        Return the output text of the job's daemon.
+        """
+        if self.executor is not None:
+            return self.executor.get_logs(self.name)
+
+        return self.daemon.log_text
+
+    def monitor_logs(
+        self,
+        callback_function: Callable[[str], None] = partial(print, end=''),
+        input_callback_function: Optional[Callable[[], str]] = None,
+        stop_event: Optional[threading.Event] = None,
+        stop_on_exit: bool = False,
+        strip_timestamps: bool = False,
+        accept_input: bool = True,
+        debug: bool = False,
+    ):
+        """
+        Monitor the job's log files and execute a callback on new lines.
+
+        Parameters
+        ----------
+        callback_function: Callable[[str], None], default partial(print, end='')
+            The callback to execute as new data comes in.
+            Defaults to printing the output directly to `stdout`.
+
+        input_callback_function: Optional[Callable[[], str]], default None
+            If provided, execute this callback when the daemon is blocking on stdin.
+            Defaults to `sys.stdin.readline()`.
+
+        stop_event: Optional[asyncio.Event], default None
+            If provided, stop monitoring when this event is set.
+            You may instead raise `meerschaum.utils.jobs.StopMonitoringLogs`
+            from within `callback_function` to stop monitoring.
+
+        stop_on_exit: bool, default False
+            If `True`, stop monitoring when the job stops.
+
+        strip_timestamps: bool, default False
+            If `True`, remove leading timestamps from lines.
+
+        accept_input: bool, default True
+            If `True`, accept input when the daemon blocks on stdin.
+        """
+        if self.executor is not None:
+            self.executor.monitor_logs(self.name, callback_function)
+            return
+
+        monitor_logs_coroutine = self.monitor_logs_async(
+            callback_function=callback_function,
+            input_callback_function=input_callback_function,
+            stop_event=stop_event,
+            stop_on_exit=stop_on_exit,
+            strip_timestamps=strip_timestamps,
+            accept_input=accept_input,
+        )
+        return asyncio.run(monitor_logs_coroutine)
+
+
+    async def monitor_logs_async(
+        self,
+        callback_function: Callable[[str], None] = partial(print, end='', flush=True),
+        input_callback_function: Optional[Callable[[], str]] = None,
+        stop_event: Optional[asyncio.Event] = None,
+        stop_on_exit: bool = False,
+        strip_timestamps: bool = False,
+        accept_input: bool = True,
+        debug: bool = False,
+    ):
+        """
+        Monitor the job's log files and await a callback on new lines.
+
+        Parameters
+        ----------
+        callback_function: Callable[[str], None], default partial(print, end='')
+            The callback to execute as new data comes in.
+            Defaults to printing the output directly to `stdout`.
+
+        input_callback_function: Optional[Callable[[], str]], default None
+            If provided, execute this callback when the daemon is blocking on stdin.
+            Defaults to `sys.stdin.readline()`.
+
+        stop_event: Optional[asyncio.Event], default None
+            If provided, stop monitoring when this event is set.
+            You may instead raise `meerschaum.utils.jobs.StopMonitoringLogs`
+            from within `callback_function` to stop monitoring.
+
+        stop_on_exit: bool, default False
+            If `True`, stop monitoring when the job stops.
+
+        strip_timestamps: bool, default False
+            If `True`, remove leading timestamps from lines.
+
+        accept_input: bool, default True
+            If `True`, accept input when the daemon blocks on stdin.
+        """
+        def default_input_callback_function():
+            return sys.stdin.readline()
+
+        if input_callback_function is None:
+            input_callback_function = default_input_callback_function
+
+        if self.executor is not None:
+            await self.executor.monitor_logs_async(
+                self.name,
+                callback_function,
+                input_callback_function=input_callback_function,
+                accept_input=accept_input,
+                debug=debug,
+            )
+            return
+
+        from meerschaum.utils.formatting._jobs import strip_timestamp_from_line
+
+        events = {
+            'user': stop_event,
+            'stopped': (asyncio.Event() if stop_on_exit else None),
+        }
+        combined_event = asyncio.Event()
+        emitted_text = False
+
+        async def check_job_status():
+            nonlocal emitted_text
+            stopped_event = events.get('stopped', None)
+            if stopped_event is None:
+                return
+            sleep_time = 0.1
+            while sleep_time < 60:
+                if self.status == 'stopped':
+                    if not emitted_text:
+                        await asyncio.sleep(sleep_time)
+                        sleep_time = round(sleep_time * 1.1, 2)
+                        continue
+                    events['stopped'].set()
+                    break
+                await asyncio.sleep(0.1)
+
+        async def check_blocking_on_input():
+            while True:
+                if not emitted_text or not self.is_blocking_on_stdin():
+                    try:
+                        await asyncio.sleep(0.1)
+                    except asyncio.exceptions.CancelledError:
+                        break
+                    continue
+
+                if not self.is_running():
+                    break
+
+                await emit_latest_lines()
+
+                try:
+                    print('', end='', flush=True)
+                    if asyncio.iscoroutinefunction(input_callback_function):
+                        data = await input_callback_function()
+                    else:
+                        data = input_callback_function()
+                except KeyboardInterrupt:
+                    break
+                if not data.endswith('\n'):
+                    data += '\n'
+                self.daemon.stdin_file.write(data)
+                await asyncio.sleep(0.1)
+
+        async def combine_events():
+            event_tasks = [
+                asyncio.create_task(event.wait())
+                for event in events.values()
+                if event is not None
+            ]
+            if not event_tasks:
+                return
+
+            try:
+                done, pending = await asyncio.wait(
+                    event_tasks,
+                    return_when=asyncio.FIRST_COMPLETED,
+                )
+                for task in pending:
+                    task.cancel()
+            except asyncio.exceptions.CancelledError:
+                pass
+            finally:
+                combined_event.set()
+
+        check_job_status_task = asyncio.create_task(check_job_status())
+        check_blocking_on_input_task = asyncio.create_task(check_blocking_on_input())
+        combine_events_task = asyncio.create_task(combine_events())
+
+        log = self.daemon.rotating_log
+        lines_to_show = get_config('jobs', 'logs', 'lines_to_show')
+
+        async def emit_latest_lines():
+            nonlocal emitted_text
+            lines = log.readlines()
+            for line in lines[(-1 * lines_to_show):]:
+                if stop_event is not None and stop_event.is_set():
+                    return
+
+                if strip_timestamps:
+                    line = strip_timestamp_from_line(line)
+
+                try:
+                    if asyncio.iscoroutinefunction(callback_function):
+                        await callback_function(line)
+                    else:
+                        callback_function(line)
+                    emitted_text = True
+                except StopMonitoringLogs:
+                    return
+                except Exception:
+                    warn(f"Error in logs callback:\n{traceback.format_exc()}")
+
+        await emit_latest_lines()
+
+        tasks = (
+            [check_job_status_task]
+            + ([check_blocking_on_input_task] if accept_input else [])
+            + [combine_events_task]
+        )
+        try:
+            _ = asyncio.gather(*tasks, return_exceptions=True)
+        except Exception:
+            warn(f"Failed to run async checks:\n{traceback.format_exc()}")
+
+        watchfiles = mrsm.attempt_import('watchfiles')
+        async for changes in watchfiles.awatch(
+            LOGS_RESOURCES_PATH,
+            stop_event=combined_event,
+        ):
+            for change in changes:
+                file_path_str = change[1]
+                file_path = pathlib.Path(file_path_str)
+                latest_subfile_path = log.get_latest_subfile_path()
+                if latest_subfile_path != file_path:
+                    continue
+
+                lines = log.readlines()
+                for line in lines:
+                    if strip_timestamps:
+                        line = strip_timestamp_from_line(line)
+                    try:
+                        if asyncio.iscoroutinefunction(callback_function):
+                            await callback_function(line)
+                        else:
+                            callback_function(line)
+                        emitted_text = True
+                    except RuntimeError:
+                        return
+                    except StopMonitoringLogs:
+                        return
+                    except Exception:
+                        warn(f"Error in logs callback:\n{traceback.format_exc()}")
+                        return
+
+        await emit_latest_lines()
+
+    def is_blocking_on_stdin(self, debug: bool = False) -> bool:
+        """
+        Return whether a job's daemon is blocking on stdin.
+        """
+        if self.executor is not None:
+            return self.executor.get_job_is_blocking_on_stdin(self.name, debug=debug)
+
+        return self.is_running() and self.daemon.blocking_stdin_file_path.exists()
+
+    def write_stdin(self, data):
+        """
+        Write to a job's daemon's `stdin`.
+        """
+        if self.executor is not None:
+            pass
+
+        self.daemon.stdin_file.write(data)
+
+    @property
+    def executor(self) -> Union['APIConnector', None]:
+        """
+        If the job is remote, return the connector to the remote API instance.
+        """
+        return (
+            mrsm.get_connector(self.executor_keys)
+            if self.executor_keys is not None
+            else None
+        )
+
+    @property
+    def status(self) -> str:
+        """
+        Return the running status of the job's daemon.
+        """
+        if self.executor is not None:
+            return self.executor.get_job_metadata(
+                self.name
+            ).get('daemon', {}).get('status', 'stopped')
+
+        return self.daemon.status
+
+    @property
+    def pid(self) -> Union[int, None]:
+        """
+        Return the PID of the job's daemon.
+        """
+        if self.executor is not None:
+            return self.executor.get_job_metadata(self.name).get('daemon', {}).get('pid', None)
+
+        return self.daemon.pid
+
+    @property
+    def restart(self) -> bool:
+        """
+        Return whether to restart a stopped job.
+        """
+        return self.daemon.properties.get('restart', False)
+
+    @property
+    def result(self) -> SuccessTuple:
+        """
+        Return the `SuccessTuple` when the job has terminated.
+        """
+        if self.is_running():
+            return True, f"{self} is running."
+
+        _result = self.daemon.properties.get('result', None)
+        if _result is None:
+            return False, "No result available."
+
+        return tuple(_result)
+
+    @property
+    def sysargs(self) -> List[str]:
+        """
+        Return the sysargs to use for the Daemon.
+        """
+        if self._sysargs:
+            return self._sysargs
+
+        # target_args = self.daemon.properties.get('target', {}).get('args', None)
+        target_args = self.daemon.target_args
+        if target_args is None:
+            return []
+        self._sysargs = target_args[0] if len(target_args) > 0 else []
+        return self._sysargs
+
+    @property
+    def daemon(self) -> 'Daemon':
+        """
+        Return the daemon which this job manages.
+        """
+        from meerschaum.utils.daemon import Daemon
+        if self._daemon is not None and self.executor is None and self._sysargs:
+            return self._daemon
+
+        remote_properties = (
+            {}
+            if self.executor is None
+            else self.executor.get_job_properties(self.name)
+        )
+        properties = {**remote_properties, **self._properties_patch}
+
+        self._daemon = Daemon(
+            target=entry,
+            target_args=[self._sysargs],
+            target_kw={},
+            daemon_id=self.name,
+            label=shlex.join(self._sysargs),
+            properties=properties,
+        )
+
+        return self._daemon
+
+    @property
+    def began(self) -> Union[datetime, None]:
+        """
+        The datetime when the job began running.
+        """
+        began_str = self.daemon.properties.get('process', {}).get('began', None)
+        if began_str is None:
+            return None
+
+        return datetime.fromisoformat(began_str)
+
+    @property
+    def ended(self) -> Union[datetime, None]:
+        """
+        The datetime when the job stopped running.
+        """
+        ended_str = self.daemon.properties.get('process', {}).get('ended', None)
+        if ended_str is None:
+            return None
+
+        return datetime.fromisoformat(ended_str)
+
+    @property
+    def paused(self) -> Union[datetime, None]:
+        """
+        The datetime when the job was suspended while running.
+        """
+        paused_str = self.daemon.properties.get('process', {}).get('paused', None)
+        if paused_str is None:
+            return None
+
+        return datetime.fromisoformat(paused_str)
+
+    @property
+    def stop_time(self) -> Union[datetime, None]:
+        """
+        Return the timestamp when the job was manually stopped.
+        """
+        if self.executor is not None:
+            return self.executor.get_job_stop_time(self.name)
+
+        if not self.daemon.stop_path.exists():
+            return None
+
+        try:
+            with open(self.daemon.stop_path, 'r', encoding='utf-8') as f:
+                stop_data = json.load(f)
+        except Exception as e:
+            warn(f"Failed to read stop file for {self}:\n{e}")
+            return None
+
+        stop_time_str = stop_data.get('stop_time', None)
+        if not stop_time_str:
+            warn(f"Could not read stop time for {self}.")
+            return None
+
+        return datetime.fromisoformat(stop_time_str)
+
+    @property
+    def hidden(self) -> bool:
+        """
+        Return a bool indicating whether this job should be displayed.
+        """
+        return self.name.startswith('_') or self.name.startswith('.')
+
+    def check_restart(self) -> SuccessTuple:
+        """
+        If `restart` is `True` and the daemon is not running,
+        restart the job.
+        Do not restart if the job was manually stopped.
+        """
+        if self.is_running():
+            return True, f"{self} is running."
+
+        if not self.restart:
+            return True, f"{self} does not need to be restarted."
+
+        if self.stop_time is not None:
+            return True, f"{self} was manually stopped."
+
+        return self.start()
+
+    @property
+    def label(self) -> str:
+        """
+        Return the job's Daemon label (joined sysargs).
+        """
+        return shlex.join(self.sysargs)
+
+    def __str__(self) -> str:
+        sysargs = self.sysargs
+        sysargs_str = shlex.join(sysargs) if sysargs else ''
+        job_str = f'Job("{self.name}"'
+        if sysargs_str:
+            job_str += f', "{sysargs_str}"'
+
+        job_str += ')'
+        return job_str
+
+    def __repr__(self) -> str:
+        return str(self)
+
+    def __hash__(self) -> int:
+        return hash(self.name)
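
For orientation, here is a minimal usage sketch of the `Job` class defined in the new file above. It is not part of the diff: it assumes the new `meerschaum/utils/jobs/__init__.py` re-exports `Job` and `StopMonitoringLogs`, and the job name, sysargs string, and `api:main` connector keys are placeholders.

# Hypothetical usage sketch of the new Job API (not part of the package).
from meerschaum.utils.jobs import Job, StopMonitoringLogs  # assumes the new __init__.py re-exports these

# Create (or attach to) a local job by name; a sysargs string is split via shlex.
job = Job('example-job', 'sync pipes --loop')

success, msg = job.start()   # runs entry(['sync', 'pipes', '--loop']) inside a Daemon
print(success, msg)
print(job.status, job.pid)

# Stream log lines until the job exits; raising StopMonitoringLogs from the callback also ends monitoring.
def print_line(line: str) -> None:
    print(line, end='')
    if 'error' in line.lower():
        raise StopMonitoringLogs

job.monitor_logs(callback_function=print_line, stop_on_exit=True, strip_timestamps=True)

# Passing executor_keys targets a remote API instance instead of a local daemon.
remote_job = Job('example-job', 'sync pipes --loop', executor_keys='api:main')

print(job.stop())    # quit (or kill) the daemon
print(job.delete())  # stop if needed, then clean up the daemon's files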