dron 0.1.20241008__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dron/systemd.py ADDED
@@ -0,0 +1,542 @@
1
from __future__ import annotations

import json
import os
import re
import shlex
import shutil
from datetime import datetime, timedelta, timezone
from functools import cached_property, lru_cache
from itertools import groupby
from pathlib import Path
from subprocess import PIPE, Popen, run
from tempfile import TemporaryDirectory
from typing import Any, Iterator, Sequence
from zoneinfo import ZoneInfo

from .api import (
    OnFailureAction,
    When,
)
from .common import (
    MANAGED_MARKER,
    Body,
    Command,
    MonitorEntry,
    MonitorParams,
    State,
    TimerSpec,
    Unit,
    UnitState,
    datetime_aware,
    escape,
    is_managed,
    logger,
)
37
+
38
+
39
+ def _is_missing_systemd() -> str | None:
40
+ has_systemd = shutil.which('systemctl') is not None
41
+ if not has_systemd:
42
+ return "systemd not available, running under docker or osx"
43
+ return None
44
+
45
+
46
+ def _systemctl(*args: Path | str) -> list[Path | str]:
47
+ return ['systemctl', '--user', *args]
48
+
49
+
50
def managed_header() -> str:
    """Header stamped into every unit dron generates; `is_managed` looks for the marker."""
    lines = [
        f'# {MANAGED_MARKER}',
        '# If you do any manual changes, they will be overridden on the next dron run',
    ]
    return '\n'.join(lines) + '\n'
55
+
56
+
57
# TODO how to come up with good implicit job name?
# TODO do we need a special target for dron?
def timer(*, unit_name: str, when: When) -> str:
    """Render the systemd .timer unit body for *unit_name*, scheduled per *when*."""
    # a plain string is shorthand for an OnCalendar= schedule
    spec: TimerSpec = {'OnCalendar': when} if isinstance(when, str) else when
    specs = '\n'.join(f'{key}={value}' for key, value in spec.items())

    return f'''
{managed_header()}
[Unit]
Description=Timer for {unit_name} {MANAGED_MARKER}

[Timer]
{specs}

[Install]
WantedBy=timers.target
'''.lstrip()
79
+
80
+
81
# TODO add Restart=always and RestartSec?
# TODO allow to pass extra args
def service(
    *,
    unit_name: str,
    command: Command,
    on_failure: Sequence[OnFailureAction],
    **kwargs: str,
) -> str:
    """Render the systemd .service unit body for *unit_name* running *command*.

    Extra kwargs become key=value lines; a key may be prefixed with a section
    name like '[Unit]After' to target that section, otherwise it lands in
    [Service].
    """
    # TODO not sure if something else needs to be escaped for ExecStart??
    # todo systemd-escape? but only can be used for names

    # ok OnFailure is quite annoying since it can't take arguments etc... seems much easier to use ExecStopPost
    # (+ can possibly run on success too that way?)
    # https://unix.stackexchange.com/a/441662/180307
    cmd = escape(command)

    failure_hooks = [
        f"ExecStopPost=/bin/sh -c 'if [ $$EXIT_STATUS != 0 ]; then {action}; fi'"
        for action in on_failure
    ]

    sections: dict[str, list[str]] = {
        '[Unit]': [f'Description=Service for {unit_name} {MANAGED_MARKER}'],
        '[Service]': [f'ExecStart={cmd}', *failure_hooks],
    }

    for raw_key, value in kwargs.items():
        # ideally it would have section name
        m = re.search(r'(\[\w+\])(.*)', raw_key)
        if m is not None:
            section, key = m.group(1), m.group(2)
        else:
            # 'legacy' behaviour, by default put into [Service]
            section, key = '[Service]', raw_key
        sections.setdefault(section, []).append(f'{key}={value}')

    res = managed_header()
    for section_name, lines in sections.items():
        res += '\n\n' + '\n'.join([section_name, *lines])
    res += '\n'

    return res
133
+
134
+
135
def test_managed() -> None:
    # a unit rendered by dron must be detected as managed,
    # while a hand-written (but valid) unit must not
    skip_if_no_systemd()
    from .dron import verify_unit

    assert is_managed(timer(unit_name='whatever', when='daily'))

    custom = '''
[Service]
ExecStart=/bin/echo 123
'''
    verify_unit(unit_name='other.service', body=custom)  # precondition
    assert not is_managed(custom)
147
+
148
+
149
def verify_units(pre_units: list[tuple[Unit, Body]]) -> None:
    """Validate unit bodies in bulk via systemd-analyze; raise RuntimeError on problems."""
    # ugh. systemd-analyze takes about 0.2 seconds for each unit for some reason
    # oddly enough, in bulk it works just as fast :thinking_face:
    # also doesn't work in parallel (i.e. parallel processes)
    # that ends up with some weird errors trying to connect to socket
    with TemporaryDirectory() as _tdir:
        tdir = Path(_tdir)
        for unit, body in pre_units:
            (tdir / unit).write_text(body)
        res = run(['systemd-analyze', '--user', 'verify', *tdir.glob('*')], capture_output=True, check=False)

    out = res.stdout.decode('utf8')
    err = res.stderr.decode('utf8')
    # ugh. apparently even exit code 0 doesn't guarantee correct output??
    assert out == '', out
    if err == '':
        return

    # uhh.. in bulk mode it spams with tons of 'Cannot add dependency job' for some reason
    # I guess it kinda treats everything as dependent on each other??
    # https://github.com/systemd/systemd/blob/b692ad36b99909453cf4f975a346e41d6afc68a0/src/core/transaction.c#L978
    # dict.fromkeys dedups while keeping first-seen order
    err_lines = list(dict.fromkeys(err.splitlines(keepends=True)))
    if not err_lines:
        return

    msg = f'failed checking , exit code {res.returncode}'
    logger.error(msg)
    logger.error('systemd-analyze output:')
    for line in err_lines:
        logger.error(line.strip())
    raise RuntimeError(msg)
185
+
186
+
187
def test_verify_systemd() -> None:
    # smoke test for verify_unit: well-formed bodies pass, malformed ones raise
    skip_if_no_systemd()
    from .dron import verify_unit

    def fails(body: str) -> None:
        # body must be rejected by systemd-analyze
        import pytest
        with pytest.raises(Exception):
            verify_unit(unit_name='whatever.service', body=body)

    def ok(body: str) -> None:
        # body must verify cleanly
        verify_unit(unit_name='ok.service', body=body)

    ok(body='''
[Service]
ExecStart=/bin/echo 123
''')

    from .api import notify
    on_failure = (
        notify.email('test@gmail.com'),
        notify.desktop_notification,
    )
    ok(body=service(unit_name='alala', command='/bin/echo 123', on_failure=on_failure))

    # garbage
    fails(body='fewfewf')

    # no execstart
    fails(body='''
[Service]
StandardOutput=journal
''')

    # invalid value for StandardOutput
    fails(body='''
[Service]
ExecStart=yes
StandardOutput=baaad
''')
225
+
226
+
227
+ def _sd(s: str) -> str:
228
+ return f'org.freedesktop.systemd1{s}'
229
+
230
+
231
class BusManager:
    """Thin wrapper around the user session dbus connection to systemd."""

    def __init__(self) -> None:
        # unused-ignore because on macos there is no dbus (but this code is still running mypy on CI)
        from dbus import (  # type: ignore[import-untyped,import-not-found,unused-ignore]
            Interface,
            SessionBus,
        )
        self.Interface = Interface  # meh

        # note: SystemBus is for system-wide services
        self.bus = SessionBus()
        systemd_obj = self.bus.get_object(_sd(''), '/org/freedesktop/systemd1')
        self.manager = Interface(systemd_obj, dbus_interface=_sd('.Manager'))

    def properties(self, u: Unit):
        """Return the DBus.Properties interface for unit *u*."""
        unit_path = self.manager.GetUnit(u)
        unit_proxy = self.bus.get_object(_sd(''), str(unit_path))
        return self.Interface(unit_proxy, dbus_interface='org.freedesktop.DBus.Properties')

    @staticmethod  # meh
    def prop(obj, schema: str, name: str):
        """Fetch property *name* from the systemd interface *schema* on *obj*."""
        return obj.Get(_sd(schema), name)

    @classmethod
    def exec_start(cls, props) -> Sequence[str]:
        """Extract the ExecStart argv (as plain strings) from service properties."""
        dbus_exec_start = cls.prop(props, '.Service', 'ExecStart')
        return [str(x) for x in dbus_exec_start[0][1]]
257
+
258
+
259
def systemd_state(*, with_body: bool) -> State:
    """Yield a UnitState for every dron-managed unit systemd currently knows about."""
    bus = BusManager()
    # ok nice, ListUnits is basically instant
    for listed in bus.manager.ListUnits():
        name, description = listed[0], listed[1]
        # the managed marker lives in the unit Description
        if not is_managed(description):
            continue

        # todo annoying, this call still takes some time... but whatever ok
        props = bus.properties(name)

        # useful for debugging, can also use .Service if it's not a timer
        # all_properties = props.GetAll(_sd('.Unit'))
        # stale = int(bus.prop(props, '.Unit', 'NeedDaemonReload')) == 1

        unit_file = Path(str(bus.prop(props, '.Unit', 'FragmentPath'))).resolve()
        body = unit_file.read_text() if with_body else None
        # timers have no ExecStart, so no cmdline for them
        cmdline: Sequence[str] | None
        cmdline = None if '.timer' in name else BusManager.exec_start(props)  # meh

        yield UnitState(unit_file=unit_file, body=body, cmdline=cmdline)
285
+
286
+
287
def test_managed_units() -> None:
    # smoke test: enumerating managed units (and running the monitor) shouldn't raise
    skip_if_no_systemd()
    # TODO wonder if i'd be able to use launchd on ci...
    from .cli import cmd_monitor
    from .dron import managed_units

    # shouldn't fail at least
    list(managed_units(with_body=True))

    # TODO ugh. doesn't work on circleci, fails with
    # dbus.exceptions.DBusException: org.freedesktop.DBus.Error.BadAddress: Address does not contain a colon
    # todo maybe don't need it anymore with 20.04 circleci?
    if 'CI' not in os.environ:
        cmd_monitor(MonitorParams(with_success_rate=True, with_command=True))
301
+
302
+
303
def skip_if_no_systemd() -> None:
    """pytest-skip the current test when systemd isn't present on this machine."""
    import pytest
    reason = _is_missing_systemd()
    if reason is None:
        return
    pytest.skip(f'No systemd: {reason}')
308
+
309
+
310
_UTCMAX = datetime.max.replace(tzinfo=timezone.utc)  # aware sentinel used for systemd's uint64-max "no timestamp"
311
+
312
+
313
class MonitorHelper:
    """Shared helpers for the monitor/past commands (timestamp and timezone handling)."""

    def from_usec(self, usec) -> datetime_aware:
        """Convert a systemd microseconds-since-epoch value (int-like) to an aware UTC datetime.

        systemd uses uint64 max as a sentinel (happens if the job is running ATM?);
        that maps to an aware datetime.max so callers can compare against it.
        """
        u = int(usec)
        if u == 2 ** 64 - 1:
            return datetime.max.replace(tzinfo=timezone.utc)
        return datetime.fromtimestamp(u / 10 ** 6, tz=timezone.utc)

    @cached_property
    def local_tz(self) -> ZoneInfo:
        """Local timezone, computed once per instance.

        cached_property replaces the previous @property + @lru_cache combo,
        which keyed the cache on self and kept instances alive (ruff B019).
        """
        try:
            # tzlocal is a required dependency, but still might fail in some weird environments?
            # e.g. if zoneinfo information isn't available
            from tzlocal import get_localzone
            return get_localzone()
        except Exception:
            logger.error("Couldn't determine local timezone! Falling back to UTC")
            return ZoneInfo('UTC')
333
+
334
+
335
def get_entries_for_monitor(managed: State, *, params: MonitorParams) -> list[MonitorEntry]:
    """Build one MonitorEntry per managed job (timer+service pair, or bare service).

    Queries systemd over dbus for the schedule, next/last run times, last result,
    pid, and (optionally, per params) the command line and journal success rate.
    """
    # TODO reorder timers and services so timers go before?
    mon = MonitorHelper()
    UTCNOW = datetime.now(tz=timezone.utc)
    bus = BusManager()

    # TODO maybe format seconds prettier. dunno
    # hoisted out of the loop: it doesn't depend on per-unit state
    def fmt_delta(d: timedelta) -> str:
        # format to reduce constant countdown...
        ad = abs(d)
        # get rid of microseconds
        ad = ad - timedelta(microseconds=ad.microseconds)

        day = timedelta(days=1)
        hour = timedelta(hours=1)
        minute = timedelta(minutes=1)
        gt = False
        if ad > day:
            full_days = ad // day
            hours = (ad % day) // hour
            ads = f'{full_days}d {hours}h'
            gt = True
        elif ad > minute:
            full_mins = ad // minute
            ad = timedelta(minutes=full_mins)
            ads = str(ad)
            gt = True
        else:
            # show exact
            ads = str(ad)
            if len(ads) == 7:
                ads = '0' + ads  # meh. fix missing leading zero in hours..
        return ('>' if gt else '') + ads

    entries: list[MonitorEntry] = []
    names = sorted(s.unit_file.name for s in managed)
    uname = lambda full: full.split('.')[0]
    for k, _gr in groupby(names, key=uname):
        gr = list(_gr)
        # if timer is None, guess that means the job is always running?
        timer: str | None
        service: str
        if len(gr) == 2:
            [service, timer] = gr
        else:
            assert len(gr) == 1, gr
            [service] = gr
            timer = None

        if timer is not None:
            props = bus.properties(timer)
            cal = bus.prop(props, '.Timer', 'TimersCalendar')
            next_ = bus.prop(props, '.Timer', 'NextElapseUSecRealtime')

            unit_props = bus.properties(service)
            # note: there is also bus.prop(props, '.Timer', 'LastTriggerUSec'), but makes more sense to use unit to account for manual runs
            last = bus.prop(unit_props, '.Unit', 'ActiveExitTimestamp')

            schedule = cal[0][1]  # TODO is there a more reliable way to retrieve it??
            # todo not sure if last is really that useful..

            last_dt = mon.from_usec(last)
            next_dt = mon.from_usec(next_)
            nexts = next_dt.astimezone(mon.local_tz).replace(tzinfo=None, microsecond=0).isoformat()

            # from_usec returns the AWARE _UTCMAX sentinel; the previous comparison
            # against naive datetime.max was always False (aware != naive), so the
            # "no next elapse" branch was dead code
            if next_dt == _UTCMAX:
                left_delta = timedelta(0)
            else:
                left_delta = next_dt - UTCNOW
        else:
            left_delta = timedelta(0)  # TODO
            last_dt = UTCNOW
            nexts = 'n/a'
            schedule = 'always'

        left = f'{fmt_delta(left_delta)!s:<9}'
        if last_dt.timestamp() == 0:
            ago = 'never'  # TODO yellow?
        else:
            passed_delta = UTCNOW - last_dt
            ago = str(fmt_delta(passed_delta))
        # TODO instead of hacking microsecond, use 'NOW' or something?

        props = bus.properties(service)
        # TODO some summary too? e.g. how often it failed
        # TODO make defensive?
        result = bus.prop(props, '.Service', 'Result')
        exec_start = BusManager.exec_start(props)
        assert exec_start is not None, service  # not None for services
        command = ' '.join(map(shlex.quote, exec_start)) if params.with_command else None
        _pid: int | None = int(bus.prop(props, '.Service', 'MainPID'))
        pid = None if _pid == 0 else str(_pid)

        if params.with_success_rate:
            rate = _unit_success_rate(service)
            rates = f' {rate:.2f}'
        else:
            rates = ''

        status_ok = result == 'success'
        status = f'{result:<9} {ago:<8}{rates}'

        entries.append(MonitorEntry(
            unit=k,
            status=status,
            left=left,
            next=nexts,
            schedule=schedule,
            command=command,
            pid=pid,
            status_ok=status_ok,
        ))
    return entries
454
+
455
+
456
Json = dict[str, Any]


def _unit_logs(unit: Unit) -> Iterator[Json]:
    """Yield json-parsed journalctl records for *unit* (systemd-emitted entries only).

    Only UNIT_RESULT/JOB_TYPE/MESSAGE are requested; journalctl still includes
    its address fields (e.g. __REALTIME_TIMESTAMP) in each record.
    """
    # TODO so do I need to parse logs to get failure stats? perhaps json would be more reliable
    cmd = f'journalctl --user -u {unit} -o json -t systemd --output-fields UNIT_RESULT,JOB_TYPE,MESSAGE'
    with Popen(cmd.split(), stdout=PIPE) as po:
        stdout = po.stdout
        assert stdout is not None
        for line in stdout:
            # apparently, successful runs aren't getting logged? not sure why
            # (previously JOB_TYPE/UNIT_RESULT were extracted here and discarded — dead code removed)
            yield json.loads(line.decode('utf8'))
469
+
470
+
471
def _unit_success_rate(unit: Unit) -> float:
    """Fraction of journal-recorded starts that didn't fail; 1.0 when nothing ran yet."""
    # TODO not sure how much time it takes to query all journals?
    started = 0
    failed = 0
    for record in _unit_logs(unit):
        jt = record.get('JOB_TYPE')
        ur = record.get('UNIT_RESULT')
        # a record carries either a job type (start) or a unit result (failure), never both
        if jt is not None:
            assert ur is None
            started += 1
        elif ur is not None:
            assert jt is None
            failed += 1
        # TODO eh? sometimes jobs also report Succeeded status
        # e.g. syncthing-paranoid
    if started == 0:
        assert failed == 0, unit
        return 1.0
    return (started - failed) / started
493
+
494
+
495
def cmd_past(unit: Unit) -> None:
    """Print the journal history of *unit* as '<iso timestamp> <message>' lines."""
    mon = MonitorHelper()
    for record in _unit_logs(unit):
        when = mon.from_usec(record['__REALTIME_TIMESTAMP'])
        print(when.isoformat(), record['MESSAGE'])
501
+
502
+
503
def cmd_run(*, unit: Unit, do_exec: bool) -> None:
    """Replace the current process with the command line of the managed *unit*."""
    assert do_exec  # support without exec later
    # TODO we might have called it before via managed_units.. maybe need to cache
    # meh: match on the file stem, skipping the .timer halves of each job
    matches = [
        s
        for s in systemd_state(with_body=False)
        if not s.unit_file.name.endswith('.timer') and s.unit_file.stem == unit
    ]
    [state] = matches
    cmdline = state.cmdline
    assert cmdline is not None
    cmds = ' '.join(map(shlex.quote, cmdline))
    logger.info(f'running: {cmds}')
    os.execvp(
        cmdline[0],
        list(cmdline),
    )
523
+
524
+
525
+ # used to use this, keeping for now just for the reference
526
+ # def old_systemd_emailer() -> None:
527
+ # user = getpass.getuser()
528
+ # X = textwrap.dedent(f'''
529
+ # [Unit]
530
+ # Description=status email for %i to {user}
531
+ #
532
+ # [Service]
533
+ # Type=oneshot
534
+ # ExecStart={SYSTEMD_EMAIL} --to {user} --unit %i --journalctl-args "-o cat"
535
+ # # TODO why these were suggested??
536
+ # # User=nobody
537
+ # # Group=systemd-journal
538
+ # ''')
539
+ #
540
+ # write_unit(unit=f'status-email@.service', body=X, prefix=SYSTEMD_USER_DIR)
541
+ # # I guess makes sense to reload here; fairly atomic step
542
+ # _daemon_reload()
@@ -0,0 +1,119 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+
5
+ import pytest
6
+
7
+ from ..dron import load_jobs
8
+
9
+
10
def test_load_jobs_basic(tmp_path: Path) -> None:
    # jobs come back in yield order (not definition order), with
    # when/command/unit_name preserved from the drontab
    tpath = Path(tmp_path) / 'drontab.py'
    tpath.write_text(
        '''
from typing import Iterator

from dron.api import job, Job


def jobs() -> Iterator[Job]:
    job3 = job(
        '03:10',
        ['/path/to/command.py', 'some', 'args', '3'],
        unit_name='job3',
    )
    job1 = job(
        '01:10',
        ['/path/to/command.py', 'some', 'args', '1'],
        unit_name='job1',
    )
    yield job1
    yield job(
        '02:10',
        ['/path/to/command.py', 'some', 'args', '2'],
        unit_name='job2',
    )
    yield job3

'''
    )
    loaded = list(load_jobs(tabfile=tpath, ppath=tmp_path))
    [job1, job2, job3] = loaded

    assert job1.when == '01:10'
    assert job1.command == ['/path/to/command.py', 'some', 'args', '1']
    assert job1.unit_name == 'job1'

    assert job2.when == '02:10'
    assert job2.command == ['/path/to/command.py', 'some', 'args', '2']
    assert job2.unit_name == 'job2'

    assert job3.when == '03:10'
    assert job3.command == ['/path/to/command.py', 'some', 'args', '3']
    assert job3.unit_name == 'job3'
+
55
+
56
def test_load_jobs_dupes(tmp_path: Path) -> None:
    # duplicate unit names in a drontab must be rejected
    tpath = Path(tmp_path) / 'drontab.py'
    tpath.write_text(
        '''
from typing import Iterator

from dron.api import job, Job

def jobs() -> Iterator[Job]:
    yield job('00:00', 'echo', unit_name='job3')
    yield job('00:00', 'echo', unit_name='job1')
    # whoops! duplicate job name
    yield job('00:00', 'echo', unit_name='job3')
'''
    )
    with pytest.raises(AssertionError):
        _loaded = list(load_jobs(tabfile=tpath, ppath=tmp_path))
+
74
+
75
def test_jobs_auto_naming(tmp_path: Path) -> None:
    # when unit_name isn't passed, the job is named after the variable it's
    # assigned to — including multi-line calls, backslash continuations,
    # module-level assignments, and helper wrappers using stacklevel
    tpath = Path(tmp_path) / 'drontab.py'
    tpath.write_text(
        '''
from typing import Iterator

from dron.api import job, Job


job2 = job(
    '00:02',
    'echo',
)


def job_maker(when) -> Job:
    return job(when, 'echo job maker', stacklevel=2)


def jobs() -> Iterator[Job]:
    job_1 = job('00:01',
        'echo',
    )
    yield job2
    yield job('00:00', 'echo', unit_name='job_named')
    yield job_1
    job4 = \
        job('00:04', 'echo')
    job5 = job_maker('00:05')
    yield job5
    yield job4
'''
    )
    loaded = list(load_jobs(tabfile=tpath, ppath=tmp_path))
    (job2, job_named, job_1, job5, job4) = loaded
    assert job_1.unit_name == 'job_1'
    assert job_1.when == '00:01'
    assert job2.unit_name == 'job2'
    assert job2.when == '00:02'
    assert job_named.unit_name == 'job_named'
    assert job_named.when == '00:00'
    assert job4.unit_name == 'job4'
    assert job4.when == '00:04'
    assert job5.unit_name == 'job5'
    assert job5.when == '00:05'
+ assert job5.when == '00:05'
@@ -0,0 +1,21 @@
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2024 Dima Gerasimov
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.