tklr-dgraham 0.0.0rc11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tklr-dgraham might be problematic.

tklr/common.py ADDED
@@ -0,0 +1,1296 @@
1
+ # pyright: reportUndefinedVariable=false
2
+ from dateutil.parser import parse as dateutil_parse
3
+ from dateutil.parser import parserinfo
4
+ from datetime import datetime, date, timedelta
5
+ from zoneinfo import ZoneInfo
6
+ import platform
7
+ import sys
8
+ import os
10
+ import textwrap
11
+ import shutil
12
+ import re
13
+ from shlex import split as qsplit
14
+ import contextlib, io
15
+ import subprocess # for check_output
16
+ from rich import print as rprint
17
+ from rich.console import Console
18
+ from rich.markdown import Markdown
19
+
20
+ # Initialize a Rich console
21
+
22
+ from pygments.lexer import RegexLexer
23
+ from pygments.token import Keyword
24
+ from pygments.token import Literal as TokenLiteral  # avoid clash with typing.Literal imported below
25
+ from pygments.token import Operator
26
+ from pygments.token import Comment
27
+
28
+ import functools
29
+ from time import perf_counter
30
+ from typing import List, Callable, Any
31
+ import inspect
32
+ from typing import Literal
33
+ from .versioning import get_version
34
+
35
+ # import logging
36
+ # import logging.config
37
+ # logger = logging.getLogger('etm')
38
+ # settings = None
39
+
40
+ from dateutil import __version__ as dateutil_version
41
+
42
+ from time import perf_counter as timer
43
+
44
+ # from etm.make_examples import make_examples
45
+ import tomllib
46
+ from pathlib import Path
47
+
48
+ ETMDB = DBITEM = DBARCH = dataview = data_changed = None
49
+
50
+
51
+ # def get_version(pyproject_path: Path | None = None) -> str:
52
+ # """
53
+ # Extract the version from pyproject.toml [project] section.
54
+ #
55
+ # Args:
56
+ # pyproject_path (Path or None): Optional override path. If None, searches upward.
57
+ #
58
+ # Returns:
59
+ # str: version string (e.g., "0.1.0")
60
+ # """
61
+ # if pyproject_path is None:
62
+ # # Search upward from current working dir
63
+ # current = Path.cwd()
64
+ # while current != current.parent:
65
+ # candidate = current / "pyproject.toml"
66
+ # if candidate.exists():
67
+ # pyproject_path = candidate
68
+ # break
69
+ # current = current.parent
70
+ # else:
71
+ # return "dev"
72
+ #
73
+ # try:
74
+ # with open(pyproject_path, "rb") as f:
75
+ # data = tomllib.load(f)
76
+ # return data.get("project", {}).get("version", "dev")
77
+ # except Exception:
78
+ # return "dev"
79
+
80
+
81
+ def log_msg(msg: str, file_path: str = "log_msg.md"):
82
+ """
83
+ Log a message and save it directly to a specified file.
84
+
85
+ Args:
86
+ msg (str): The message to log.
87
+ file_path (str, optional): Path to the log file. Defaults to "log_msg.md".
88
+ """
89
+ caller_name = inspect.stack()[1].function
90
+ lines = [
91
+ f"- {datetime.now().strftime('%y-%m-%d %H:%M')} " + rf"({caller_name}): ",
92
+ ]
93
+ lines.extend(
94
+ [
95
+ f"\n{x}"
96
+ for x in textwrap.wrap(
97
+ msg.strip(),
98
+ width=shutil.get_terminal_size()[0] - 6,
99
+ initial_indent=" ",
100
+ subsequent_indent=" ",
101
+ )
102
+ ]
103
+ )
104
+ lines.append("\n\n")
105
+
106
+ # Save the message to the file
107
+ with open(file_path, "a") as f:
108
+ f.writelines(lines)
109
+
110
+
111
+ def display_messages(file_path: str = "log_msg.md"):
112
+ """
113
+ Display all logged messages from the specified file.
114
+
115
+ Args:
116
+ file_path (str, optional): Path to the log file. Defaults to "log_msg.md".
117
+ """
118
+ try:
119
+ # Read messages from the file
120
+ with open(file_path, "r") as f:
121
+ markdown_content = f.read()
122
+ markdown = Markdown(markdown_content)
123
+ console = Console()
124
+ console.print(markdown)
125
+ except FileNotFoundError:
126
+ print(f"Error: Log file '{file_path}' not found.")
127
+
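A minimal usage sketch of the two helpers above (illustrative, not part of the package diff; it assumes the installed wheel exposes tklr.common):

    from tklr.common import log_msg, display_messages

    log_msg("imported 12 items")   # appends a timestamped entry to log_msg.md
    display_messages()             # renders log_msg.md to the terminal via rich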
128
+
129
+ def is_aware(dt):
130
+ return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None
131
+
132
+
133
+ def benchmark(func: Callable[..., Any]) -> Callable[..., Any]:
134
+ @functools.wraps(func)
135
+ def wrapper(*args: Any, **kwargs: Any) -> Any:
136
+ start = perf_counter()
137
+ result = func(*args, **kwargs)
138
+ end = perf_counter()
139
+ logger.debug(f"⏱ {func.__name__} took {end - start:.4f} seconds")
140
+ return result
141
+
142
+ return wrapper
143
+
144
+
145
+ def timeit(message: str = "") -> Callable[[Callable[..., Any]], Callable[..., Any]]:
146
+ def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
147
+ @functools.wraps(func)
148
+ def wrapper(self, *args: Any, **kwargs: Any) -> Any:
149
+ msg = f" ({message.format(self=self)})" if message else ""
150
+ start = perf_counter()
151
+ result = func(self, *args, **kwargs)
152
+ end = perf_counter()
153
+ logger.debug(f"⏱ {func.__name__}{msg} took {end - start:.4f} seconds")
154
+ return result
155
+
156
+ return wrapper
157
+
158
+ return decorator
159
+
160
+
161
+ def drop_zero_minutes(dt, mode: Literal["24", "12"], end=False):
162
+ """
163
+ >>> drop_zero_minutes(parse('2018-03-07 10am'), "12")
+ '10'
+ >>> drop_zero_minutes(parse('2018-03-07 2:45pm'), "12")
+ '2:45'
167
+ """
168
+ show_minutes = mode == "24"
169
+ # show_minutes = False
170
+ # logger.debug(f"starting {dt = }; {ampm = }; {show_minutes = }")
171
+ # logger.debug(f"{dt.replace(tzinfo=None) = }")
172
+ dt = dt.replace(tzinfo=None)
173
+ # logger.debug(f"{dt = }")
174
+ # if show_minutes:
175
+ if show_minutes:
176
+ if mode == "12":
177
+ return dt.strftime("%-I:%M").rstrip("M").lower()
178
+ else:
179
+ return dt.strftime("%-H:%M")
180
+ else:
181
+ if dt.minute == 0:
182
+ if mode == "12":
183
+ return dt.strftime("%-I")
184
+ else:
185
+ # return dt.strftime("%-Hh") if end else dt.strftime("%-H")
186
+ return dt.strftime("%-H") if end else dt.strftime("%-H")
187
+ else:
188
+ if mode == "12":
189
+ return dt.strftime("%-I:%M").rstrip("M").lower()
190
+ else:
191
+ return dt.strftime("%-H:%M")
192
+
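For reference, an illustrative sketch of drop_zero_minutes in both modes (the %-H/%-I format codes assume a non-Windows strftime):

    from datetime import datetime
    from tklr.common import drop_zero_minutes

    drop_zero_minutes(datetime(2024, 7, 4, 10, 0), "12")    # '10'
    drop_zero_minutes(datetime(2024, 7, 4, 14, 45), "24")   # '14:45'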
193
+
194
+ period_regex = re.compile(r"((\d+)([wdhms]))+?")
195
+ expanded_period_regex = re.compile(r"((\d+)\s(week|day|hour|minute|second)s?)+?")
196
+
197
+
198
+ def fmt_user(dt_str: str) -> str:
199
+ """
200
+ User-friendly formatting for date and datetime strings: dates are shown
+ as YYYY-MM-DD and datetimes as YYYY-MM-DD HH:MM.
202
+ """
203
+ if not dt_str:
204
+ return "unscheduled"
205
+ try:
206
+ dt = dateutil_parse(dt_str)
207
+ except Exception as e:
208
+ return f"error parsing {dt_str}: {e}"
209
+ if dt_str.endswith("T0000"):
210
+ return dt.strftime("%Y-%m-%d")
211
+ return dt.strftime("%Y-%m-%d %H:%M")
212
+
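A usage sketch for fmt_user (illustrative; it assumes dateutil accepts the compact ISO strings used elsewhere in this module):

    from tklr.common import fmt_user

    fmt_user("20240704T0000")   # '2024-07-04' (midnight is treated as date-only)
    fmt_user("20240704T1430")   # '2024-07-04 14:30'
    fmt_user("")                # 'unscheduled'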
213
+
214
+ def parse_period(s: str) -> timedelta:
215
+ """\
216
+ Take a period string and return (True, timedelta) on success or
+ (False, error message) on failure.
+ Examples:
+ parse_period('-2w3d4h5m') = (True, -timedelta(weeks=2, days=3, hours=4, minutes=5))
+ parse_period('1h30m') = (True, timedelta(hours=1, minutes=30))
+ parse_period('-10m') = (True, -timedelta(minutes=10))
221
+ where:
223
+ w: weeks
224
+ d: days
225
+ h: hours
226
+ m: minutes
227
+ s: seconds
228
+ """
229
+
230
+ knms = {
231
+ "w": "weeks",
232
+ "week": "weeks",
233
+ "weeks": "weeks",
234
+ "d": "days",
235
+ "day": "days",
236
+ "days": "days",
237
+ "h": "hours",
238
+ "hour": "hours",
239
+ "hours": "hours",
240
+ "m": "minutes",
241
+ "minute": "minutes",
242
+ "minutes": "minutes",
243
+ "s": "seconds",
244
+ "second": "second",
245
+ "seconds": "seconds",
246
+ }
247
+
248
+ kwds = {
249
+ "weeks": 0,
250
+ "days": 0,
251
+ "hours": 0,
252
+ "minutes": 0,
253
+ "seconds": 0,
254
+ }
255
+
256
+ s = str(s).strip()
257
+ sign = None
258
+ if s[0] in ["+", "-"]:
259
+ # record the sign and keep the rest of the string
260
+ sign = s[0]
261
+ s = s[1:]
262
+
263
+ m = period_regex.findall(str(s))
264
+ if not m:
265
+ m = expanded_period_regex.findall(str(s))
266
+ if not m:
267
+ return False, f"Invalid period string '{s}'"
268
+ for g in m:
269
+ if g[2] not in knms:
270
+ return False, f"invalid period argument: {g[2]}"
271
+
272
+ # num = -int(g[2]) if g[1] == "-" else int(g[2])
273
+ num = int(g[1])
274
+ if num:
275
+ kwds[knms[g[2]]] = num
276
+ td = timedelta(**kwds)
277
+
278
+ if sign and sign == "-":
279
+ td = -td
280
+
281
+ return True, td
282
+
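A usage sketch for parse_period (illustrative; both the compact and the spelled-out forms are matched by the two regexes above):

    from datetime import timedelta
    from tklr.common import parse_period

    parse_period("1h30m")       # (True, timedelta(hours=1, minutes=30))
    parse_period("90 minutes")  # (True, timedelta(minutes=90))
    parse_period("-2w3d")       # (True, -timedelta(weeks=2, days=3))
    parse_period("1x")          # (False, "Invalid period string '1x'")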
283
+
284
+ def format_extent(
285
+ beg_dt: datetime, end_dt: datetime, mode: Literal["24", "12"] = "12"
286
+ ) -> str:
287
+ """
288
+ Format the beginning to ending times to display for a reminder with an extent (both @s and @e).
289
+ >>> beg_dt = parse('2018-03-07 10am')
290
+ >>> end_dt = parse('2018-03-07 11:30am')
291
+ >>> format_extent(beg_dt, end_dt)
292
+ '10-11:30am'
293
+ >>> end_dt = parse('2018-03-07 2pm')
294
+ >>> format_extent(beg_dt, end_dt)
295
+ '10am-2pm'
296
+ """
297
+ log_msg(f"{beg_dt = }; {end_dt = }; {mode = }")
298
+ beg_suffix = ""
299
+ end_suffix = end_dt.strftime("%p").lower().rstrip("m") if mode == "12" else ""
300
+ if beg_dt == end_dt:
301
+ if beg_dt.hour == 0 and beg_dt.minute == 0 and beg_dt.second == 0:
302
+ return "~"
303
+ elif beg_dt.hour == 23 and beg_dt.minute == 59 and beg_dt.second == 59:
304
+ return "~"
305
+ else:
306
+ return f"{drop_zero_minutes(end_dt, mode)}{end_suffix}"
307
+
308
+ if end_dt.hour == 23 and end_dt.minute == 59 and end_dt.second == 59:
309
+ # end_dt = end_dt.replace(hour=0, minute=0, second=0)
310
+ log_msg(f"end_dt: {end_dt = }")
311
+ # end_dt = end_dt + timedelta(seconds=1)
312
+ log_msg(f"end_dt adjusted: {end_dt = }")
313
+ end_suffix = "a" if mode == "12" else ""
314
+ # end_fmt = "12" if mode == "12" else "24"
315
+
316
+ if mode == "12":
317
+ diff = (beg_dt.hour < 12 and end_dt.hour >= 12) or (
318
+ beg_dt.hour >= 12 and end_dt.hour < 12
319
+ )
320
+ beg_suffix = beg_dt.strftime("%p").lower().rstrip("m") if diff else ""
321
+
322
+ beg_fmt = drop_zero_minutes(beg_dt, mode)
323
+ end_fmt = drop_zero_minutes(end_dt, mode, end=True)
324
+ log_msg(f"end: {end_dt = }; {end_fmt = }")
325
+ if mode == "12":
326
+ beg_fmt = beg_fmt.lstrip("0")
327
+ end_fmt = end_fmt.lstrip("0")
328
+ # else:
329
+ # beg_fmt = beg_fmt.lstrip("0")
330
+ # end_fmt = end_fmt.lstrip("0")
331
+
332
+ return f"{beg_fmt}{beg_suffix}-{end_fmt}{end_suffix}"
333
+
334
+
335
+ def timedelta_str_to_seconds(time_str: str) -> tuple[bool, int | str]:
336
+ """
337
+ Converts a time string composed of integers followed by 'w', 'd', 'h', or 'm'
338
+ into the total number of seconds.
339
+ Args:
340
+ time_str (str): The time string (e.g., '3h15m').
+ Returns:
+ tuple[bool, int | str]: (True, total seconds) on success, or
+ (False, an error message) if the string is not in the expected format.
345
+ """
346
+ # Define time multipliers for each unit
347
+ multipliers = {
348
+ "w": 7 * 24 * 60 * 60, # Weeks to seconds
349
+ "d": 24 * 60 * 60, # Days to seconds
350
+ "h": 60 * 60, # Hours to seconds
351
+ "m": 60, # Minutes to seconds
352
+ }
353
+ # Match all integer-unit pairs (e.g., "3h", "15m")
354
+ matches = re.findall(r"(\d+)([wdhm])", time_str)
355
+ if not matches:
356
+ return (
357
+ False,
358
+ "Invalid time string format. Expected integers followed by 'w', 'd', 'h', or 'm'.",
359
+ )
360
+ # Convert each match to seconds and sum them
361
+ total_seconds = sum(int(value) * multipliers[unit] for value, unit in matches)
362
+ return True, total_seconds
363
+
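For example (illustrative sketch):

    from tklr.common import timedelta_str_to_seconds

    timedelta_str_to_seconds("3h15m")   # (True, 11700)
    timedelta_str_to_seconds("2d")      # (True, 172800)
    timedelta_str_to_seconds("soon")    # (False, "Invalid time string format. ...")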
364
+
365
+ def fmt_period(seconds: int, short=True):
366
+ """
367
+ Format a number of seconds as a compact human readable string.
+ If short, report only the two largest units, otherwise all units.
+ >>> td = int(timedelta(weeks=1, days=2, hours=3, minutes=27).total_seconds())
+ >>> fmt_period(td, short=False)
+ '1w2d3h27m'
372
+ """
373
+ if type(seconds) is not int:
374
+ return "?"
375
+ if seconds <= 0:
376
+ return ""
377
+ try:
378
+ total_seconds = abs(seconds)
379
+ until = []
380
+ weeks = days = hours = minutes = 0
381
+ if total_seconds:
382
+ seconds = total_seconds % 60
383
+ minutes = total_seconds // 60
384
+ if minutes >= 60:
385
+ hours = minutes // 60
386
+ minutes = minutes % 60
387
+ if hours >= 24:
388
+ days = hours // 24
389
+ hours = hours % 24
390
+ if days >= 7:
391
+ weeks = days // 7
392
+ days = days % 7
393
+
394
+ if weeks:
395
+ until.append(f"{weeks}w")
396
+ if days:
397
+ until.append(f"{days}d")
398
+ if hours:
399
+ until.append(f"{hours}h")
400
+ if minutes:
401
+ until.append(f"{minutes}m")
402
+ if seconds:
403
+ until.append(f"{seconds}s")
404
+ if not until:
405
+ until.append("0m")
406
+ ret = "".join(until[:2]) if short else "".join(until)
407
+ return ret
408
+ except Exception as e:
409
+ log_msg(f"{seconds}: {e}")
410
+ return ""
411
+
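A quick illustration of the short and long forms (sketch, values computed from the logic above):

    from datetime import timedelta
    from tklr.common import fmt_period

    seconds = int(timedelta(weeks=1, days=2, hours=3, minutes=27).total_seconds())
    fmt_period(seconds)               # '1w2d'  (two largest units only)
    fmt_period(seconds, short=False)  # '1w2d3h27m'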
412
+
413
+ def fmt_dt(dt: int, fmt: Literal["date", "time", "datetime"] = "datetime"):
414
+ """
415
+ Format a POSIX timestamp (seconds since the epoch) as a string in the local timezone,
+ e.g. fmt_dt(1610323200) -> '2021-01-11 00:00' when the local timezone is UTC.
418
+ """
419
+ # log_msg(f"dt: {dt}")
420
+ fmt = (
421
+ "%y-%m-%d" if fmt == "date" else "%H:%M" if fmt == "time" else "%Y-%m-%d %H:%M"
422
+ )
423
+ if type(dt) is not int:
424
+ return "?"
425
+ if dt <= 0:
426
+ return ""
427
+ return datetime.fromtimestamp(dt).strftime(fmt)
428
+
429
+
430
+ def duration_in_words(seconds: int, short=False):
431
+ """
432
+ Return a string expressing seconds in weeks, days, hours, minutes and seconds.
+ >>> td = timedelta(weeks=1, days=2, hours=3, minutes=27)
+ >>> duration_in_words(int(td.total_seconds()))
+ '1 week 2 days 3 hours 27 minutes'
436
+ """
437
+ try:
438
+ until = []
439
+ total_seconds = int(seconds)
440
+ weeks = days = hours = minutes = seconds = 0
441
+ if total_seconds:
442
+ sign = "" if total_seconds > 0 else "- "
443
+ total_seconds = abs(total_seconds)
444
+ seconds = total_seconds % 60
445
+ minutes = total_seconds // 60
446
+ if minutes >= 60:
447
+ hours = minutes // 60
448
+ minutes = minutes % 60
449
+ if hours >= 24:
450
+ days = hours // 24
451
+ hours = hours % 24
452
+ if days >= 7:
453
+ weeks = days // 7
454
+ days = days % 7
455
+ if weeks:
456
+ if weeks > 1:
457
+ until.append(f"{sign}{weeks} weeks")
458
+ else:
459
+ until.append(f"{sign}{weeks} week")
460
+ if days:
461
+ if days > 1:
462
+ until.append(f"{sign}{days} days")
463
+ else:
464
+ until.append(f"{sign}{days} day")
465
+ if hours:
466
+ if hours > 1:
467
+ until.append(f"{sign}{hours} hours")
468
+ else:
469
+ until.append(f"{sign}{hours} hour")
470
+ if minutes:
471
+ if minutes > 1:
472
+ until.append(f"{sign}{minutes} minutes")
473
+ else:
474
+ until.append(f"{sign}{minutes} minute")
475
+ if seconds:
476
+ if seconds > 1:
477
+ until.append(f"{sign}{seconds} seconds")
478
+ else:
479
+ until.append(f"{sign}{seconds} second")
480
+ if not until:
481
+ until.append("zero minutes")
482
+ ret = " ".join(until[:2]) if short else " ".join(until)
483
+ return ret
484
+ except Exception as e:
485
+ log_msg(f"{seconds = } raised exception: {e}")
486
+ return None
487
+
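And the spelled-out counterpart (illustrative sketch):

    from datetime import timedelta
    from tklr.common import duration_in_words

    duration_in_words(int(timedelta(days=2, hours=3).total_seconds()))  # '2 days 3 hours'
    duration_in_words(45)                                               # '45 seconds'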
488
+
489
+ class TimeIt(object):
490
+ def __init__(self, label="", loglevel=1):
491
+ self.loglevel = loglevel
492
+ self.label = label
493
+ if self.loglevel == 1:
494
+ self.start = timer()
495
+
496
+ def stop(self, *args):
497
+ if self.loglevel == 1:
498
+ self.end = timer()
499
+ msg = f"⏱ {self.label} took {self.end - self.start:.4f} seconds"
500
+ logger.debug(msg)
501
+
502
+
503
+ # from etm.__main__ import ETMHOME
504
+ # from etm import options
505
+
506
+ python_version = platform.python_version()
507
+ system_platform = platform.platform(terse=True)
508
+ sys_platform = platform.system()
509
+ mac = sys.platform == "darwin"
510
+ windoz = sys_platform in ("Windows", "Microsoft")
511
+
512
+ WA = {}
513
+ parse_datetime = None
514
+ text_pattern = None
515
+ etmhome = None
516
+ timers_file = None
517
+
518
+ VERSION_INFO = f"""\
519
+ python: {python_version}
520
+ dateutil: {dateutil_version}
521
+ platform: {system_platform}\
522
+ """
523
+
524
+
525
+ def check_output(cmd):
526
+ if not cmd:
527
+ return
528
+ res = ""
529
+ try:
530
+ res = subprocess.check_output(
531
+ cmd,
532
+ stderr=subprocess.STDOUT,
533
+ shell=True,
534
+ universal_newlines=True,
535
+ encoding="UTF-8",
536
+ )
537
+ return True, res
538
+ except subprocess.CalledProcessError as e:
539
+ logger.warning(f"Error running {cmd}\n'{e.output}'")
540
+ lines = e.output.strip().split("\n")
541
+ msg = lines[-1]
542
+ return False, msg
543
+
544
+
545
+ def db_replace(new):
546
+ """
547
+ Used with update to replace the original doc with new.
548
+ """
549
+
550
+ def transform(doc):
551
+ # update doc to include key/values from new
552
+ doc.update(new)
553
+ # remove any key/values from doc that are not in new
554
+ for k in list(doc.keys()):
555
+ if k not in new:
556
+ del doc[k]
557
+
558
+ return transform
559
+
560
+
561
+ def import_file(import_file=None):
562
+ import_file = (import_file or "").strip()
563
+ if not import_file:
564
+ return False, ""
565
+ if import_file.lower() == "lorem":
566
+ return True, import_examples()
567
+
568
+ if not os.path.isfile(import_file):
569
+ return (
570
+ False,
571
+ f'"{import_file}"\n either does not exist or is not a regular file',
572
+ )
573
+ filename, extension = os.path.splitext(import_file)
574
+ if extension == ".text":
575
+ return True, import_text(import_file)
576
+ else:
577
+ return (
578
+ False,
579
+ f"Importing a file with the extension '{extension}' is not implemented. Only files with the extension '.text' are recognized",
580
+ )
581
+
582
+
583
+ def import_examples():
584
+ docs = []
585
+ examples = make_examples(last_id=last_id)
586
+
587
+ results = []
588
+ good = []
589
+ bad = []
590
+ items = []
591
+
592
+ logger.debug(f"starting import from last_id: {last_id}")
593
+ count = 0
594
+ for s in examples:
595
+ ok = True
596
+ count += 1
597
+ if not s:
598
+ continue
599
+ item = Item() # use ETMDB by default
600
+ item.new_item()
601
+ item.text_changed(s, 1)
602
+ if item.item_hsh.get("itemtype", None) is None:
603
+ ok = False
604
+
605
+ if item.item_hsh.get("summary", None) is None:
606
+ ok = False
607
+
608
+ if ok:
609
+ # don't check links because the ids won't yet exist
610
+ item.update_item_hsh(check_links=False)
611
+ good.append(f"{item.doc_id}")
612
+ else:
613
+ logger.debug(f"bad entry: {s}")
614
+ bad.append(s)
615
+
616
+ logger.debug("ending import")
617
+ res = f"imported {len(good)} items"
618
+ if good:
619
+ res += f"\n ids: {good[0]} - {good[-1]}"
620
+ if bad:
621
+ res += f"\nrejected {bad} items:\n "
622
+ res += "\n ".join(results)
623
+ return res
624
+
625
+
626
+ def import_text(import_file=None):
627
+ docs = []
628
+ with open(import_file, "r") as fo:
629
+ logger.debug(f"opened for reading: '{import_file}'")
630
+ results = []
631
+ good = []
632
+ bad = []
633
+ reminders = []
634
+ reminder = []
635
+ for line in fo:
636
+ s = line.strip()
637
+ if s and s[0] in ["!", "*", "-", "%"]:
638
+ if reminder:
639
+ # append it to reminders and reset it
640
+ reminders.append(reminder)
641
+ reminder = []
642
+ reminder = [s]
643
+ else:
644
+ # append to the existing reminder
645
+ reminder.append(s)
646
+ if reminder:
647
+ reminders.append(reminder)
648
+ count = 0
649
+ for reminder in reminders:
650
+ count += 1
651
+ logger.debug(f"reminder number {count}: {reminder}")
652
+ ok = True
653
+ s = "\n".join(reminder)
654
+ if not s:
655
+ continue
656
+ logger.debug(f"adding item for {s}")
657
+ item = Item() # use ETMDB by default
658
+ item.new_item()
659
+ item.text_changed(s, 1)
660
+ if item.item_hsh.get("itemtype", None) is None:
661
+ ok = False
662
+
663
+ if item.item_hsh.get("summary", None) is None:
664
+ ok = False
665
+
666
+ if ok:
667
+ # don't check links because the ids won't yet exist
668
+ item.update_item_hsh(check_links=False)
669
+ good.append(f"{item.doc_id}")
670
+ else:
671
+ logger.debug(f"bad entry: {s}")
672
+ bad.append(s)
673
+
674
+ # if not ok:
675
+ # bad += 1
676
+ # results.append(f' {s}')
677
+ # continue
678
+
679
+ # update_item_hsh stores the item in ETMDB
680
+ # item.update_item_hsh()
681
+ # good.append(f'{item.doc_id}')
682
+
683
+ res = f"imported {len(good)} items"
684
+ if good:
685
+ res += f"\n ids: {good[0]} - {good[-1]}"
686
+ if bad:
687
+ res += f"\nrejected {bad} items:\n "
688
+ res += "\n ".join(results)
689
+ logger.debug(f"returning: {res}")
690
+ return res
691
+
692
+
693
+ def import_json(import_file=None):
694
+ import json
695
+
696
+ with open(import_file, "r") as fo:
697
+ import_hsh = json.load(fo)
698
+ items = import_hsh["items"]
699
+ docs = []
700
+ dups = 0
701
+ add = 0
702
+ for id in items:
703
+ item_hsh = items[id]
704
+ itemtype = item_hsh.get("itemtype")
705
+ if not itemtype:
706
+ continue
707
+ summary = item_hsh.get("summary")
708
+ if not summary:
709
+ continue
710
+ z = item_hsh.get("z", "Factory")
711
+ bad_keys = [x for x in item_hsh if not item_hsh[x]]
712
+ for key in bad_keys:
713
+ del item_hsh[key]
714
+ if "s" in item_hsh:
715
+ item_hsh["s"] = pen_from_fmt(item_hsh["s"], z)
716
+ if "f" in item_hsh:
717
+ item_hsh["f"] = period_from_fmt(item_hsh["f"], z)
718
+ item_hsh["created"] = datetime.now("UTC")
719
+ if "h" in item_hsh:
720
+ item_hsh["h"] = [period_from_fmt(x, z) for x in item_hsh["h"]]
721
+ if "+" in item_hsh:
722
+ item_hsh["+"] = [pen_from_fmt(x, z) for x in item_hsh["+"]]
723
+ if "-" in item_hsh:
724
+ item_hsh["-"] = [pen_from_fmt(x, z) for x in item_hsh["-"]]
725
+ if "e" in item_hsh:
726
+ item_hsh["e"] = parse_duration(item_hsh["e"])[1]
727
+ if "w" in item_hsh:
728
+ wrps = [parse_duration(x)[1] for x in item_hsh["w"]]
729
+ item_hsh["w"] = wrps
730
+ if "a" in item_hsh:
731
+ alerts = []
732
+ for alert in item_hsh["a"]:
733
+ # drop the True from parse_duration
734
+ tds = [parse_duration(x)[1] for x in alert[0]]
735
+ # put the largest duration first
736
+ tds.sort(reverse=True)
737
+ cmds = alert[1:2]
738
+ args = ""
739
+ if len(alert) > 2 and alert[2]:
740
+ args = ", ".join(alert[2])
741
+ for cmd in cmds:
742
+ if args:
743
+ row = (tds, cmd, args)
744
+ else:
745
+ row = (tds, cmd)
746
+ alerts.append(row)
747
+ item_hsh["a"] = alerts
748
+ if "j" in item_hsh:
749
+ jbs = []
750
+ for jb in item_hsh["j"]:
751
+ if "h" in jb:
752
+ if "f" not in jb:
753
+ jb["f"] = jb["h"][-1]
754
+ del jb["h"]
755
+ jbs.append(jb)
756
+ ok, lofh, last_completed = jobs(jbs, item_hsh)
757
+
758
+ if ok:
759
+ item_hsh["j"] = lofh
760
+ else:
761
+ print("using jbs", jbs)
762
+ print(
763
+ "ok:",
764
+ ok,
765
+ " lofh:",
766
+ lofh,
767
+ " last_completed:",
768
+ last_completed,
769
+ )
770
+
771
+ if "r" in item_hsh:
772
+ ruls = []
773
+ for rul in item_hsh["r"]:
774
+ if "r" in rul and rul["r"] == "l":
775
+ continue
776
+ elif "f" in rul:
777
+ if rul["f"] == "l":
778
+ continue
779
+ else:
780
+ rul["r"] = rul["f"]
781
+ del rul["f"]
782
+ if "u" in rul:
783
+ if "t" in rul:
784
+ del rul["t"]
785
+ if "c" in rul:
786
+ del rul["c"]
787
+ elif "t" in rul:
788
+ rul["c"] = rul["t"]
789
+ del rul["t"]
790
+ if "u" in rul:
791
+ if type(rul["u"]) == str:
792
+ try:
793
+ rul["u"] = parse(rul["u"], tz=z)
794
+ except Exception as e:
795
+ logger.error(f"error parsing rul['u']: {rul['u']}. {e}")
796
+ if "w" in rul:
797
+ if isinstance(rul["w"], list):
798
+ rul["w"] = [
799
+ "{0}:{1}".format("{W}", x.upper()) for x in rul["w"]
800
+ ]
801
+ else:
802
+ rul["w"] = "{0}:{1}".format("{W}", rul["w"].upper())
803
+ bad_keys = []
804
+ for key in rul:
805
+ if not rul[key]:
806
+ bad_keys.append(key)
807
+ if bad_keys:
808
+ for key in bad_keys:
809
+ del rul[key]
810
+ if rul:
811
+ ruls.append(rul)
812
+ if ruls:
813
+ item_hsh["r"] = ruls
814
+ else:
815
+ del item_hsh["r"]
816
+
817
+ docs.append(item_hsh)
818
+ # now check for duplicates. If an item to be imported has the same type, summary and starting time as an existing item, regard it as a duplicate and do not import it.
819
+ exst = []
820
+ new = []
821
+ dups = 0
822
+ for x in ETMDB:
823
+ exst.append(
824
+ {
825
+ "itemtype": x.get("itemtype"),
826
+ "summary": x.get("summary"),
827
+ "s": x.get("s"),
828
+ }
829
+ )
830
+ i = 0
831
+ for x in docs:
832
+ i += 1
833
+ y = {
834
+ "itemtype": x.get("itemtype"),
835
+ "summary": x.get("summary"),
836
+ "s": x.get("s"),
837
+ }
838
+ if exst and y in exst:
839
+ dups += 1
840
+ else:
841
+ new.append(x)
842
+
843
+ ids = []
844
+ if new:
845
+ ids = ETMDB.insert_multiple(new)
846
+ ETMDB.close()
847
+ msg = f"imported {len(new)} items"
848
+ if ids:
849
+ msg += f"\n ids: {ids[0]}-{ids[-1]}."
850
+ if dups:
851
+ msg += f"\n rejected {dups} items as duplicates"
852
+ return msg
853
+
854
+
855
+ def update_db(db, doc_id, hsh={}):
856
+ old = db.get(doc_id=doc_id)
857
+ if not old:
858
+ logger.error(f"Could not get document corresponding to doc_id {doc_id}")
859
+ return
860
+ if old == hsh:
861
+ return
862
+ hsh["modified"] = datetime.now()
863
+ logger.debug(f"starting db.update")
864
+ try:
865
+ db.update(db_replace(hsh), doc_ids=[doc_id])
866
+ except Exception as e:
867
+ logger.error(
868
+ f"Error updating document corresponding to doc_id {doc_id}\nhsh {hsh}\nexception: {repr(e)}"
869
+ )
870
+
871
+
872
+ def write_back(db, docs):
873
+ logger.debug(f"starting write_back")
874
+ for doc in docs:
875
+ try:
876
+ doc_id = doc.doc_id
877
+ update_db(db, doc_id, doc)
878
+ except Exception as e:
879
+ logger.error(f"write_back exception: {e}")
880
+
881
+
882
+ def setup_logging(level, etmdir, file=None):
883
+ """
884
+ Setup logging configuration. Override root:level in
885
+ logging.yaml with default_level.
886
+ """
887
+
888
+ if not os.path.isdir(etmdir):
889
+ return
890
+
891
+ log_levels = {
892
+ 1: logging.DEBUG,
893
+ 2: logging.INFO,
894
+ 3: logging.WARN,
895
+ 4: logging.ERROR,
896
+ 5: logging.CRITICAL,
897
+ }
898
+
899
+ level = int(level)
900
+ loglevel = log_levels.get(level, log_levels[3])
901
+
902
+ # if we get here, we have an existing etmdir
903
+ logfile = os.path.normpath(os.path.abspath(os.path.join(etmdir, "etm.log")))
904
+
905
+ config = {
906
+ "disable_existing_loggers": False,
907
+ "formatters": {
908
+ "simple": {
909
+ "format": "--- %(asctime)s - %(levelname)s - %(module)s.%(funcName)s\n %(message)s"
910
+ }
911
+ },
912
+ "handlers": {
913
+ "file": {
914
+ "backupCount": 7,
915
+ "class": "logging.handlers.TimedRotatingFileHandler",
916
+ "encoding": "utf8",
917
+ "filename": logfile,
918
+ "formatter": "simple",
919
+ "level": loglevel,
920
+ "when": "midnight",
921
+ "interval": 1,
922
+ }
923
+ },
924
+ "loggers": {
925
+ "etmmv": {
926
+ "handlers": ["file"],
927
+ "level": loglevel,
928
+ "propagate": False,
929
+ }
930
+ },
931
+ "Redirectoot": {"handlers": ["file"], "level": loglevel},
932
+ "version": 1,
933
+ }
934
+ logging.config.dictConfig(config)
935
+ # logger = logging.getLogger('asyncio').setLevel(logging.WARNING)
936
+ logger = logging.getLogger("etmmv")
937
+
938
+ logger.critical("\n######## Initializing logging #########")
939
+ if logfile:
940
+ logger.critical(
941
+ f"logging for file: {file}\n logging at level: {loglevel}\n logging to file: {logfile}"
942
+ )
943
+ else:
944
+ logger.critical(f"logging at level: {loglevel}\n logging to file: {logfile}")
945
+ return logger
946
+
947
+
948
+ def openWithDefault(path):
949
+ if " " in path:
950
+ parts = qsplit(path)
951
+ if parts:
952
+ # wrapper to catch 'Exception Ignored' messages
953
+ output = io.StringIO()
954
+ with contextlib.redirect_stderr(output):
955
+ # the pid business is evidently needed to avoid waiting
956
+ pid = subprocess.Popen(
957
+ parts,
958
+ stdin=subprocess.DEVNULL,
959
+ stdout=subprocess.DEVNULL,
960
+ stderr=subprocess.DEVNULL,
961
+ ).pid
962
+ res = output.getvalue()
963
+ if res:
964
+ logger.error(f"caught by contextlib:\n'{res}'")
965
+
966
+ else:
967
+ path = os.path.normpath(os.path.expanduser(path))
968
+ sys_platform = platform.system()
969
+ if platform.system() == "Darwin": # macOS
970
+ subprocess.run(
971
+ ("open", path),
972
+ stdout=subprocess.DEVNULL,
973
+ stderr=subprocess.DEVNULL,
974
+ )
975
+ elif platform.system() == "Windows": # Windows
976
+ os.startfile(path)
977
+ else: # linux
978
+ subprocess.run(
979
+ ("xdg-open", path),
980
+ stdout=subprocess.DEVNULL,
981
+ stderr=subprocess.DEVNULL,
982
+ )
983
+
984
+ return
985
+
986
+
987
+ class TDBLexer(RegexLexer):
988
+ name = "TDB"
989
+ aliases = ["tdb"]
990
+ filenames = "*.*"
991
+ flags = re.MULTILINE | re.DOTALL
992
+
993
+ tokens = {
994
+ "root": [
995
+ (
996
+ r"\b(begins|includes|in|equals|more|less|exists|any|all|one)\b",
997
+ Keyword,
998
+ ),
999
+ (
1000
+ r"\b(replace|remove|archive|delete|set|provide|attach|detach)\b",
1001
+ Keyword,
1002
+ ),
1003
+ (r"\b(itemtype|summary)\b", Literal),
1004
+ (r"\b(and|or|info)\b", Keyword),
1005
+ ],
1006
+ }
1007
+
1008
+
1009
+ def nowrap(txt, indent=3, width=shutil.get_terminal_size()[0] - 3):
1010
+ return txt
1011
+
1012
+
1013
+ def wrap(
1014
+ txt_to_wrap: str, indent: int = 3, width: int = shutil.get_terminal_size()[0] - 3
1015
+ ) -> str:
1016
+ """
1017
+ Split text on newlines into paragraphs. Then preserving the
1018
+ indentation of the beginning of each paragraph, wrap each paragraph to the specified width using the initial indentation plus the number of spaces specified by the indent parameter as the subsequent indentation.
1019
+ """
1020
+ para = [x.rstrip() for x in txt_to_wrap.split("\n")]
1021
+ tmp = []
1022
+ for p in para:
1023
+ p_ = p.lstrip(" ")
1024
+ i_ = len(p) - len(p_)
1025
+ initial_indent = " " * i_
1026
+ subsequent_indent = " " * (indent + i_)
1027
+ tmp.append(
1028
+ textwrap.fill(
1029
+ p_,
1030
+ initial_indent=initial_indent,
1031
+ subsequent_indent=subsequent_indent,
1032
+ width=width - indent - 1,
1033
+ )
1034
+ )
1035
+ return "\n".join(tmp)
1036
+
1037
+
1038
+ def unwrap(wrapped_text: str) -> str:
1039
+ # Split the text into paragraphs
1040
+ paragraphs = wrapped_text.split("\n")
1041
+
1042
+ # Remove indentations and join lines within each paragraph
1043
+ unwrapped_paragraphs = []
1044
+ current_paragraph = []
1045
+
1046
+ first = True
1047
+ for line in paragraphs:
1048
+ if line.strip() == "":
1049
+ # Paragraph separator
1050
+ if current_paragraph:
1051
+ unwrapped_paragraphs.append(" ".join(current_paragraph))
1052
+ current_paragraph = []
1053
+ unwrapped_paragraphs.append("")
1054
+ first = True
1055
+ elif first:
1056
+ current_paragraph.append(line)
1057
+ first = False
1058
+ else:
1059
+ # Remove leading spaces used for indentation
1060
+ current_paragraph.append(line.strip())
1061
+
1062
+ # Add the last paragraph if there is any
1063
+ if current_paragraph:
1064
+ unwrapped_paragraphs.append(" ".join(current_paragraph))
1065
+
1066
+ # Join the unwrapped paragraphs
1067
+ return "\n".join(unwrapped_paragraphs)
1068
+
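A round-trip sketch for wrap and unwrap (illustrative; the exact line breaks depend on the width argument):

    from tklr.common import wrap, unwrap

    text = "A fairly long note that will be wrapped for display in a narrow panel."
    wrapped = wrap(text, indent=2, width=40)  # hard-wrapped, continuation lines indented
    unwrap(wrapped) == text                   # True: joining the lines restores the paragraph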
1069
+
1070
+ def parse(s, **kwd):
1071
+ # enable pi when read by main and settings is available
1072
+ pi = parserinfo(dayfirst=settings["dayfirst"], yearfirst=settings["yearfirst"])
1073
+ # logger.debug(f"parsing {s = } with {kwd = }")
1074
+ dt = dateutil_parse(s, parserinfo=pi)
1075
+ if "tzinfo" in kwd:
1076
+ tzinfo = kwd["tzinfo"]
1077
+ # logger.debug(f"using {tzinfo = } with {dt = }")
1078
+ if tzinfo is None:
1079
+ return dt.replace(tzinfo=None)
1080
+ elif tzinfo == "local":
1081
+ return dt.astimezone()
1082
+ else:
1083
+ return dt.replace(tzinfo=ZoneInfo(tzinfo))
1084
+ else:
1085
+ return dt.astimezone()
1086
+
1087
+
1088
+ class AttrDict(dict):
1089
+ def __init__(self, *args, **kwargs):
1090
+ super(AttrDict, self).__init__(*args, **kwargs)
1091
+ for arg in args:
1092
+ if isinstance(arg, dict):
1093
+ for k, v in arg.items():
1094
+ self[k] = v
1095
+
1096
+ if kwargs:
1097
+ for k, v in kwargs.items():
1098
+ self[k] = v
1099
+
1100
+ def __getattr__(self, item):
1101
+ try:
1102
+ return self[item]
1103
+ except KeyError:
1104
+ raise AttributeError(f"'AttrDict' object has no attribute '{item}'")
1105
+
1106
+ def __setattr__(self, key, value):
1107
+ self[key] = value
1108
+
1109
+ # Initializing AttrDict with a dictionary
1110
+ # d = AttrDict({'attr': 'value', 'another': 123})
1111
+ # print(d.attr) # Outputs: value
1112
+
1113
+
1114
+ class EtmChar:
1115
+ VSEP = "⏐" # U+23D0 this will be a de-emphasized color
1116
+ FREE = "─" # U+2500 this will be a de-emphasized color
1117
+ HSEP = "┈" #
1118
+ BUSY = "■" # U+25A0 this will be busy (event) color
1119
+ CONF = "▦" # U+25A6 this will be conflict color
1120
+ TASK = "▩" # U+25A9 this will be busy (task) color
1121
+ ADAY = "━" # U+2501 for all day events ━
1122
+ RSKIP = "▶" # U+25E6 for used time
1123
+ LSKIP = "◀" # U+25E6 for used time
1124
+ USED = "◦" # U+25E6 for used time
1125
+ REPS = "↻" # Flag for repeating items
1126
+ FINISHED_CHAR = "✓"
1127
+ SKIPPED_CHAR = "✗"
1128
+ SLOW_CHAR = "∾"
1129
+ LATE_CHAR = "∿"
1130
+ INACTIVE_CHAR = "≁"
1131
+ # INACTIVE_CHAR='∽'
1132
+ ENDED_CHAR = "≀"
1133
+ UPDATE_CHAR = "𝕦"
1134
+ INBASKET_CHAR = "𝕚"
1135
+ KONNECT_CHAR = "k"
1136
+ LINK_CHAR = "g"
1137
+ PIN_CHAR = "p"
1138
+ ELLIPSIS_CHAR = "…"
1139
+ LINEDOT = " · " # ܁ U+00B7 (middle dot),
1140
+ ELECTRIC = "⌁"
1141
+
1142
+
1143
+ # model, data and ical
1144
+ # with integer prefixes
1145
+ WKDAYS_DECODE = {
1146
+ "{0}{1}".format(n, d): "{0}({1})".format(d, n) if n else d
1147
+ for d in ["MO", "TU", "WE", "TH", "FR", "SA", "SU"]
1148
+ for n in ["-4", "-3", "-2", "-1", "", "1", "2", "3", "4"]
1149
+ }
1150
+
1151
+ WKDAYS_ENCODE = {
1152
+ "{0}({1})".format(d, n): "{0}{1}".format(n, d) if n else d
1153
+ for d in ["MO", "TU", "WE", "TH", "FR", "SA", "SU"]
1154
+ for n in ["-4", "-3", "-2", "-1", "+1", "+2", "+3", "+4"]
1155
+ }
1156
+
1157
+ # without integer prefixes
1158
+ for wkd in ["MO", "TU", "WE", "TH", "FR", "SA", "SU"]:
1159
+ WKDAYS_ENCODE[wkd] = wkd
1160
+
1161
+ # print(f'WKDAYS_DECODE:\n{WKDAYS_DECODE}')
1162
+ # print(f'WKDAYS_ENCODE:\n{WKDAYS_ENCODE}')
1163
+ # WKDAYS_DECODE:
1164
+ # {'-4MO': 'MO(-4)', '-3MO': 'MO(-3)', '-2MO': 'MO(-2)', '-1MO': 'MO(-1)', 'MO': 'MO', '1MO': 'MO(1)', '2MO': 'MO(2)', '3MO': 'MO(3)', '4MO': 'MO(4)', '-4TU': 'TU(-4)', '-3TU': 'TU(-3)', '-2TU': 'TU(-2)', '-1TU': 'TU(-1)', 'TU': 'TU', '1TU': 'TU(1)', '2TU': 'TU(2)', '3TU': 'TU(3)', '4TU': 'TU(4)', '-4WE': 'WE(-4)', '-3WE': 'WE(-3)', '-2WE': 'WE(-2)', '-1WE': 'WE(-1)', 'WE': 'WE', '1WE': 'WE(1)', '2WE': 'WE(2)', '3WE': 'WE(3)', '4WE': 'WE(4)', '-4TH': 'TH(-4)', '-3TH': 'TH(-3)', '-2TH': 'TH(-2)', '-1TH': 'TH(-1)', 'TH': 'TH', '1TH': 'TH(1)', '2TH': 'TH(2)', '3TH': 'TH(3)', '4TH': 'TH(4)', '-4FR': 'FR(-4)', '-3FR': 'FR(-3)', '-2FR': 'FR(-2)', '-1FR': 'FR(-1)', 'FR': 'FR', '1FR': 'FR(1)', '2FR': 'FR(2)', '3FR': 'FR(3)', '4FR': 'FR(4)', '-4SA': 'SA(-4)', '-3SA': 'SA(-3)', '-2SA': 'SA(-2)', '-1SA': 'SA(-1)', 'SA': 'SA', '1SA': 'SA(1)', '2SA': 'SA(2)', '3SA': 'SA(3)', '4SA': 'SA(4)', '-4SU': 'SU(-4)', '-3SU': 'SU(-3)', '-2SU': 'SU(-2)', '-1SU': 'SU(-1)', 'SU': 'SU', '1SU': 'SU(1)', '2SU': 'SU(2)', '3SU': 'SU(3)', '4SU': 'SU(4)'}
1165
+ # WKDAYS_ENCODE:
1166
+ # {'MO(-4)': '-4MO', 'MO(-3)': '-3MO', 'MO(-2)': '-2MO', 'MO(-1)': '-1MO', 'MO(+1)': '+1MO', 'MO(+2)': '+2MO', 'MO(+3)': '+3MO', 'MO(+4)': '+4MO', 'TU(-4)': '-4TU', 'TU(-3)': '-3TU', 'TU(-2)': '-2TU', 'TU(-1)': '-1TU', 'TU(+1)': '+1TU', 'TU(+2)': '+2TU', 'TU(+3)': '+3TU', 'TU(+4)': '+4TU', 'WE(-4)': '-4WE', 'WE(-3)': '-3WE', 'WE(-2)': '-2WE', 'WE(-1)': '-1WE', 'WE(+1)': '+1WE', 'WE(+2)': '+2WE', 'WE(+3)': '+3WE', 'WE(+4)': '+4WE', 'TH(-4)': '-4TH', 'TH(-3)': '-3TH', 'TH(-2)': '-2TH', 'TH(-1)': '-1TH', 'TH(+1)': '+1TH', 'TH(+2)': '+2TH', 'TH(+3)': '+3TH', 'TH(+4)': '+4TH', 'FR(-4)': '-4FR', 'FR(-3)': '-3FR', 'FR(-2)': '-2FR', 'FR(-1)': '-1FR', 'FR(+1)': '+1FR', 'FR(+2)': '+2FR', 'FR(+3)': '+3FR', 'FR(+4)': '+4FR', 'SA(-4)': '-4SA', 'SA(-3)': '-3SA', 'SA(-2)': '-2SA', 'SA(-1)': '-1SA', 'SA(+1)': '+1SA', 'SA(+2)': '+2SA', 'SA(+3)': '+3SA', 'SA(+4)': '+4SA', 'SU(-4)': '-4SU', 'SU(-3)': '-3SU', 'SU(-2)': '-2SU', 'SU(-1)': '-1SU', 'SU(+1)': '+1SU', 'SU(+2)': '+2SU', 'SU(+3)': '+3SU', 'SU(+4)': '+4SU', 'MO': 'MO', 'TU': 'TU', 'WE': 'WE', 'TH': 'TH', 'FR': 'FR', 'SA': 'SA', 'SU': 'SU'}
1167
+
1168
+
1169
+ AWARE_FMT = "%Y%m%dT%H%MA"
1170
+ NAIVE_FMT = "%Y%m%dT%H%MN"
1171
+ DATE_FMT = "%Y%m%d"
1172
+
1173
+
1174
+ def normalize_timedelta(delta):
1175
+ total_seconds = delta.total_seconds()
1176
+ sign = "-" if total_seconds < 0 else ""
1177
+ minutes, remainder = divmod(abs(int(total_seconds)), 60)
1178
+ hours, minutes = divmod(minutes, 60)
1179
+ days, hours = divmod(hours, 24)
1180
+ weeks, days = divmod(days, 7)
1181
+
1182
+ until = []
1183
+ if weeks:
1184
+ until.append(f"{weeks}w")
1185
+ if days:
1186
+ until.append(f"{days}d")
1187
+ if hours:
1188
+ until.append(f"{hours}h")
1189
+ if minutes:
1190
+ until.append(f"{minutes}m")
1191
+ if not until:
1192
+ until.append("0m")
1193
+
1194
+ return sign + "".join(until)
1195
+
1196
+
1197
+ # Test
1198
+ td = timedelta(days=-1, hours=2, minutes=30)
1199
+ normalized_td = normalize_timedelta(td)
1200
+
1201
+ td = timedelta(days=1, hours=-2, minutes=-30)
1202
+ normalized_td = normalize_timedelta(td)
1203
+
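For the two test values above, the normalized forms work out as follows (sketch; both deltas are ±77,400 seconds):

    from datetime import timedelta
    from tklr.common import normalize_timedelta

    normalize_timedelta(timedelta(days=-1, hours=2, minutes=30))    # '-21h30m'
    normalize_timedelta(timedelta(days=1, hours=-2, minutes=-30))   # '21h30m'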
1204
+
1205
+ def get_anchor(aware: bool) -> datetime:
1206
+ dt = datetime(1970, 1, 1, 0, 0, 0)
1207
+ if aware:
1208
+ return dt.replace(tzinfo=ZoneInfo("UTC"))
1209
+ return dt
1210
+
1211
+
1212
+ def encode_datetime(obj):
1213
+ if not isinstance(obj, datetime):
1214
+ raise ValueError(f"{obj} is not a datetime instance")
1215
+ if is_aware(obj):
1216
+ return obj.astimezone(ZoneInfo("UTC")).strftime(AWARE_FMT)
1217
+ else:
1218
+ return obj.strftime(NAIVE_FMT)
1219
+
1220
+
1221
+ def decode_datetime(s):
1222
+ if s[-1] not in "AN" or len(s) != 14:
1223
+ raise ValueError(f"{s} is not a datetime string")
1224
+ if s[-1] == "A":
1225
+ return (
1226
+ datetime.strptime(s, AWARE_FMT).replace(tzinfo=ZoneInfo("UTC")).astimezone()
1227
+ )
1228
+ else:
1229
+ return datetime.strptime(s, NAIVE_FMT).astimezone(None)
1230
+
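A round-trip sketch for the encoders (illustrative; aware datetimes are stored in UTC with an 'A' suffix, naive ones with an 'N' suffix):

    from datetime import datetime
    from zoneinfo import ZoneInfo
    from tklr.common import encode_datetime, decode_datetime

    dt = datetime(2024, 7, 4, 15, 30, tzinfo=ZoneInfo("America/New_York"))
    s = encode_datetime(dt)   # '20240704T1930A' (converted to UTC)
    decode_datetime(s)        # aware datetime converted back to the local timezone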
1231
+
1232
+ def truncate_string(s: str, max_length: int) -> str:
1233
+ log_msg(f"Truncating string '{s}' to {max_length} characters")
1234
+ if len(s) > max_length:
1235
+ return f"{s[: max_length - 2]} {EtmChar.ELLIPSIS_CHAR}"
1236
+ else:
1237
+ return s
1238
+
1239
+
1240
+ class Period:
1241
+ def __init__(self, datetime1, datetime2):
1242
+ # datetime1: done/start; datetime2: due/end. On time => period positive
1243
+ # Ensure both inputs are datetime.datetime instances
1244
+ if not isinstance(datetime1, datetime) or not isinstance(datetime2, datetime):
1245
+ raise ValueError("Both inputs must be datetime instances")
1246
+
1247
+ aware1 = is_aware(datetime1)
1248
+ aware2 = is_aware(datetime2)
1249
+
1250
+ if aware1 != aware2:
1251
+ raise ValueError(
1252
+ f"start: {datetime1.tzinfo}, end: {datetime2.tzinfo}. Both datetimes must either be naive or both must be aware."
1253
+ )
1254
+
1255
+ if aware1:
1256
+ self.start = datetime1.astimezone(ZoneInfo("UTC"))
1257
+ self.end = datetime2.astimezone(ZoneInfo("UTC"))
1258
+ else:
1259
+ self.start = datetime1.replace(tzinfo=None)
1260
+ self.end = datetime2.replace(tzinfo=None)
1261
+
1262
+ self.diff = self.end - self.start
1263
+
1264
+ def __repr__(self):
1265
+ return f"Period({encode_datetime(self.start)} -> {encode_datetime(self.end)}, {normalize_timedelta(self.diff)})"
1266
+
1267
+ def __eq__(self, other):
1268
+ if isinstance(other, Period):
1269
+ return self.start == other.start
1270
+ return NotImplemented
1271
+
1272
+ def __lt__(self, other):
1273
+ if isinstance(other, Period):
1274
+ return self.start < other.start
1275
+ return NotImplemented
1276
+
1277
+ def __gt__(self, other):
1278
+ if isinstance(other, Period):
1279
+ return self.start > other.start
1280
+ return NotImplemented
1281
+
1282
+ # Optionally, define __le__ and __ge__
1283
+ def __le__(self, other):
1284
+ return self < other or self == other
1285
+
1286
+ def __ge__(self, other):
1287
+ return self > other or self == other
1288
+
1289
+ # start, end and diff are plain instance attributes set in __init__ and are
+ # accessed directly (p.start, p.end, p.diff); no accessor methods are defined.
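Finally, a brief sketch of how Period composes with normalize_timedelta (illustrative):

    from datetime import datetime
    from tklr.common import Period, normalize_timedelta

    p = Period(datetime(2024, 7, 4, 9, 0), datetime(2024, 7, 4, 10, 30))
    p.diff                       # a timedelta of 90 minutes
    normalize_timedelta(p.diff)  # '1h30m'
    repr(p)                      # 'Period(20240704T0900N -> 20240704T1030N, 1h30m)'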