ominfra 0.0.0.dev427__py3-none-any.whl → 0.0.0.dev428__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,7 +5,7 @@
 # @omlish-generated
 # @omlish-amalg-output ../clouds/aws/journald2aws/main.py
 # @omlish-git-diff-omit
-# ruff: noqa: N802 UP006 UP007 UP036 UP043 UP045
+# ruff: noqa: N802 UP006 UP007 UP036 UP043 UP045 UP046
 import abc
 import argparse
 import base64
@@ -38,6 +38,7 @@ import subprocess
 import sys
 import threading
 import time
+import traceback
 import types
 import typing as ta
 import urllib.parse
@@ -85,12 +86,23 @@ CheckArgsRenderer = ta.Callable[..., ta.Optional[str]]  # ta.TypeAlias
 ExitStackedT = ta.TypeVar('ExitStackedT', bound='ExitStacked')
 AsyncExitStackedT = ta.TypeVar('AsyncExitStackedT', bound='AsyncExitStacked')

-# ../../../threadworkers.py
-ThreadWorkerT = ta.TypeVar('ThreadWorkerT', bound='ThreadWorker')
+# ../../../../omlish/logs/levels.py
+LogLevel = int  # ta.TypeAlias

 # ../../../../omlish/configs/formats.py
 ConfigDataT = ta.TypeVar('ConfigDataT', bound='ConfigData')

+# ../../../../omlish/logs/contexts.py
+LoggingExcInfoTuple = ta.Tuple[ta.Type[BaseException], BaseException, ta.Optional[types.TracebackType]]  # ta.TypeAlias
+LoggingExcInfo = ta.Union[BaseException, LoggingExcInfoTuple]  # ta.TypeAlias
+LoggingExcInfoArg = ta.Union[LoggingExcInfo, bool, None]  # ta.TypeAlias
+
+# ../../../../omlish/logs/base.py
+LoggingMsgFn = ta.Callable[[], ta.Union[str, tuple]]  # ta.TypeAlias
+
+# ../../../threadworkers.py
+ThreadWorkerT = ta.TypeVar('ThreadWorkerT', bound='ThreadWorker')
+

 ########################################
 # ../../../../../omlish/configs/types.py
@@ -2673,14 +2685,188 @@ def format_num_bytes(num_bytes: int) -> str:


 ########################################
-# ../../../../../omlish/logs/modules.py
+# ../../../../../omlish/logs/infos.py
+
+
+##
+
+
+class _LoggingContextInfo:
+    def __mro_entries__(self, bases):
+        return ()
+
+
+LoggingContextInfo: type = ta.cast(ta.Any, _LoggingContextInfo())
+
+
+##
+
+
+@ta.final
+class LoggingSourceFileInfo(LoggingContextInfo, ta.NamedTuple):  # type: ignore[misc]
+    file_name: str
+    module: str
+
+    @classmethod
+    def build(cls, file_path: ta.Optional[str]) -> ta.Optional['LoggingSourceFileInfo']:
+        if file_path is None:
+            return None
+
+        # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L331-L336  # noqa
+        try:
+            file_name = os.path.basename(file_path)
+            module = os.path.splitext(file_name)[0]
+        except (TypeError, ValueError, AttributeError):
+            return None
+
+        return cls(
+            file_name,
+            module,
+        )
+
+
+##
+
+
+@ta.final
+class LoggingThreadInfo(LoggingContextInfo, ta.NamedTuple):  # type: ignore[misc]
+    ident: int
+    native_id: ta.Optional[int]
+    name: str
+
+    @classmethod
+    def build(cls) -> 'LoggingThreadInfo':
+        return cls(
+            threading.get_ident(),
+            threading.get_native_id() if hasattr(threading, 'get_native_id') else None,
+            threading.current_thread().name,
+        )
+
+
+##
+
+
+@ta.final
+class LoggingProcessInfo(LoggingContextInfo, ta.NamedTuple):  # type: ignore[misc]
+    pid: int
+
+    @classmethod
+    def build(cls) -> 'LoggingProcessInfo':
+        return cls(
+            os.getpid(),
+        )
+
+
+##
+
+
+@ta.final
+class LoggingMultiprocessingInfo(LoggingContextInfo, ta.NamedTuple):  # type: ignore[misc]
+    process_name: str
+
+    @classmethod
+    def build(cls) -> ta.Optional['LoggingMultiprocessingInfo']:
+        # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L355-L364  # noqa
+        if (mp := sys.modules.get('multiprocessing')) is None:
+            return None
+
+        return cls(
+            mp.current_process().name,
+        )
+
+
+##
+
+
+@ta.final
+class LoggingAsyncioTaskInfo(LoggingContextInfo, ta.NamedTuple):  # type: ignore[misc]
+    name: str
+
+    @classmethod
+    def build(cls) -> ta.Optional['LoggingAsyncioTaskInfo']:
+        # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L372-L377  # noqa
+        if (asyncio := sys.modules.get('asyncio')) is None:
+            return None
+
+        try:
+            task = asyncio.current_task()
+        except Exception:  # noqa
+            return None
+
+        if task is None:
+            return None
+
+        return cls(
+            task.get_name(),  # Always non-None
+        )
+
+
+########################################
+# ../../../../../omlish/logs/levels.py


 ##


-def get_module_logger(mod_globals: ta.Mapping[str, ta.Any]) -> logging.Logger:
-    return logging.getLogger(mod_globals.get('__name__'))
+@ta.final
+class NamedLogLevel(int):
+    # logging.getLevelNamesMapping (or, as that is unavailable <3.11, logging._nameToLevel) includes the deprecated
+    # aliases.
+    _NAMES_BY_INT: ta.ClassVar[ta.Mapping[LogLevel, str]] = dict(sorted(logging._levelToName.items(), key=lambda t: -t[0]))  # noqa
+
+    _INTS_BY_NAME: ta.ClassVar[ta.Mapping[str, LogLevel]] = {v: k for k, v in _NAMES_BY_INT.items()}
+
+    _NAME_INT_PAIRS: ta.ClassVar[ta.Sequence[ta.Tuple[str, LogLevel]]] = list(_INTS_BY_NAME.items())
+
+    #
+
+    @property
+    def exact_name(self) -> ta.Optional[str]:
+        return self._NAMES_BY_INT.get(self)
+
+    _effective_name: ta.Optional[str]
+
+    @property
+    def effective_name(self) -> ta.Optional[str]:
+        try:
+            return self._effective_name
+        except AttributeError:
+            pass
+
+        if (n := self.exact_name) is None:
+            for n, i in self._NAME_INT_PAIRS:  # noqa
+                if self >= i:
+                    break
+            else:
+                n = None
+
+        self._effective_name = n
+        return n
+
+    #
+
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}({int(self)})'
+
+    def __str__(self) -> str:
+        return self.exact_name or f'{self.effective_name or "INVALID"}:{int(self)}'
+
+    #
+
+    CRITICAL: ta.ClassVar['NamedLogLevel']
+    ERROR: ta.ClassVar['NamedLogLevel']
+    WARNING: ta.ClassVar['NamedLogLevel']
+    INFO: ta.ClassVar['NamedLogLevel']
+    DEBUG: ta.ClassVar['NamedLogLevel']
+    NOTSET: ta.ClassVar['NamedLogLevel']
+
+
+NamedLogLevel.CRITICAL = NamedLogLevel(logging.CRITICAL)
+NamedLogLevel.ERROR = NamedLogLevel(logging.ERROR)
+NamedLogLevel.WARNING = NamedLogLevel(logging.WARNING)
+NamedLogLevel.INFO = NamedLogLevel(logging.INFO)
+NamedLogLevel.DEBUG = NamedLogLevel(logging.DEBUG)
+NamedLogLevel.NOTSET = NamedLogLevel(logging.NOTSET)
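
Aside: a minimal sketch (illustrative only, not part of the package) of how the NamedLogLevel added above resolves names, assuming this hunk's definitions and a stdlib logging import:

    lvl = NamedLogLevel(35)                 # falls between WARNING (30) and ERROR (40)
    assert lvl.exact_name is None           # 35 is not a registered stdlib level
    assert lvl.effective_name == 'WARNING'  # highest named level <= 35
    assert str(lvl) == 'WARNING:35'
    assert str(NamedLogLevel.ERROR) == 'ERROR'
    assert lvl > logging.WARNING            # plain int comparisons still work

Because NamedLogLevel subclasses int, instances pass anywhere a plain LogLevel int is expected.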


 ########################################
@@ -2803,6 +2989,17 @@ class ProxyLoggingHandler(ProxyLoggingFilterer, logging.Handler):
         self._underlying.handleError(record)


+########################################
+# ../../../../../omlish/logs/warnings.py
+
+
+##
+
+
+class LoggingSetupWarning(Warning):
+    pass
+
+
 ########################################
 # ../../../../../omlish/os/pidfiles/pidfile.py
 """
@@ -3393,341 +3590,98 @@ class AwsDataclassMeta:


 ########################################
-# ../cursor.py
-
+# ../../../../../omlish/configs/formats.py
+"""
+Notes:
+ - necessarily string-oriented
+ - single file, as this is intended to be amalg'd and thus all included anyway

-log = get_module_logger(globals())  # noqa
+TODO:
+ - ConfigDataMapper? to_map -> ConfigMap?
+ - nginx ?
+ - raw ?
+"""


 ##


-class JournalctlToAwsCursor:
-    def __init__(
-        self,
-        cursor_file: ta.Optional[str] = None,
-        *,
-        ensure_locked: ta.Optional[ta.Callable[[], None]] = None,
-    ) -> None:
-        super().__init__()
+@dc.dataclass(frozen=True)
+class ConfigData(Abstract):
+    @abc.abstractmethod
+    def as_map(self) -> ConfigMap:
+        raise NotImplementedError

-        self._cursor_file = cursor_file
-        self._ensure_locked = ensure_locked

-    #
+#

-    def get(self) -> ta.Optional[str]:
-        if self._ensure_locked is not None:
-            self._ensure_locked()

-        if not (cf := self._cursor_file):
-            return None
-        cf = os.path.expanduser(cf)
+class ConfigLoader(Abstract, ta.Generic[ConfigDataT]):
+    @property
+    def file_exts(self) -> ta.Sequence[str]:
+        return ()

-        try:
-            with open(cf) as f:
-                return f.read().strip()
-        except FileNotFoundError:
-            return None
+    def match_file(self, n: str) -> bool:
+        return '.' in n and n.split('.')[-1] in check.not_isinstance(self.file_exts, str)

-    def set(self, cursor: str) -> None:
-        if self._ensure_locked is not None:
-            self._ensure_locked()
+    #

-        if not (cf := self._cursor_file):
-            return
-        cf = os.path.expanduser(cf)
+    def load_file(self, p: str) -> ConfigDataT:
+        with open(p) as f:
+            return self.load_str(f.read())

-        log.info('Writing cursor file %s : %s', cf, cursor)
-        with open(ncf := cf + '.next', 'w') as f:
-            f.write(cursor)
+    @abc.abstractmethod
+    def load_str(self, s: str) -> ConfigDataT:
+        raise NotImplementedError

-        os.rename(ncf, cf)

+#

-########################################
-# ../../../../threadworkers.py
-"""
-FIXME:
- - group is racy af - meditate on has_started, etc

-TODO:
- - overhaul stop lol
- - group -> 'context'? :|
- - shared stop_event?
-"""
+class ConfigRenderer(Abstract, ta.Generic[ConfigDataT]):
+    @property
+    @abc.abstractmethod
+    def data_cls(self) -> ta.Type[ConfigDataT]:
+        raise NotImplementedError

+    def match_data(self, d: ConfigDataT) -> bool:
+        return isinstance(d, self.data_cls)

-log = get_module_logger(globals())  # noqa
+    #
+
+    @abc.abstractmethod
+    def render(self, d: ConfigDataT) -> str:
+        raise NotImplementedError


 ##


-class ThreadWorker(ExitStacked, Abstract):
-    def __init__(
-        self,
-        *,
-        stop_event: ta.Optional[threading.Event] = None,
-        worker_groups: ta.Optional[ta.Iterable['ThreadWorkerGroup']] = None,
-    ) -> None:
-        super().__init__()
+@dc.dataclass(frozen=True)
+class ObjConfigData(ConfigData, Abstract):
+    obj: ta.Any

-        if stop_event is None:
-            stop_event = threading.Event()
-        self._stop_event = stop_event
+    def as_map(self) -> ConfigMap:
+        return check.isinstance(self.obj, collections.abc.Mapping)

-        self._lock = threading.RLock()
-        self._thread: ta.Optional[threading.Thread] = None
-        self._last_heartbeat: ta.Optional[float] = None

-        for g in worker_groups or []:
-            g.add(self)
+##

-    #

-    @contextlib.contextmanager
-    def _exit_stacked_init_wrapper(self) -> ta.Iterator[None]:
-        with self._lock:
-            yield
+@dc.dataclass(frozen=True)
+class JsonConfigData(ObjConfigData):
+    pass

-    #

-    def should_stop(self) -> bool:
-        return self._stop_event.is_set()
+class JsonConfigLoader(ConfigLoader[JsonConfigData]):
+    file_exts = ('json',)

-    class Stopping(Exception):  # noqa
-        pass
+    def load_str(self, s: str) -> JsonConfigData:
+        return JsonConfigData(json.loads(s))

-    #

-    @property
-    def last_heartbeat(self) -> ta.Optional[float]:
-        return self._last_heartbeat
-
-    def _heartbeat(
-        self,
-        *,
-        no_stop_check: bool = False,
-    ) -> None:
-        self._last_heartbeat = time.time()
-
-        if not no_stop_check and self.should_stop():
-            log.info('Stopping: %s', self)
-            raise ThreadWorker.Stopping
-
-    #
-
-    def has_started(self) -> bool:
-        return self._thread is not None
-
-    def is_alive(self) -> bool:
-        return (thr := self._thread) is not None and thr.is_alive()
-
-    def start(self) -> None:
-        with self._lock:
-            if self._thread is not None:
-                raise RuntimeError('Thread already started: %r', self)
-
-            thr = threading.Thread(target=self.__thread_main)
-            self._thread = thr
-            thr.start()
-
-    #
-
-    def __thread_main(self) -> None:
-        try:
-            self._run()
-        except ThreadWorker.Stopping:
-            log.exception('Thread worker stopped: %r', self)
-        except Exception:  # noqa
-            log.exception('Error in worker thread: %r', self)
-            raise
-
-    @abc.abstractmethod
-    def _run(self) -> None:
-        raise NotImplementedError
-
-    #
-
-    def stop(self) -> None:
-        self._stop_event.set()
-
-    def join(
-        self,
-        timeout: ta.Optional[float] = None,
-        *,
-        unless_not_started: bool = False,
-    ) -> None:
-        with self._lock:
-            if self._thread is None:
-                if not unless_not_started:
-                    raise RuntimeError('Thread not started: %r', self)
-                return
-            self._thread.join(timeout)
-
-
-##
-
-
-class ThreadWorkerGroup:
-    @dc.dataclass()
-    class _State:
-        worker: ThreadWorker
-
-        last_heartbeat: ta.Optional[float] = None
-
-    def __init__(self) -> None:
-        super().__init__()
-
-        self._lock = threading.RLock()
-        self._states: ta.Dict[ThreadWorker, ThreadWorkerGroup._State] = {}
-        self._last_heartbeat_check: ta.Optional[float] = None
-
-    #
-
-    def add(self, *workers: ThreadWorker) -> 'ThreadWorkerGroup':
-        with self._lock:
-            for w in workers:
-                if w in self._states:
-                    raise KeyError(w)
-                self._states[w] = ThreadWorkerGroup._State(w)
-
-        return self
-
-    #
-
-    def start_all(self) -> None:
-        thrs = list(self._states)
-        with self._lock:
-            for thr in thrs:
-                if not thr.has_started():
-                    thr.start()
-
-    def stop_all(self) -> None:
-        for w in reversed(list(self._states)):
-            if w.has_started():
-                w.stop()
-
-    def join_all(self, timeout: ta.Optional[float] = None) -> None:
-        for w in reversed(list(self._states)):
-            if w.has_started():
-                w.join(timeout, unless_not_started=True)
-
-    #
-
-    def get_dead(self) -> ta.List[ThreadWorker]:
-        with self._lock:
-            return [thr for thr in self._states if not thr.is_alive()]
-
-    def check_heartbeats(self) -> ta.Dict[ThreadWorker, float]:
-        with self._lock:
-            dct: ta.Dict[ThreadWorker, float] = {}
-            for thr, st in self._states.items():
-                if not thr.has_started():
-                    continue
-                hb = thr.last_heartbeat
-                if hb is None:
-                    hb = time.time()
-                st.last_heartbeat = hb
-                dct[st.worker] = time.time() - hb
-            self._last_heartbeat_check = time.time()
-            return dct
-
-
-########################################
-# ../../../../../omlish/configs/formats.py
-"""
-Notes:
- - necessarily string-oriented
- - single file, as this is intended to be amalg'd and thus all included anyway
-
-TODO:
- - ConfigDataMapper? to_map -> ConfigMap?
- - nginx ?
- - raw ?
-"""
-
-
-##
-
-
-@dc.dataclass(frozen=True)
-class ConfigData(Abstract):
-    @abc.abstractmethod
-    def as_map(self) -> ConfigMap:
-        raise NotImplementedError
-
-
-#
-
-
-class ConfigLoader(Abstract, ta.Generic[ConfigDataT]):
-    @property
-    def file_exts(self) -> ta.Sequence[str]:
-        return ()
-
-    def match_file(self, n: str) -> bool:
-        return '.' in n and n.split('.')[-1] in check.not_isinstance(self.file_exts, str)
-
-    #
-
-    def load_file(self, p: str) -> ConfigDataT:
-        with open(p) as f:
-            return self.load_str(f.read())
-
-    @abc.abstractmethod
-    def load_str(self, s: str) -> ConfigDataT:
-        raise NotImplementedError
-
-
-#
-
-
-class ConfigRenderer(Abstract, ta.Generic[ConfigDataT]):
-    @property
-    @abc.abstractmethod
-    def data_cls(self) -> ta.Type[ConfigDataT]:
-        raise NotImplementedError
-
-    def match_data(self, d: ConfigDataT) -> bool:
-        return isinstance(d, self.data_cls)
-
-    #
-
-    @abc.abstractmethod
-    def render(self, d: ConfigDataT) -> str:
-        raise NotImplementedError
-
-
-##
-
-
-@dc.dataclass(frozen=True)
-class ObjConfigData(ConfigData, Abstract):
-    obj: ta.Any
-
-    def as_map(self) -> ConfigMap:
-        return check.isinstance(self.obj, collections.abc.Mapping)
-
-
-##
-
-
-@dc.dataclass(frozen=True)
-class JsonConfigData(ObjConfigData):
-    pass
-
-
-class JsonConfigLoader(ConfigLoader[JsonConfigData]):
-    file_exts = ('json',)
-
-    def load_str(self, s: str) -> JsonConfigData:
-        return JsonConfigData(json.loads(s))
-
-
-class JsonConfigRenderer(ConfigRenderer[JsonConfigData]):
-    data_cls = JsonConfigData
+class JsonConfigRenderer(ConfigRenderer[JsonConfigData]):
+    data_cls = JsonConfigData

     def render(self, d: JsonConfigData) -> str:
         return json_dumps_pretty(d.obj)
@@ -4951,6 +4905,73 @@ def check_lite_runtime_version() -> None:
         raise OSError(f'Requires python {LITE_REQUIRED_PYTHON_VERSION}, got {sys.version_info} from {sys.executable}')  # noqa


+########################################
+# ../../../../../omlish/logs/callers.py
+
+
+##
+
+
+class LoggingCaller(LoggingContextInfo, ta.NamedTuple):  # type: ignore[misc]
+    file_path: str
+    line_no: int
+    name: str
+    stack_info: ta.Optional[str]
+
+    @classmethod
+    def is_internal_frame(cls, frame: types.FrameType) -> bool:
+        file_path = os.path.normcase(frame.f_code.co_filename)
+
+        # Yes, really.
+        # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L204
+        # https://github.com/python/cpython/commit/5ca6d7469be53960843df39bb900e9c3359f127f
+        if 'importlib' in file_path and '_bootstrap' in file_path:
+            return True
+
+        return False
+
+    @classmethod
+    def find_frame(cls, ofs: int = 0) -> ta.Optional[types.FrameType]:
+        f: ta.Optional[types.FrameType] = sys._getframe(2 + ofs)  # noqa
+
+        while f is not None:
+            # NOTE: We don't check __file__ like stdlib since we may be running amalgamated - we rely on careful, manual
+            # stack_offset management.
+            if hasattr(f, 'f_code'):
+                return f
+
+            f = f.f_back
+
+        return None
+
+    @classmethod
+    def find(
+        cls,
+        ofs: int = 0,
+        *,
+        stack_info: bool = False,
+    ) -> ta.Optional['LoggingCaller']:
+        if (f := cls.find_frame(ofs + 1)) is None:
+            return None
+
+        # https://github.com/python/cpython/blob/08e9794517063c8cd92c48714071b1d3c60b71bd/Lib/logging/__init__.py#L1616-L1623  # noqa
+        sinfo = None
+        if stack_info:
+            sio = io.StringIO()
+            traceback.print_stack(f, file=sio)
+            sinfo = sio.getvalue()
+            sio.close()
+            if sinfo[-1] == '\n':
+                sinfo = sinfo[:-1]
+
+        return cls(
+            f.f_code.co_filename,
+            f.f_lineno or 0,
+            f.f_code.co_name,
+            sinfo,
+        )
+
+
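Aside: an illustrative sketch (not package code) of the LoggingCaller added above; log_call_site is a hypothetical helper, and the offset arithmetic follows find/find_frame as defined in this hunk:

    def log_call_site() -> None:
        caller = LoggingCaller.find(0, stack_info=False)
        if caller is not None:
            # With ofs=0, this names the frame that invoked log_call_site()
            print(f'{caller.file_path}:{caller.line_no} in {caller.name}')

Wrappers deeper in a logging stack would pass a larger ofs, one per intervening frame.
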
 ########################################
 # ../../../../../omlish/logs/std/json.py
 """
@@ -5009,12 +5030,95 @@ class JsonLoggingFormatter(logging.Formatter):


 ########################################
-# ../../logs.py
-"""
-https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html :
- - The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26
-   bytes for each log event.
- - None of the log events in the batch can be more than 2 hours in the future.
+# ../../../../../omlish/logs/times.py
+
+
+##
+
+
+class LoggingTimeFields(LoggingContextInfo, ta.NamedTuple):  # type: ignore[misc]
+    """Maps directly to stdlib `logging.LogRecord` fields, and must be kept in sync with it."""
+
+    created: float
+    msecs: float
+    relative_created: float
+
+    @classmethod
+    def get_std_start_time_ns(cls) -> int:
+        x: ta.Any = logging._startTime  # type: ignore[attr-defined]  # noqa
+
+        # Before 3.13.0b1 this will be `time.time()`, a float of seconds. After that, it will be `time.time_ns()`, an
+        # int.
+        #
+        # See:
+        #  - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+        #
+        if isinstance(x, float):
+            return int(x * 1e9)
+        else:
+            return x
+
+    @classmethod
+    def build(
+        cls,
+        time_ns: int,
+        *,
+        start_time_ns: ta.Optional[int] = None,
+    ) -> 'LoggingTimeFields':
+        # https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+        created = time_ns / 1e9  # ns to float seconds
+
+        # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
+        # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
+        # Convert to float by adding 0.0 for historical reasons. See gh-89047
+        msecs = (time_ns % 1_000_000_000) // 1_000_000 + 0.0
+
+        # https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
+        if msecs == 999.0 and int(created) != time_ns // 1_000_000_000:
+            # ns -> sec conversion can round up, e.g:
+            # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
+            msecs = 0.0
+
+        if start_time_ns is None:
+            start_time_ns = cls.get_std_start_time_ns()
+        relative_created = (time_ns - start_time_ns) / 1e6
+
+        return cls(
+            created,
+            msecs,
+            relative_created,
+        )
+
+
+##
+
+
+class UnexpectedLoggingStartTimeWarning(LoggingSetupWarning):
+    pass
+
+
+def _check_logging_start_time() -> None:
+    if (x := LoggingTimeFields.get_std_start_time_ns()) < (t := time.time()):
+        import warnings  # noqa
+
+        warnings.warn(
+            f'Unexpected logging start time detected: '
+            f'get_std_start_time_ns={x}, '
+            f'time.time()={t}',
+            UnexpectedLoggingStartTimeWarning,
+        )
+
+
+_check_logging_start_time()
+
+
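Aside: a worked example (illustrative only) of the millisecond handling above, reusing the nanosecond values from the code comments and an assumed start_time_ns of 0:

    tf = LoggingTimeFields.build(1_677_903_920_999_998_503, start_time_ns=0)
    assert tf.msecs == 999.0  # 999_998_503 ns of fractional second -> 999 whole ms

    # Edge case: ns -> float seconds rounds up across the second boundary, so
    # msecs is clamped back to 0.0 rather than reporting 999 in the wrong second:
    tf2 = LoggingTimeFields.build(1_677_903_920_999_999_900, start_time_ns=0)
    assert tf2.msecs == 0.0
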
+########################################
+# ../../logs.py
+"""
+https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html :
+ - The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26
+   bytes for each log event.
+ - None of the log events in the batch can be more than 2 hours in the future.
  - None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from
    earlier than the retention period of the log group.
  - The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the
@@ -5179,85 +5283,6 @@ class AwsLogMessageBuilder:
         return [post]


-########################################
-# ../../../../journald/messages.py
-
-
-log = get_module_logger(globals())  # noqa
-
-
-##
-
-
-@dc.dataclass(frozen=True)
-class JournalctlMessage:
-    raw: bytes
-    dct: ta.Optional[ta.Mapping[str, ta.Any]] = None
-    cursor: ta.Optional[str] = None
-    ts_us: ta.Optional[int] = None  # microseconds UTC
-
-
-class JournalctlMessageBuilder:
-    def __init__(self) -> None:
-        super().__init__()
-
-        self._buf = DelimitingBuffer(b'\n')
-
-    _cursor_field = '__CURSOR'
-
-    _timestamp_fields: ta.Sequence[str] = [
-        '_SOURCE_REALTIME_TIMESTAMP',
-        '__REALTIME_TIMESTAMP',
-    ]
-
-    def _get_message_timestamp(self, dct: ta.Mapping[str, ta.Any]) -> ta.Optional[int]:
-        for fld in self._timestamp_fields:
-            if (tsv := dct.get(fld)) is None:
-                continue
-
-            if isinstance(tsv, str):
-                try:
-                    return int(tsv)
-                except ValueError:
-                    try:
-                        return int(float(tsv))
-                    except ValueError:
-                        log.exception('Failed to parse timestamp: %r', tsv)
-
-            elif isinstance(tsv, (int, float)):
-                return int(tsv)
-
-        log.error('Invalid timestamp: %r', dct)
-        return None
-
-    def _make_message(self, raw: bytes) -> JournalctlMessage:
-        dct = None
-        cursor = None
-        ts = None
-
-        try:
-            dct = json.loads(raw.decode('utf-8', 'replace'))
-        except Exception:  # noqa
-            log.exception('Failed to parse raw message: %r', raw)
-
-        else:
-            cursor = dct.get(self._cursor_field)
-            ts = self._get_message_timestamp(dct)
-
-        return JournalctlMessage(
-            raw=raw,
-            dct=dct,
-            cursor=cursor,
-            ts_us=ts,
-        )
-
-    def feed(self, data: bytes) -> ta.Sequence[JournalctlMessage]:
-        ret: ta.List[JournalctlMessage] = []
-        for line in self._buf.feed(data):
-            ret.append(self._make_message(check.isinstance(line, bytes)))
-        return ret
-
-
 ########################################
 # ../../../../../omlish/lite/configs.py

@@ -5291,132 +5316,389 @@ def load_config_file_obj(


 ########################################
-# ../../../../../omlish/logs/standard.py
-"""
-TODO:
- - !! move to std !!
- - structured
- - prefixed
- - debug
- - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
-"""
+# ../../../../../omlish/logs/contexts.py


 ##


-STANDARD_LOG_FORMAT_PARTS = [
-    ('asctime', '%(asctime)-15s'),
-    ('process', 'pid=%(process)s'),
-    ('thread', 'tid=%(thread)x'),
-    ('levelname', '%(levelname)s'),
-    ('name', '%(name)s'),
-    ('separator', '::'),
-    ('message', '%(message)s'),
-]
+class LoggingContext(Abstract):
+    @property
+    @abc.abstractmethod
+    def level(self) -> NamedLogLevel:
+        raise NotImplementedError

+    #

-class StandardLoggingFormatter(logging.Formatter):
-    @staticmethod
-    def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
-        return ' '.join(v for k, v in parts)
+    @property
+    @abc.abstractmethod
+    def time_ns(self) -> int:
+        raise NotImplementedError

-    converter = datetime.datetime.fromtimestamp  # type: ignore
+    @property
+    @abc.abstractmethod
+    def times(self) -> LoggingTimeFields:
+        raise NotImplementedError

-    def formatTime(self, record, datefmt=None):
-        ct = self.converter(record.created)
-        if datefmt:
-            return ct.strftime(datefmt)  # noqa
-        else:
-            t = ct.strftime('%Y-%m-%d %H:%M:%S')
-            return '%s.%03d' % (t, record.msecs)  # noqa
+    #

+    @property
+    @abc.abstractmethod
+    def exc_info(self) -> ta.Optional[LoggingExcInfo]:
+        raise NotImplementedError

-##
+    @property
+    @abc.abstractmethod
+    def exc_info_tuple(self) -> ta.Optional[LoggingExcInfoTuple]:
+        raise NotImplementedError

+    #

-class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
-    def __init_subclass__(cls, **kwargs):
-        raise TypeError('This class serves only as a marker and should not be subclassed.')
+    @abc.abstractmethod
+    def caller(self) -> ta.Optional[LoggingCaller]:
+        raise NotImplementedError

+    @abc.abstractmethod
+    def source_file(self) -> ta.Optional[LoggingSourceFileInfo]:
+        raise NotImplementedError

-##
+    #

+    @abc.abstractmethod
+    def thread(self) -> ta.Optional[LoggingThreadInfo]:
+        raise NotImplementedError

-@contextlib.contextmanager
-def _locking_logging_module_lock() -> ta.Iterator[None]:
-    if hasattr(logging, '_acquireLock'):
-        logging._acquireLock()  # noqa
-        try:
-            yield
-        finally:
-            logging._releaseLock()  # type: ignore  # noqa
+    @abc.abstractmethod
+    def process(self) -> ta.Optional[LoggingProcessInfo]:
+        raise NotImplementedError

-    elif hasattr(logging, '_lock'):
-        # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
-        with logging._lock:  # noqa
-            yield
+    @abc.abstractmethod
+    def multiprocessing(self) -> ta.Optional[LoggingMultiprocessingInfo]:
+        raise NotImplementedError

-    else:
-        raise Exception("Can't find lock in logging module")
+    @abc.abstractmethod
+    def asyncio_task(self) -> ta.Optional[LoggingAsyncioTaskInfo]:
+        raise NotImplementedError


-def configure_standard_logging(
-    level: ta.Union[int, str] = logging.INFO,
-    *,
-    json: bool = False,
-    target: ta.Optional[logging.Logger] = None,
-    force: bool = False,
-    handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
-) -> ta.Optional[StandardConfiguredLoggingHandler]:
-    with _locking_logging_module_lock():
-        if target is None:
-            target = logging.root
+##

-        #

-        if not force:
-            if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
-                return None
+class CaptureLoggingContext(LoggingContext, Abstract):
+    class AlreadyCapturedError(Exception):
+        pass

-        #
+    class NotCapturedError(Exception):
+        pass

-        if handler_factory is not None:
-            handler = handler_factory()
-        else:
-            handler = logging.StreamHandler()
+    @abc.abstractmethod
+    def capture(self) -> None:
+        """Must be cooperatively called only from the expected locations."""

-        #
+        raise NotImplementedError

-        formatter: logging.Formatter
-        if json:
-            formatter = JsonLoggingFormatter()
-        else:
-            formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS))
-        handler.setFormatter(formatter)

-        #
+@ta.final
+class CaptureLoggingContextImpl(CaptureLoggingContext):
+    @ta.final
+    class NOT_SET:  # noqa
+        def __new__(cls, *args, **kwargs):  # noqa
+            raise TypeError

-        handler.addFilter(TidLoggingFilter())
+    #
+
+    def __init__(
+        self,
+        level: LogLevel,
+        *,
+        time_ns: ta.Optional[int] = None,
+
+        exc_info: LoggingExcInfoArg = False,
+
+        caller: ta.Union[LoggingCaller, ta.Type[NOT_SET], None] = NOT_SET,
+        stack_offset: int = 0,
+        stack_info: bool = False,
+    ) -> None:
+        self._level: NamedLogLevel = level if level.__class__ is NamedLogLevel else NamedLogLevel(level)  # type: ignore[assignment]  # noqa

         #

-        target.addHandler(handler)
+        if time_ns is None:
+            time_ns = time.time_ns()
+        self._time_ns: int = time_ns

         #

-        if level is not None:
-            target.setLevel(level)
+        if exc_info is True:
+            sys_exc_info = sys.exc_info()
+            if sys_exc_info[0] is not None:
+                exc_info = sys_exc_info
+            else:
+                exc_info = None
+        elif exc_info is False:
+            exc_info = None
+
+        if exc_info is not None:
+            self._exc_info: ta.Optional[LoggingExcInfo] = exc_info
+            if isinstance(exc_info, BaseException):
+                self._exc_info_tuple: ta.Optional[LoggingExcInfoTuple] = (type(exc_info), exc_info, exc_info.__traceback__)  # noqa
+            else:
+                self._exc_info_tuple = exc_info

         #

-        return StandardConfiguredLoggingHandler(handler)
+        if caller is not CaptureLoggingContextImpl.NOT_SET:
+            self._caller = caller  # type: ignore[assignment]
+        else:
+            self._stack_offset = stack_offset
+            self._stack_info = stack_info

+    ##

-########################################
-# ../../../../../omlish/subprocesses/wrap.py
-"""
-This bypasses debuggers attaching to spawned subprocess children that look like python processes. See:
+    @property
+    def level(self) -> NamedLogLevel:
+        return self._level
+
+    #
+
+    @property
+    def time_ns(self) -> int:
+        return self._time_ns
+
+    _times: LoggingTimeFields
+
+    @property
+    def times(self) -> LoggingTimeFields:
+        try:
+            return self._times
+        except AttributeError:
+            pass
+
+        times = self._times = LoggingTimeFields.build(self.time_ns)
+        return times
+
+    #
+
+    _exc_info: ta.Optional[LoggingExcInfo] = None
+    _exc_info_tuple: ta.Optional[LoggingExcInfoTuple] = None
+
+    @property
+    def exc_info(self) -> ta.Optional[LoggingExcInfo]:
+        return self._exc_info
+
+    @property
+    def exc_info_tuple(self) -> ta.Optional[LoggingExcInfoTuple]:
+        return self._exc_info_tuple
+
+    ##
+
+    _stack_offset: int
+    _stack_info: bool
+
+    def inc_stack_offset(self, ofs: int = 1) -> 'CaptureLoggingContext':
+        if hasattr(self, '_stack_offset'):
+            self._stack_offset += ofs
+        return self
+
+    _has_captured: bool = False
+
+    _caller: ta.Optional[LoggingCaller]
+    _source_file: ta.Optional[LoggingSourceFileInfo]
+
+    _thread: ta.Optional[LoggingThreadInfo]
+    _process: ta.Optional[LoggingProcessInfo]
+    _multiprocessing: ta.Optional[LoggingMultiprocessingInfo]
+    _asyncio_task: ta.Optional[LoggingAsyncioTaskInfo]
+
+    def capture(self) -> None:
+        if self._has_captured:
+            raise CaptureLoggingContextImpl.AlreadyCapturedError
+        self._has_captured = True
+
+        if not hasattr(self, '_caller'):
+            self._caller = LoggingCaller.find(
+                self._stack_offset + 1,
+                stack_info=self._stack_info,
+            )
+
+        if (caller := self._caller) is not None:
+            self._source_file = LoggingSourceFileInfo.build(caller.file_path)
+        else:
+            self._source_file = None
+
+        self._thread = LoggingThreadInfo.build()
+        self._process = LoggingProcessInfo.build()
+        self._multiprocessing = LoggingMultiprocessingInfo.build()
+        self._asyncio_task = LoggingAsyncioTaskInfo.build()
+
+    #
+
+    def caller(self) -> ta.Optional[LoggingCaller]:
+        try:
+            return self._caller
+        except AttributeError:
+            raise CaptureLoggingContext.NotCapturedError from None
+
+    def source_file(self) -> ta.Optional[LoggingSourceFileInfo]:
+        try:
+            return self._source_file
+        except AttributeError:
+            raise CaptureLoggingContext.NotCapturedError from None
+
+    #
+
+    def thread(self) -> ta.Optional[LoggingThreadInfo]:
+        try:
+            return self._thread
+        except AttributeError:
+            raise CaptureLoggingContext.NotCapturedError from None
+
+    def process(self) -> ta.Optional[LoggingProcessInfo]:
+        try:
+            return self._process
+        except AttributeError:
+            raise CaptureLoggingContext.NotCapturedError from None
+
+    def multiprocessing(self) -> ta.Optional[LoggingMultiprocessingInfo]:
+        try:
+            return self._multiprocessing
+        except AttributeError:
+            raise CaptureLoggingContext.NotCapturedError from None
+
+    def asyncio_task(self) -> ta.Optional[LoggingAsyncioTaskInfo]:
+        try:
+            return self._asyncio_task
+        except AttributeError:
+            raise CaptureLoggingContext.NotCapturedError from None
+
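Aside: a minimal sketch (not package code) of the one-shot capture lifecycle defined above, assuming this hunk's classes are in scope:

    ctx = CaptureLoggingContextImpl(NamedLogLevel.INFO, stack_offset=0)
    ctx.capture()                     # gathers caller, thread, and process info

    assert ctx.level == NamedLogLevel.INFO
    assert ctx.thread() is not None   # populated by capture()

    try:
        ctx.capture()                 # a second capture is rejected
    except CaptureLoggingContextImpl.AlreadyCapturedError:
        pass

Accessors such as caller() raise NotCapturedError if capture() was never called.
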
+########################################
+# ../../../../../omlish/logs/standard.py
+"""
+TODO:
+ - !! move to std !!
+ - structured
+ - prefixed
+ - debug
+ - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
+"""
+
+
+##
+
+
+STANDARD_LOG_FORMAT_PARTS = [
+    ('asctime', '%(asctime)-15s'),
+    ('process', 'pid=%(process)s'),
+    ('thread', 'tid=%(thread)x'),
+    ('levelname', '%(levelname)s'),
+    ('name', '%(name)s'),
+    ('separator', '::'),
+    ('message', '%(message)s'),
+]
+
+
+class StandardLoggingFormatter(logging.Formatter):
+    @staticmethod
+    def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
+        return ' '.join(v for k, v in parts)
+
+    converter = datetime.datetime.fromtimestamp  # type: ignore
+
+    def formatTime(self, record, datefmt=None):
+        ct = self.converter(record.created)
+        if datefmt:
+            return ct.strftime(datefmt)  # noqa
+        else:
+            t = ct.strftime('%Y-%m-%d %H:%M:%S')
+            return '%s.%03d' % (t, record.msecs)  # noqa
+
+
+##
+
+
+class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
+    def __init_subclass__(cls, **kwargs):
+        raise TypeError('This class serves only as a marker and should not be subclassed.')
+
+
+##
+
+
+@contextlib.contextmanager
+def _locking_logging_module_lock() -> ta.Iterator[None]:
+    if hasattr(logging, '_acquireLock'):
+        logging._acquireLock()  # noqa
+        try:
+            yield
+        finally:
+            logging._releaseLock()  # type: ignore  # noqa
+
+    elif hasattr(logging, '_lock'):
+        # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
+        with logging._lock:  # noqa
+            yield
+
+    else:
+        raise Exception("Can't find lock in logging module")
+
+
+def configure_standard_logging(
+    level: ta.Union[int, str] = logging.INFO,
+    *,
+    json: bool = False,
+    target: ta.Optional[logging.Logger] = None,
+    force: bool = False,
+    handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
+) -> ta.Optional[StandardConfiguredLoggingHandler]:
+    with _locking_logging_module_lock():
+        if target is None:
+            target = logging.root
+
+        #
+
+        if not force:
+            if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
+                return None
+
+        #
+
+        if handler_factory is not None:
+            handler = handler_factory()
+        else:
+            handler = logging.StreamHandler()
+
+        #
+
+        formatter: logging.Formatter
+        if json:
+            formatter = JsonLoggingFormatter()
+        else:
+            formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS))
+        handler.setFormatter(formatter)
+
+        #
+
+        handler.addFilter(TidLoggingFilter())
+
+        #
+
+        target.addHandler(handler)
+
+        #
+
+        if level is not None:
+            target.setLevel(level)
+
+        #
+
+        return StandardConfiguredLoggingHandler(handler)
+
+
+########################################
+# ../../../../../omlish/subprocesses/wrap.py
+"""
+This bypasses debuggers attaching to spawned subprocess children that look like python processes. See:

   https://github.com/JetBrains/intellij-community/blob/e9d8f126c286acf9df3ff272f440b305bf2ff585/python/helpers/pydev/_pydev_bundle/pydev_monkey.py
 """
@@ -5439,6 +5721,893 @@ def subprocess_maybe_shell_wrap_exec(*cmd: str) -> ta.Tuple[str, ...]:
5439
5721
  return cmd
5440
5722
 
5441
5723
 
5724
+ ########################################
5725
+ # ../../../../../omlish/logs/base.py
5726
+
5727
+
5728
+ ##
5729
+
5730
+
5731
+ class AnyLogger(Abstract, ta.Generic[T]):
5732
+ def is_enabled_for(self, level: LogLevel) -> bool:
5733
+ return self.get_effective_level() >= level
5734
+
5735
+ @abc.abstractmethod
5736
+ def get_effective_level(self) -> LogLevel:
5737
+ raise NotImplementedError
5738
+
5739
+ #
5740
+
5741
+ @ta.final
5742
+ def isEnabledFor(self, level: LogLevel) -> bool: # noqa
5743
+ return self.is_enabled_for(level)
5744
+
5745
+ @ta.final
5746
+ def getEffectiveLevel(self) -> LogLevel: # noqa
5747
+ return self.get_effective_level()
5748
+
5749
+ ##
5750
+
5751
+ @ta.overload
5752
+ def log(self, level: LogLevel, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5753
+ ...
5754
+
5755
+ @ta.overload
5756
+ def log(self, level: LogLevel, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5757
+ ...
5758
+
5759
+ @ta.overload
5760
+ def log(self, level: LogLevel, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5761
+ ...
5762
+
5763
+ @ta.final
5764
+ def log(self, level: LogLevel, *args, **kwargs):
5765
+ return self._log(CaptureLoggingContextImpl(level, stack_offset=1), *args, **kwargs)
5766
+
5767
+ #
5768
+
5769
+ @ta.overload
5770
+ def debug(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5771
+ ...
5772
+
5773
+ @ta.overload
5774
+ def debug(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5775
+ ...
5776
+
5777
+ @ta.overload
5778
+ def debug(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5779
+ ...
5780
+
5781
+ @ta.final
5782
+ def debug(self, *args, **kwargs):
5783
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.DEBUG, stack_offset=1), *args, **kwargs)
5784
+
5785
+ #
5786
+
5787
+ @ta.overload
5788
+ def info(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5789
+ ...
5790
+
5791
+ @ta.overload
5792
+ def info(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5793
+ ...
5794
+
5795
+ @ta.overload
5796
+ def info(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5797
+ ...
5798
+
5799
+ @ta.final
5800
+ def info(self, *args, **kwargs):
5801
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.INFO, stack_offset=1), *args, **kwargs)
5802
+
5803
+ #
5804
+
5805
+ @ta.overload
5806
+ def warning(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5807
+ ...
5808
+
5809
+ @ta.overload
5810
+ def warning(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5811
+ ...
5812
+
5813
+ @ta.overload
5814
+ def warning(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5815
+ ...
5816
+
5817
+ @ta.final
5818
+ def warning(self, *args, **kwargs):
5819
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.WARNING, stack_offset=1), *args, **kwargs)
5820
+
5821
+ #
5822
+
5823
+ @ta.overload
5824
+ def error(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5825
+ ...
5826
+
5827
+ @ta.overload
5828
+ def error(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5829
+ ...
5830
+
5831
+ @ta.overload
5832
+ def error(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5833
+ ...
5834
+
5835
+ @ta.final
5836
+ def error(self, *args, **kwargs):
5837
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, stack_offset=1), *args, **kwargs)
5838
+
5839
+ #
5840
+
5841
+ @ta.overload
5842
+ def exception(self, msg: str, *args: ta.Any, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
5843
+ ...
5844
+
5845
+ @ta.overload
5846
+ def exception(self, msg: ta.Tuple[ta.Any, ...], *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
5847
+ ...
5848
+
5849
+ @ta.overload
5850
+ def exception(self, msg_fn: LoggingMsgFn, *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
5851
+ ...
5852
+
5853
+ @ta.final
5854
+ def exception(self, *args, exc_info: LoggingExcInfoArg = True, **kwargs):
5855
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, exc_info=exc_info, stack_offset=1), *args, **kwargs) # noqa
5856
+
5857
+ #
5858
+
5859
+ @ta.overload
5860
+ def critical(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5861
+ ...
5862
+
5863
+ @ta.overload
5864
+ def critical(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5865
+ ...
5866
+
5867
+ @ta.overload
5868
+ def critical(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5869
+ ...
5870
+
5871
+ @ta.final
5872
+ def critical(self, *args, **kwargs):
5873
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.CRITICAL, stack_offset=1), *args, **kwargs)
5874
+
5875
+ ##
5876
+
5877
+ @classmethod
5878
+ def _prepare_msg_args(cls, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> ta.Tuple[str, tuple]:
5879
+ if callable(msg):
5880
+ if args:
5881
+ raise TypeError(f'Must not provide both a message function and args: {msg=} {args=}')
5882
+ x = msg()
5883
+ if isinstance(x, str):
5884
+ return x, ()
5885
+ elif isinstance(x, tuple):
5886
+ if x:
5887
+ return x[0], x[1:]
5888
+ else:
5889
+ return '', ()
5890
+ else:
5891
+ raise TypeError(x)
5892
+
5893
+ elif isinstance(msg, tuple):
5894
+ if args:
5895
+ raise TypeError(f'Must not provide both a tuple message and args: {msg=} {args=}')
5896
+ if msg:
5897
+ return msg[0], msg[1:]
5898
+ else:
5899
+ return '', ()
5900
+
5901
+ elif isinstance(msg, str):
5902
+ return msg, args
5903
+
5904
+ else:
5905
+ raise TypeError(msg)
5906
+
5907
+ @abc.abstractmethod
5908
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> T: # noqa
5909
+ raise NotImplementedError
5910
+
5911
+
5912
+ class Logger(AnyLogger[None], Abstract):
5913
+ @abc.abstractmethod
5914
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
5915
+ raise NotImplementedError
5916
+
5917
+
5918
+ class AsyncLogger(AnyLogger[ta.Awaitable[None]], Abstract):
5919
+ @abc.abstractmethod
5920
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> ta.Awaitable[None]: # noqa
5921
+ raise NotImplementedError
5922
+
5923
+
5924
+ ##
5925
+
5926
+
5927
+ class AnyNopLogger(AnyLogger[T], Abstract):
5928
+ @ta.final
5929
+ def get_effective_level(self) -> LogLevel:
5930
+ return 999
5931
+
5932
+
5933
+ @ta.final
5934
+ class NopLogger(AnyNopLogger[None], Logger):
5935
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
5936
+ pass
5937
+
5938
+
5939
+ @ta.final
5940
+ class AsyncNopLogger(AnyNopLogger[ta.Awaitable[None]], AsyncLogger):
5941
+ async def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
5942
+ pass
5943
+
5944
+
5945
+ ########################################
5946
+ # ../../../../../omlish/logs/std/records.py
5947
+
5948
+
5949
+ ##
5950
+
5951
+
5952
+ # Ref:
5953
+ # - https://docs.python.org/3/library/logging.html#logrecord-attributes
5954
+ #
5955
+ # LogRecord:
5956
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L276 (3.8)
5957
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L286 (~3.14) # noqa
5958
+ #
5959
+ # LogRecord.__init__ args:
5960
+ # - name: str
5961
+ # - level: int
5962
+ # - pathname: str - Confusingly referred to as `fn` before the LogRecord ctor. May be empty or "(unknown file)".
5963
+ # - lineno: int - May be 0.
5964
+ # - msg: str
5965
+ # - args: tuple | dict | 1-tuple[dict]
5966
+ # - exc_info: LoggingExcInfoTuple | None
5967
+ # - func: str | None = None -> funcName
5968
+ # - sinfo: str | None = None -> stack_info
5969
+ #
5970
+ KNOWN_STD_LOGGING_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
5971
+ # Name of the logger used to log the call. Unmodified by ctor.
5972
+ name=str,
5973
+
5974
+ # The format string passed in the original logging call. Merged with args to produce message, or an arbitrary object
5975
+ # (see Using arbitrary objects as messages). Unmodified by ctor.
5976
+ msg=str,
5977
+
5978
+ # The tuple of arguments merged into msg to produce message, or a dict whose values are used for the merge (when
5979
+ # there is only one argument, and it is a dictionary). Ctor will transform a 1-tuple containing a Mapping into just
5980
+ # the mapping, but is otherwise unmodified.
5981
+ args=ta.Union[tuple, dict],
5982
+
5983
+ #
5984
+
5985
+ # Text logging level for the message ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'). Set to
5986
+ # `getLevelName(level)`.
5987
+ levelname=str,
5988
+
5989
+ # Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL). Unmodified by ctor.
5990
+ levelno=int,
5991
+
5992
+ #
5993
+
5994
+ # Full pathname of the source file where the logging call was issued (if available). Unmodified by ctor. May default
5995
+ # to "(unknown file)" by Logger.findCaller / Logger._log.
5996
+ pathname=str,
5997
+
5998
+ # Filename portion of pathname. Set to `os.path.basename(pathname)` if successful, otherwise defaults to pathname.
5999
+ filename=str,
6000
+
6001
+ # Module (name portion of filename). Set to `os.path.splitext(filename)[0]`, otherwise defaults to
6002
+ # "Unknown module".
6003
+ module=str,
6004
+
6005
+ #
6006
+
6007
+ # Exception tuple (à la sys.exc_info) or, if no exception has occurred, None. Unmodified by ctor.
6008
+ exc_info=ta.Optional[LoggingExcInfoTuple],
6009
+
6010
+ # Used to cache the traceback text. Simply set to None by ctor, later set by Formatter.format.
6011
+ exc_text=ta.Optional[str],
6012
+
6013
+ #
6014
+
6015
+ # Stack frame information (where available) from the bottom of the stack in the current thread, up to and including
6016
+ # the stack frame of the logging call which resulted in the creation of this record. Set by ctor to `sinfo` arg,
6017
+ # unmodified. Mostly set, if requested, by `Logger.findCaller`, to `traceback.print_stack(f)`, but prepended with
6018
+ # the literal "Stack (most recent call last):\n", and stripped of exactly one trailing `\n` if present.
6019
+ stack_info=ta.Optional[str],
6020
+
6021
+ # Source line number where the logging call was issued (if available). Unmodified by ctor. May default to 0 by
6022
+ # Logger.findCaller / Logger._log.
6023
+ lineno=int,
6024
+
6025
+ # Name of function containing the logging call. Set by ctor to `func` arg, unmodified. May default to
6026
+ # "(unknown function)" by Logger.findCaller / Logger._log.
6027
+ funcName=str,
6028
+
6029
+ #
6030
+
6031
+ # Time when the LogRecord was created. Set to `time.time_ns() / 1e9` for >=3.13.0b1, otherwise simply `time.time()`.
6032
+ #
6033
+ # See:
6034
+ # - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
6035
+ # - https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
6036
+ #
6037
+ created=float,
6038
+
6039
+ # Millisecond portion of the time when the LogRecord was created.
6040
+ msecs=float,
6041
+
6042
+ # Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded.
6043
+ relativeCreated=float,
6044
+
6045
+ #
6046
+
6047
+ # Thread ID if available, and `logging.logThreads` is truthy.
6048
+ thread=ta.Optional[int],
6049
+
6050
+ # Thread name if available, and `logging.logThreads` is truthy.
6051
+ threadName=ta.Optional[str],
6052
+
6053
+ #
6054
+
6055
+ # Process name if available. Set to None if `logging.logMultiprocessing` is not truthy. Otherwise, set to
6056
+ # 'MainProcess', then `sys.modules.get('multiprocessing').current_process().name` if that works, otherwise remains
6057
+ # as 'MainProcess'.
6058
+ #
6059
+ # As noted by stdlib:
6060
+ #
6061
+ # Errors may occur if multiprocessing has not finished loading yet - e.g. if a custom import hook causes
6062
+ # third-party code to run when multiprocessing calls import. See issue 8200 for an example
6063
+ #
6064
+ processName=ta.Optional[str],
6065
+
6066
+ # Process ID if available - that is, if `hasattr(os, 'getpid')` - and `logging.logProcesses` is truthy, otherwise
6067
+ # None.
6068
+ process=ta.Optional[int],
6069
+
6070
+ #
6071
+
6072
+ # Absent <3.12, otherwise asyncio.Task name if available, and `logging.logAsyncioTasks` is truthy. Set to
6073
+ # `sys.modules.get('asyncio').current_task().get_name()`, otherwise None.
6074
+ taskName=ta.Optional[str],
6075
+ )
6076
+
6077
+ KNOWN_STD_LOGGING_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(KNOWN_STD_LOGGING_RECORD_ATTRS)
6078
+
6079
+
6080
+ # Formatter:
6081
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L514 (3.8)
6082
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L554 (~3.14) # noqa
6083
+ #
6084
+ KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
6085
+ # The logged message, computed as msg % args. Set to `record.getMessage()`.
6086
+ message=str,
6087
+
6088
+ # Human-readable time when the LogRecord was created. By default this is of the form '2003-07-08 16:49:45,896' (the
6089
+ # numbers after the comma are millisecond portion of the time). Set to `self.formatTime(record, self.datefmt)` if
6090
+ # `self.usesTime()`, otherwise unset.
6091
+ asctime=str,
6092
+
6093
+ # Used to cache the traceback text. If unset (falsey) on the record and `exc_info` is truthy, set to
6094
+ # `self.formatException(record.exc_info)` - otherwise unmodified.
6095
+ exc_text=ta.Optional[str],
6096
+ )
6097
+
6098
+ KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS)
6099
+
6100
+
6101
+ ##
6102
+
6103
+
6104
+ class UnknownStdLoggingRecordAttrsWarning(LoggingSetupWarning):
6105
+ pass
6106
+
6107
+
6108
+ def _check_std_logging_record_attrs() -> None:
6109
+ rec_dct = dict(logging.makeLogRecord({}).__dict__)
6110
+
6111
+ if (unk_rec_fields := frozenset(rec_dct) - KNOWN_STD_LOGGING_RECORD_ATTR_SET):
6112
+ import warnings # noqa
6113
+
6114
+ warnings.warn(
6115
+ f'Unknown log record attrs detected: {sorted(unk_rec_fields)!r}',
6116
+ UnknownStdLoggingRecordAttrsWarning,
6117
+ )
6118
+
6119
+
6120
+ _check_std_logging_record_attrs()
6121
+
6122
+
6123
+ ##
6124
+
6125
+
6126
+ class LoggingContextLogRecord(logging.LogRecord):
6127
+ _SHOULD_ADD_TASK_NAME: ta.ClassVar[bool] = sys.version_info >= (3, 12)
6128
+
6129
+ _UNKNOWN_PATH_NAME: ta.ClassVar[str] = '(unknown file)'
6130
+ _UNKNOWN_FUNC_NAME: ta.ClassVar[str] = '(unknown function)'
6131
+ _UNKNOWN_MODULE: ta.ClassVar[str] = 'Unknown module'
6132
+
6133
+ _STACK_INFO_PREFIX: ta.ClassVar[str] = 'Stack (most recent call last):\n'
6134
+
6135
+ def __init__( # noqa
6136
+ self,
6137
+ # name,
6138
+ # level,
6139
+ # pathname,
6140
+ # lineno,
6141
+ # msg,
6142
+ # args,
6143
+ # exc_info,
6144
+ # func=None,
6145
+ # sinfo=None,
6146
+ # **kwargs,
6147
+ *,
6148
+ name: str,
6149
+ msg: str,
6150
+ args: ta.Union[tuple, dict],
6151
+
6152
+ _logging_context: LoggingContext,
6153
+ ) -> None:
6154
+ ctx = _logging_context
6155
+
6156
+ self.name: str = name
6157
+
6158
+ self.msg: str = msg
6159
+
6160
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L307
6161
+ if args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) and args[0]:
6162
+ args = args[0] # type: ignore[assignment]
6163
+ self.args: ta.Union[tuple, dict] = args
6164
+
6165
+ self.levelname: str = logging.getLevelName(ctx.level)
6166
+ self.levelno: int = ctx.level
6167
+
6168
+ if (caller := ctx.caller()) is not None:
6169
+ self.pathname: str = caller.file_path
6170
+ else:
6171
+ self.pathname = self._UNKNOWN_PATH_NAME
6172
+
6173
+ if (src_file := ctx.source_file()) is not None:
6174
+ self.filename: str = src_file.file_name
6175
+ self.module: str = src_file.module
6176
+ else:
6177
+ self.filename = self.pathname
6178
+ self.module = self._UNKNOWN_MODULE
6179
+
6180
+ self.exc_info: ta.Optional[LoggingExcInfoTuple] = ctx.exc_info_tuple
6181
+ self.exc_text: ta.Optional[str] = None
6182
+
6183
+ # If ctx.build_caller() was never called, we simply don't have a stack trace.
6184
+ if caller is not None:
6185
+ if (sinfo := caller.stack_info) is not None:
6186
+ self.stack_info: ta.Optional[str] = ''.join([
6187
+ self._STACK_INFO_PREFIX,
6188
+ sinfo[:-1] if sinfo.endswith('\n') else sinfo,
6189
+ ])
6190
+ else:
6191
+ self.stack_info = None
6192
+
6193
+ self.lineno: int = caller.line_no
6194
+ self.funcName: str = caller.name
6195
+
6196
+ else:
6197
+ self.stack_info = None
6198
+
6199
+ self.lineno = 0
6200
+ self.funcName = self._UNKNOWN_FUNC_NAME
6201
+
6202
+ times = ctx.times
6203
+ self.created: float = times.created
6204
+ self.msecs: float = times.msecs
6205
+ self.relativeCreated: float = times.relative_created
6206
+
6207
+ if logging.logThreads:
6208
+ thread = check.not_none(ctx.thread())
6209
+ self.thread: ta.Optional[int] = thread.ident
6210
+ self.threadName: ta.Optional[str] = thread.name
6211
+ else:
6212
+ self.thread = None
6213
+ self.threadName = None
6214
+
6215
+ if logging.logProcesses:
6216
+ process = check.not_none(ctx.process())
6217
+ self.process: ta.Optional[int] = process.pid
6218
+ else:
6219
+ self.process = None
6220
+
6221
+ if logging.logMultiprocessing:
6222
+ if (mp := ctx.multiprocessing()) is not None:
6223
+ self.processName: ta.Optional[str] = mp.process_name
6224
+ else:
6225
+ self.processName = None
6226
+ else:
6227
+ self.processName = None
6228
+
6229
+ # Absent <3.12
6230
+ if getattr(logging, 'logAsyncioTasks', None):
6231
+ if (at := ctx.asyncio_task()) is not None:
6232
+ self.taskName: ta.Optional[str] = at.name
6233
+ else:
6234
+ self.taskName = None
6235
+ else:
6236
+ self.taskName = None
6237
+
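The constructor above replicates the stdlib convention where a single non-empty mapping argument becomes the record's `args`, enabling `%(key)s` interpolation. A stdlib-only sketch of that behavior (illustrative, not package code):

import logging

logging.basicConfig(level=logging.DEBUG, format='%(message)s')
# One mapping argument is unwrapped into record.args, so %(key)s works:
logging.getLogger('demo').debug('user=%(user)s action=%(action)s', {'user': 'bob', 'action': 'login'})
# -> user=bob action=login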
6238
+
6239
+ ########################################
6240
+ # ../../../../../omlish/logs/std/adapters.py
6241
+
6242
+
6243
+ ##
6244
+
6245
+
6246
+ class StdLogger(Logger):
6247
+ def __init__(self, std: logging.Logger) -> None:
6248
+ super().__init__()
6249
+
6250
+ self._std = std
6251
+
6252
+ @property
6253
+ def std(self) -> logging.Logger:
6254
+ return self._std
6255
+
6256
+ def get_effective_level(self) -> LogLevel:
6257
+ return self._std.getEffectiveLevel()
6258
+
6259
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> None:
6260
+ if not self.is_enabled_for(ctx.level):
6261
+ return
6262
+
6263
+ ctx.capture()
6264
+
6265
+ ms, args = self._prepare_msg_args(msg, *args)
6266
+
6267
+ rec = LoggingContextLogRecord(
6268
+ name=self._std.name,
6269
+ msg=ms,
6270
+ args=args,
6271
+
6272
+ _logging_context=ctx,
6273
+ )
6274
+
6275
+ self._std.handle(rec)
6276
+
6277
+
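`StdLogger._log()` checks `is_enabled_for()` itself before building a record because `logging.Logger.handle()` applies filters but does not re-check the logger's level. A stdlib sketch of that asymmetry (illustrative only):

import logging

logging.basicConfig(format='%(message)s')
std = logging.getLogger('demo')
std.setLevel(logging.WARNING)
rec = std.makeRecord('demo', logging.INFO, __file__, 1, 'below the level', (), None)
std.handle(rec)  # still emitted: handle() skips the level check, hence the guard above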
6278
+ ########################################
6279
+ # ../../../../../omlish/logs/modules.py
6280
+
6281
+
6282
+ ##
6283
+
6284
+
6285
+ def get_module_logger(mod_globals: ta.Mapping[str, ta.Any]) -> Logger:
6286
+ return StdLogger(logging.getLogger(mod_globals.get('__name__'))) # noqa
6287
+
6288
+
6289
+ ########################################
6290
+ # ../cursor.py
6291
+
6292
+
6293
+ log = get_module_logger(globals()) # noqa
6294
+
6295
+
6296
+ ##
6297
+
6298
+
6299
+ class JournalctlToAwsCursor:
6300
+ def __init__(
6301
+ self,
6302
+ cursor_file: ta.Optional[str] = None,
6303
+ *,
6304
+ ensure_locked: ta.Optional[ta.Callable[[], None]] = None,
6305
+ ) -> None:
6306
+ super().__init__()
6307
+
6308
+ self._cursor_file = cursor_file
6309
+ self._ensure_locked = ensure_locked
6310
+
6311
+ #
6312
+
6313
+ def get(self) -> ta.Optional[str]:
6314
+ if self._ensure_locked is not None:
6315
+ self._ensure_locked()
6316
+
6317
+ if not (cf := self._cursor_file):
6318
+ return None
6319
+ cf = os.path.expanduser(cf)
6320
+
6321
+ try:
6322
+ with open(cf) as f:
6323
+ return f.read().strip()
6324
+ except FileNotFoundError:
6325
+ return None
6326
+
6327
+ def set(self, cursor: str) -> None:
6328
+ if self._ensure_locked is not None:
6329
+ self._ensure_locked()
6330
+
6331
+ if not (cf := self._cursor_file):
6332
+ return
6333
+ cf = os.path.expanduser(cf)
6334
+
6335
+ log.info('Writing cursor file %s : %s', cf, cursor)
6336
+ with open(ncf := cf + '.next', 'w') as f:
6337
+ f.write(cursor)
6338
+
6339
+ os.rename(ncf, cf)
6340
+
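`set()` uses the classic write-then-rename pattern: the cursor is written to a sibling `.next` file and `os.rename()`d over the target, which is atomic on POSIX within one filesystem, so a crash mid-write never leaves a torn cursor file. The same pattern in isolation (a sketch with an invented helper name, not package code):

import os

def atomic_write_text(path: str, data: str) -> None:
    tmp = path + '.next'  # sibling path keeps it on the same filesystem
    with open(tmp, 'w') as f:
        f.write(data)     # a durability-minded variant would also fsync here
    os.rename(tmp, path)  # atomic replacement on POSIX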
6341
+
6342
+ ########################################
6343
+ # ../../../../journald/messages.py
6344
+
6345
+
6346
+ log = get_module_logger(globals()) # noqa
6347
+
6348
+
6349
+ ##
6350
+
6351
+
6352
+ @dc.dataclass(frozen=True)
6353
+ class JournalctlMessage:
6354
+ raw: bytes
6355
+ dct: ta.Optional[ta.Mapping[str, ta.Any]] = None
6356
+ cursor: ta.Optional[str] = None
6357
+ ts_us: ta.Optional[int] = None # microseconds UTC
6358
+
6359
+
6360
+ class JournalctlMessageBuilder:
6361
+ def __init__(self) -> None:
6362
+ super().__init__()
6363
+
6364
+ self._buf = DelimitingBuffer(b'\n')
6365
+
6366
+ _cursor_field = '__CURSOR'
6367
+
6368
+ _timestamp_fields: ta.Sequence[str] = [
6369
+ '_SOURCE_REALTIME_TIMESTAMP',
6370
+ '__REALTIME_TIMESTAMP',
6371
+ ]
6372
+
6373
+ def _get_message_timestamp(self, dct: ta.Mapping[str, ta.Any]) -> ta.Optional[int]:
6374
+ for fld in self._timestamp_fields:
6375
+ if (tsv := dct.get(fld)) is None:
6376
+ continue
6377
+
6378
+ if isinstance(tsv, str):
6379
+ try:
6380
+ return int(tsv)
6381
+ except ValueError:
6382
+ try:
6383
+ return int(float(tsv))
6384
+ except ValueError:
6385
+ log.exception('Failed to parse timestamp: %r', tsv)
6386
+
6387
+ elif isinstance(tsv, (int, float)):
6388
+ return int(tsv)
6389
+
6390
+ log.error('No valid timestamp found: %r', dct)
6391
+ return None
6392
+
6393
+ def _make_message(self, raw: bytes) -> JournalctlMessage:
6394
+ dct = None
6395
+ cursor = None
6396
+ ts = None
6397
+
6398
+ try:
6399
+ dct = json.loads(raw.decode('utf-8', 'replace'))
6400
+ except Exception: # noqa
6401
+ log.exception('Failed to parse raw message: %r', raw)
6402
+
6403
+ else:
6404
+ cursor = dct.get(self._cursor_field)
6405
+ ts = self._get_message_timestamp(dct)
6406
+
6407
+ return JournalctlMessage(
6408
+ raw=raw,
6409
+ dct=dct,
6410
+ cursor=cursor,
6411
+ ts_us=ts,
6412
+ )
6413
+
6414
+ def feed(self, data: bytes) -> ta.Sequence[JournalctlMessage]:
6415
+ ret: ta.List[JournalctlMessage] = []
6416
+ for line in self._buf.feed(data):
6417
+ ret.append(self._make_message(check.isinstance(line, bytes)))
6418
+ return ret
6419
+
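Illustrative use of the builder with one journald-export-style JSON line (field values invented for the example):

mb = JournalctlMessageBuilder()
msgs = mb.feed(b'{"__CURSOR": "s=abc;i=1", "__REALTIME_TIMESTAMP": "1700000000000000", "MESSAGE": "hi"}\n')
print(msgs[0].cursor, msgs[0].ts_us)  # 's=abc;i=1' 1700000000000000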
6420
+
6421
+ ########################################
6422
+ # ../../../../threadworkers.py
6423
+ """
6424
+ FIXME:
6425
+ - group is racy af - meditate on has_started, etc
6426
+
6427
+ TODO:
6428
+ - overhaul stop lol
6429
+ - group -> 'context'? :|
6430
+ - shared stop_event?
6431
+ """
6432
+
6433
+
6434
+ log = get_module_logger(globals()) # noqa
6435
+
6436
+
6437
+ ##
6438
+
6439
+
6440
+ class ThreadWorker(ExitStacked, Abstract):
6441
+ def __init__(
6442
+ self,
6443
+ *,
6444
+ stop_event: ta.Optional[threading.Event] = None,
6445
+ worker_groups: ta.Optional[ta.Iterable['ThreadWorkerGroup']] = None,
6446
+ ) -> None:
6447
+ super().__init__()
6448
+
6449
+ if stop_event is None:
6450
+ stop_event = threading.Event()
6451
+ self._stop_event = stop_event
6452
+
6453
+ self._lock = threading.RLock()
6454
+ self._thread: ta.Optional[threading.Thread] = None
6455
+ self._last_heartbeat: ta.Optional[float] = None
6456
+
6457
+ for g in worker_groups or []:
6458
+ g.add(self)
6459
+
6460
+ #
6461
+
6462
+ @contextlib.contextmanager
6463
+ def _exit_stacked_init_wrapper(self) -> ta.Iterator[None]:
6464
+ with self._lock:
6465
+ yield
6466
+
6467
+ #
6468
+
6469
+ def should_stop(self) -> bool:
6470
+ return self._stop_event.is_set()
6471
+
6472
+ class Stopping(Exception): # noqa
6473
+ pass
6474
+
6475
+ #
6476
+
6477
+ @property
6478
+ def last_heartbeat(self) -> ta.Optional[float]:
6479
+ return self._last_heartbeat
6480
+
6481
+ def _heartbeat(
6482
+ self,
6483
+ *,
6484
+ no_stop_check: bool = False,
6485
+ ) -> None:
6486
+ self._last_heartbeat = time.time()
6487
+
6488
+ if not no_stop_check and self.should_stop():
6489
+ log.info('Stopping: %s', self)
6490
+ raise ThreadWorker.Stopping
6491
+
6492
+ #
6493
+
6494
+ def has_started(self) -> bool:
6495
+ return self._thread is not None
6496
+
6497
+ def is_alive(self) -> bool:
6498
+ return (thr := self._thread) is not None and thr.is_alive()
6499
+
6500
+ def start(self) -> None:
6501
+ with self._lock:
6502
+ if self._thread is not None:
6503
+ raise RuntimeError(f'Thread already started: {self!r}')
6504
+
6505
+ thr = threading.Thread(target=self.__thread_main)
6506
+ self._thread = thr
6507
+ thr.start()
6508
+
6509
+ #
6510
+
6511
+ def __thread_main(self) -> None:
6512
+ try:
6513
+ self._run()
6514
+ except ThreadWorker.Stopping:
6515
+ log.exception('Thread worker stopped: %r', self)
6516
+ except Exception: # noqa
6517
+ log.exception('Error in worker thread: %r', self)
6518
+ raise
6519
+
6520
+ @abc.abstractmethod
6521
+ def _run(self) -> None:
6522
+ raise NotImplementedError
6523
+
6524
+ #
6525
+
6526
+ def stop(self) -> None:
6527
+ self._stop_event.set()
6528
+
6529
+ def join(
6530
+ self,
6531
+ timeout: ta.Optional[float] = None,
6532
+ *,
6533
+ unless_not_started: bool = False,
6534
+ ) -> None:
6535
+ with self._lock:
6536
+ if self._thread is None:
6537
+ if not unless_not_started:
6538
+ raise RuntimeError(f'Thread not started: {self!r}')
6539
+ return
6540
+ self._thread.join(timeout)
6541
+
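A minimal illustrative subclass (names invented for the sketch): `_run()` loops on `_heartbeat()`, which raises `ThreadWorker.Stopping` once `stop()` sets the event, and `__thread_main` treats that as the worker's exit path:

class TickWorker(ThreadWorker):  # hypothetical example worker
    def _run(self) -> None:
        while True:
            self._heartbeat()    # raises ThreadWorker.Stopping after stop()
            time.sleep(0.1)

w = TickWorker()
w.start()
time.sleep(0.3)
w.stop()
w.join(timeout=1.0)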
6542
+
6543
+ ##
6544
+
6545
+
6546
+ class ThreadWorkerGroup:
6547
+ @dc.dataclass()
6548
+ class _State:
6549
+ worker: ThreadWorker
6550
+
6551
+ last_heartbeat: ta.Optional[float] = None
6552
+
6553
+ def __init__(self) -> None:
6554
+ super().__init__()
6555
+
6556
+ self._lock = threading.RLock()
6557
+ self._states: ta.Dict[ThreadWorker, ThreadWorkerGroup._State] = {}
6558
+ self._last_heartbeat_check: ta.Optional[float] = None
6559
+
6560
+ #
6561
+
6562
+ def add(self, *workers: ThreadWorker) -> 'ThreadWorkerGroup':
6563
+ with self._lock:
6564
+ for w in workers:
6565
+ if w in self._states:
6566
+ raise KeyError(w)
6567
+ self._states[w] = ThreadWorkerGroup._State(w)
6568
+
6569
+ return self
6570
+
6571
+ #
6572
+
6573
+ def start_all(self) -> None:
6574
+ with self._lock:
6575
+ thrs = list(self._states)
6576
+ for thr in thrs:
6577
+ if not thr.has_started():
6578
+ thr.start()
6579
+
6580
+ def stop_all(self) -> None:
6581
+ for w in reversed(list(self._states)):
6582
+ if w.has_started():
6583
+ w.stop()
6584
+
6585
+ def join_all(self, timeout: ta.Optional[float] = None) -> None:
6586
+ for w in reversed(list(self._states)):
6587
+ if w.has_started():
6588
+ w.join(timeout, unless_not_started=True)
6589
+
6590
+ #
6591
+
6592
+ def get_dead(self) -> ta.List[ThreadWorker]:
6593
+ with self._lock:
6594
+ return [thr for thr in self._states if not thr.is_alive()]
6595
+
6596
+ def check_heartbeats(self) -> ta.Dict[ThreadWorker, float]:
6597
+ with self._lock:
6598
+ dct: ta.Dict[ThreadWorker, float] = {}
6599
+ for thr, st in self._states.items():
6600
+ if not thr.has_started():
6601
+ continue
6602
+ hb = thr.last_heartbeat
6603
+ if hb is None:
6604
+ hb = time.time()
6605
+ st.last_heartbeat = hb
6606
+ dct[st.worker] = time.time() - hb
6607
+ self._last_heartbeat_check = time.time()
6608
+ return dct
6609
+
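Putting the group together (continuing the hypothetical `TickWorker` from the sketch above):

g = ThreadWorkerGroup()
g.add(TickWorker(), TickWorker())
g.start_all()
print(g.check_heartbeats())  # {worker: seconds since its last heartbeat, ...}
g.stop_all()
g.join_all(timeout=1.0)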
6610
+
5442
6611
  ########################################
5443
6612
  # ../poster.py
5444
6613
  """