ominfra 0.0.0.dev427__py3-none-any.whl → 0.0.0.dev429__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their respective public registries.
@@ -5,7 +5,7 @@
  # @omlish-generated
  # @omlish-amalg-output ../clouds/aws/journald2aws/main.py
  # @omlish-git-diff-omit
- # ruff: noqa: N802 UP006 UP007 UP036 UP043 UP045
+ # ruff: noqa: N802 UP006 UP007 UP036 UP043 UP045 UP046
  import abc
  import argparse
  import base64
@@ -38,6 +38,7 @@ import subprocess
  import sys
  import threading
  import time
+ import traceback
  import types
  import typing as ta
  import urllib.parse
@@ -85,12 +86,23 @@ CheckArgsRenderer = ta.Callable[..., ta.Optional[str]]  # ta.TypeAlias
  ExitStackedT = ta.TypeVar('ExitStackedT', bound='ExitStacked')
  AsyncExitStackedT = ta.TypeVar('AsyncExitStackedT', bound='AsyncExitStacked')
 
- # ../../../threadworkers.py
- ThreadWorkerT = ta.TypeVar('ThreadWorkerT', bound='ThreadWorker')
+ # ../../../../omlish/logs/levels.py
+ LogLevel = int  # ta.TypeAlias
 
  # ../../../../omlish/configs/formats.py
  ConfigDataT = ta.TypeVar('ConfigDataT', bound='ConfigData')
 
+ # ../../../../omlish/logs/contexts.py
+ LoggingExcInfoTuple = ta.Tuple[ta.Type[BaseException], BaseException, ta.Optional[types.TracebackType]]  # ta.TypeAlias
+ LoggingExcInfo = ta.Union[BaseException, LoggingExcInfoTuple]  # ta.TypeAlias
+ LoggingExcInfoArg = ta.Union[LoggingExcInfo, bool, None]  # ta.TypeAlias
+
+ # ../../../../omlish/logs/base.py
+ LoggingMsgFn = ta.Callable[[], ta.Union[str, tuple]]  # ta.TypeAlias
+
+ # ../../../threadworkers.py
+ ThreadWorkerT = ta.TypeVar('ThreadWorkerT', bound='ThreadWorker')
+
 
  ########################################
  # ../../../../../omlish/configs/types.py
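
Aside (illustrative, not part of the diff): `LoggingExcInfoArg` follows the stdlib `exc_info` convention -- a live exception, a full `(type, value, traceback)` tuple, a bool, or None. A minimal sketch of the normalization such an argument undergoes, mirroring the logic `CaptureLoggingContextImpl.__init__` applies later in this diff:

    import sys

    def normalize_exc_info(arg):
        if arg is True:
            et, e, tb = sys.exc_info()                  # use the active exception, if any
            return (et, e, tb) if et is not None else None
        if arg is False or arg is None:
            return None
        if isinstance(arg, BaseException):
            return (type(arg), arg, arg.__traceback__)  # expand to a full tuple
        return arg                                      # already a (type, value, traceback) tuple
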
@@ -2673,14 +2685,189 @@ def format_num_bytes(num_bytes: int) -> str:
 
 
  ########################################
- # ../../../../../omlish/logs/modules.py
+ # ../../../../../omlish/logs/infos.py
+
+
+ ##
+
+
+ def logging_context_info(cls):
+     return cls
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingSourceFileInfo(ta.NamedTuple):
+     file_name: str
+     module: str
+
+     @classmethod
+     def build(cls, file_path: ta.Optional[str]) -> ta.Optional['LoggingSourceFileInfo']:
+         if file_path is None:
+             return None
+
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L331-L336  # noqa
+         try:
+             file_name = os.path.basename(file_path)
+             module = os.path.splitext(file_name)[0]
+         except (TypeError, ValueError, AttributeError):
+             return None
+
+         return cls(
+             file_name,
+             module,
+         )
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingThreadInfo(ta.NamedTuple):
+     ident: int
+     native_id: ta.Optional[int]
+     name: str
+
+     @classmethod
+     def build(cls) -> 'LoggingThreadInfo':
+         return cls(
+             threading.get_ident(),
+             threading.get_native_id() if hasattr(threading, 'get_native_id') else None,
+             threading.current_thread().name,
+         )
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingProcessInfo(ta.NamedTuple):
+     pid: int
+
+     @classmethod
+     def build(cls) -> 'LoggingProcessInfo':
+         return cls(
+             os.getpid(),
+         )
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingMultiprocessingInfo(ta.NamedTuple):
+     process_name: str
+
+     @classmethod
+     def build(cls) -> ta.Optional['LoggingMultiprocessingInfo']:
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L355-L364  # noqa
+         if (mp := sys.modules.get('multiprocessing')) is None:
+             return None
+
+         return cls(
+             mp.current_process().name,
+         )
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingAsyncioTaskInfo(ta.NamedTuple):
+     name: str
+
+     @classmethod
+     def build(cls) -> ta.Optional['LoggingAsyncioTaskInfo']:
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L372-L377  # noqa
+         if (asyncio := sys.modules.get('asyncio')) is None:
+             return None
+
+         try:
+             task = asyncio.current_task()
+         except Exception:  # noqa
+             return None
+
+         if task is None:
+             return None
+
+         return cls(
+             task.get_name(),  # Always non-None
+         )
+
+
+ ########################################
+ # ../../../../../omlish/logs/levels.py
 
 
  ##
 
 
- def get_module_logger(mod_globals: ta.Mapping[str, ta.Any]) -> logging.Logger:
-     return logging.getLogger(mod_globals.get('__name__'))
+ @ta.final
+ class NamedLogLevel(int):
+     # logging.getLevelNamesMapping (or, as that is unavailable <3.11, logging._nameToLevel) includes the deprecated
+     # aliases.
+     _NAMES_BY_INT: ta.ClassVar[ta.Mapping[LogLevel, str]] = dict(sorted(logging._levelToName.items(), key=lambda t: -t[0]))  # noqa
+
+     _INTS_BY_NAME: ta.ClassVar[ta.Mapping[str, LogLevel]] = {v: k for k, v in _NAMES_BY_INT.items()}
+
+     _NAME_INT_PAIRS: ta.ClassVar[ta.Sequence[ta.Tuple[str, LogLevel]]] = list(_INTS_BY_NAME.items())
+
+     #
+
+     @property
+     def exact_name(self) -> ta.Optional[str]:
+         return self._NAMES_BY_INT.get(self)
+
+     _effective_name: ta.Optional[str]
+
+     @property
+     def effective_name(self) -> ta.Optional[str]:
+         try:
+             return self._effective_name
+         except AttributeError:
+             pass
+
+         if (n := self.exact_name) is None:
+             for n, i in self._NAME_INT_PAIRS:  # noqa
+                 if self >= i:
+                     break
+             else:
+                 n = None
+
+         self._effective_name = n
+         return n
+
+     #
+
+     def __repr__(self) -> str:
+         return f'{self.__class__.__name__}({int(self)})'
+
+     def __str__(self) -> str:
+         return self.exact_name or f'{self.effective_name or "INVALID"}:{int(self)}'
+
+     #
+
+     CRITICAL: ta.ClassVar['NamedLogLevel']
+     ERROR: ta.ClassVar['NamedLogLevel']
+     WARNING: ta.ClassVar['NamedLogLevel']
+     INFO: ta.ClassVar['NamedLogLevel']
+     DEBUG: ta.ClassVar['NamedLogLevel']
+     NOTSET: ta.ClassVar['NamedLogLevel']
+
+
+ NamedLogLevel.CRITICAL = NamedLogLevel(logging.CRITICAL)
+ NamedLogLevel.ERROR = NamedLogLevel(logging.ERROR)
+ NamedLogLevel.WARNING = NamedLogLevel(logging.WARNING)
+ NamedLogLevel.INFO = NamedLogLevel(logging.INFO)
+ NamedLogLevel.DEBUG = NamedLogLevel(logging.DEBUG)
+ NamedLogLevel.NOTSET = NamedLogLevel(logging.NOTSET)
 
 
  ########################################
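
Aside (illustrative, not part of the diff): `NamedLogLevel` subclasses `int`, so instances compare, hash, and sort like plain stdlib level numbers while adding name resolution:

    lvl = NamedLogLevel(logging.INFO)
    assert lvl == logging.INFO           # still an int
    assert lvl.exact_name == 'INFO'
    assert str(lvl) == 'INFO'

    odd = NamedLogLevel(25)              # between INFO (20) and WARNING (30)
    assert odd.exact_name is None
    assert odd.effective_name == 'INFO'  # highest named level <= 25
    assert str(odd) == 'INFO:25'
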
@@ -2803,6 +2990,17 @@ class ProxyLoggingHandler(ProxyLoggingFilterer, logging.Handler):
          self._underlying.handleError(record)
 
 
+ ########################################
+ # ../../../../../omlish/logs/warnings.py
+
+
+ ##
+
+
+ class LoggingSetupWarning(Warning):
+     pass
+
+
  ########################################
  # ../../../../../omlish/os/pidfiles/pidfile.py
  """
@@ -3393,341 +3591,98 @@ class AwsDataclassMeta:
 
 
  ########################################
- # ../cursor.py
-
+ # ../../../../../omlish/configs/formats.py
+ """
+ Notes:
+  - necessarily string-oriented
+  - single file, as this is intended to be amalg'd and thus all included anyway
 
- log = get_module_logger(globals())  # noqa
+ TODO:
+  - ConfigDataMapper? to_map -> ConfigMap?
+  - nginx ?
+  - raw ?
+ """
 
 
  ##
 
 
- class JournalctlToAwsCursor:
-     def __init__(
-             self,
-             cursor_file: ta.Optional[str] = None,
-             *,
-             ensure_locked: ta.Optional[ta.Callable[[], None]] = None,
-     ) -> None:
-         super().__init__()
+ @dc.dataclass(frozen=True)
+ class ConfigData(Abstract):
+     @abc.abstractmethod
+     def as_map(self) -> ConfigMap:
+         raise NotImplementedError
 
-         self._cursor_file = cursor_file
-         self._ensure_locked = ensure_locked
 
-     #
+ #
 
-     def get(self) -> ta.Optional[str]:
-         if self._ensure_locked is not None:
-             self._ensure_locked()
 
-         if not (cf := self._cursor_file):
-             return None
-         cf = os.path.expanduser(cf)
+ class ConfigLoader(Abstract, ta.Generic[ConfigDataT]):
+     @property
+     def file_exts(self) -> ta.Sequence[str]:
+         return ()
 
-         try:
-             with open(cf) as f:
-                 return f.read().strip()
-         except FileNotFoundError:
-             return None
+     def match_file(self, n: str) -> bool:
+         return '.' in n and n.split('.')[-1] in check.not_isinstance(self.file_exts, str)
 
-     def set(self, cursor: str) -> None:
-         if self._ensure_locked is not None:
-             self._ensure_locked()
+     #
 
-         if not (cf := self._cursor_file):
-             return
-         cf = os.path.expanduser(cf)
+     def load_file(self, p: str) -> ConfigDataT:
+         with open(p) as f:
+             return self.load_str(f.read())
 
-         log.info('Writing cursor file %s : %s', cf, cursor)
-         with open(ncf := cf + '.next', 'w') as f:
-             f.write(cursor)
+     @abc.abstractmethod
+     def load_str(self, s: str) -> ConfigDataT:
+         raise NotImplementedError
 
-         os.rename(ncf, cf)
 
+ #
 
- ########################################
- # ../../../../threadworkers.py
- """
- FIXME:
-  - group is racy af - meditate on has_started, etc
 
- TODO:
-  - overhaul stop lol
-  - group -> 'context'? :|
-  - shared stop_event?
- """
+ class ConfigRenderer(Abstract, ta.Generic[ConfigDataT]):
+     @property
+     @abc.abstractmethod
+     def data_cls(self) -> ta.Type[ConfigDataT]:
+         raise NotImplementedError
+
+     def match_data(self, d: ConfigDataT) -> bool:
+         return isinstance(d, self.data_cls)
 
+     #
 
- log = get_module_logger(globals())  # noqa
+     @abc.abstractmethod
+     def render(self, d: ConfigDataT) -> str:
+         raise NotImplementedError
 
 
  ##
 
 
- class ThreadWorker(ExitStacked, Abstract):
-     def __init__(
-             self,
-             *,
-             stop_event: ta.Optional[threading.Event] = None,
-             worker_groups: ta.Optional[ta.Iterable['ThreadWorkerGroup']] = None,
-     ) -> None:
-         super().__init__()
+ @dc.dataclass(frozen=True)
+ class ObjConfigData(ConfigData, Abstract):
+     obj: ta.Any
 
-         if stop_event is None:
-             stop_event = threading.Event()
-         self._stop_event = stop_event
+     def as_map(self) -> ConfigMap:
+         return check.isinstance(self.obj, collections.abc.Mapping)
 
-         self._lock = threading.RLock()
-         self._thread: ta.Optional[threading.Thread] = None
-         self._last_heartbeat: ta.Optional[float] = None
 
-         for g in worker_groups or []:
-             g.add(self)
+ ##
 
-     #
 
-     @contextlib.contextmanager
-     def _exit_stacked_init_wrapper(self) -> ta.Iterator[None]:
-         with self._lock:
-             yield
+ @dc.dataclass(frozen=True)
+ class JsonConfigData(ObjConfigData):
+     pass
 
-     #
 
-     def should_stop(self) -> bool:
-         return self._stop_event.is_set()
+ class JsonConfigLoader(ConfigLoader[JsonConfigData]):
+     file_exts = ('json',)
 
-     class Stopping(Exception):  # noqa
-         pass
+     def load_str(self, s: str) -> JsonConfigData:
+         return JsonConfigData(json.loads(s))
 
-     #
 
-     @property
-     def last_heartbeat(self) -> ta.Optional[float]:
-         return self._last_heartbeat
-
-     def _heartbeat(
-             self,
-             *,
-             no_stop_check: bool = False,
-     ) -> None:
-         self._last_heartbeat = time.time()
-
-         if not no_stop_check and self.should_stop():
-             log.info('Stopping: %s', self)
-             raise ThreadWorker.Stopping
-
-     #
-
-     def has_started(self) -> bool:
-         return self._thread is not None
-
-     def is_alive(self) -> bool:
-         return (thr := self._thread) is not None and thr.is_alive()
-
-     def start(self) -> None:
-         with self._lock:
-             if self._thread is not None:
-                 raise RuntimeError('Thread already started: %r', self)
-
-             thr = threading.Thread(target=self.__thread_main)
-             self._thread = thr
-             thr.start()
-
-     #
-
-     def __thread_main(self) -> None:
-         try:
-             self._run()
-         except ThreadWorker.Stopping:
-             log.exception('Thread worker stopped: %r', self)
-         except Exception:  # noqa
-             log.exception('Error in worker thread: %r', self)
-             raise
-
-     @abc.abstractmethod
-     def _run(self) -> None:
-         raise NotImplementedError
-
-     #
-
-     def stop(self) -> None:
-         self._stop_event.set()
-
-     def join(
-             self,
-             timeout: ta.Optional[float] = None,
-             *,
-             unless_not_started: bool = False,
-     ) -> None:
-         with self._lock:
-             if self._thread is None:
-                 if not unless_not_started:
-                     raise RuntimeError('Thread not started: %r', self)
-                 return
-             self._thread.join(timeout)
-
-
- ##
-
-
- class ThreadWorkerGroup:
-     @dc.dataclass()
-     class _State:
-         worker: ThreadWorker
-
-         last_heartbeat: ta.Optional[float] = None
-
-     def __init__(self) -> None:
-         super().__init__()
-
-         self._lock = threading.RLock()
-         self._states: ta.Dict[ThreadWorker, ThreadWorkerGroup._State] = {}
-         self._last_heartbeat_check: ta.Optional[float] = None
-
-     #
-
-     def add(self, *workers: ThreadWorker) -> 'ThreadWorkerGroup':
-         with self._lock:
-             for w in workers:
-                 if w in self._states:
-                     raise KeyError(w)
-                 self._states[w] = ThreadWorkerGroup._State(w)
-
-         return self
-
-     #
-
-     def start_all(self) -> None:
-         thrs = list(self._states)
-         with self._lock:
-             for thr in thrs:
-                 if not thr.has_started():
-                     thr.start()
-
-     def stop_all(self) -> None:
-         for w in reversed(list(self._states)):
-             if w.has_started():
-                 w.stop()
-
-     def join_all(self, timeout: ta.Optional[float] = None) -> None:
-         for w in reversed(list(self._states)):
-             if w.has_started():
-                 w.join(timeout, unless_not_started=True)
-
-     #
-
-     def get_dead(self) -> ta.List[ThreadWorker]:
-         with self._lock:
-             return [thr for thr in self._states if not thr.is_alive()]
-
-     def check_heartbeats(self) -> ta.Dict[ThreadWorker, float]:
-         with self._lock:
-             dct: ta.Dict[ThreadWorker, float] = {}
-             for thr, st in self._states.items():
-                 if not thr.has_started():
-                     continue
-                 hb = thr.last_heartbeat
-                 if hb is None:
-                     hb = time.time()
-                     st.last_heartbeat = hb
-                 dct[st.worker] = time.time() - hb
-             self._last_heartbeat_check = time.time()
-             return dct
-
-
- ########################################
- # ../../../../../omlish/configs/formats.py
- """
- Notes:
-  - necessarily string-oriented
-  - single file, as this is intended to be amalg'd and thus all included anyway
-
- TODO:
-  - ConfigDataMapper? to_map -> ConfigMap?
-  - nginx ?
-  - raw ?
- """
-
-
- ##
-
-
- @dc.dataclass(frozen=True)
- class ConfigData(Abstract):
-     @abc.abstractmethod
-     def as_map(self) -> ConfigMap:
-         raise NotImplementedError
-
-
- #
-
-
- class ConfigLoader(Abstract, ta.Generic[ConfigDataT]):
-     @property
-     def file_exts(self) -> ta.Sequence[str]:
-         return ()
-
-     def match_file(self, n: str) -> bool:
-         return '.' in n and n.split('.')[-1] in check.not_isinstance(self.file_exts, str)
-
-     #
-
-     def load_file(self, p: str) -> ConfigDataT:
-         with open(p) as f:
-             return self.load_str(f.read())
-
-     @abc.abstractmethod
-     def load_str(self, s: str) -> ConfigDataT:
-         raise NotImplementedError
-
-
- #
-
-
- class ConfigRenderer(Abstract, ta.Generic[ConfigDataT]):
-     @property
-     @abc.abstractmethod
-     def data_cls(self) -> ta.Type[ConfigDataT]:
-         raise NotImplementedError
-
-     def match_data(self, d: ConfigDataT) -> bool:
-         return isinstance(d, self.data_cls)
-
-     #
-
-     @abc.abstractmethod
-     def render(self, d: ConfigDataT) -> str:
-         raise NotImplementedError
-
-
- ##
-
-
- @dc.dataclass(frozen=True)
- class ObjConfigData(ConfigData, Abstract):
-     obj: ta.Any
-
-     def as_map(self) -> ConfigMap:
-         return check.isinstance(self.obj, collections.abc.Mapping)
-
-
- ##
-
-
- @dc.dataclass(frozen=True)
- class JsonConfigData(ObjConfigData):
-     pass
-
-
- class JsonConfigLoader(ConfigLoader[JsonConfigData]):
-     file_exts = ('json',)
-
-     def load_str(self, s: str) -> JsonConfigData:
-         return JsonConfigData(json.loads(s))
-
-
- class JsonConfigRenderer(ConfigRenderer[JsonConfigData]):
-     data_cls = JsonConfigData
+ class JsonConfigRenderer(ConfigRenderer[JsonConfigData]):
+     data_cls = JsonConfigData
 
      def render(self, d: JsonConfigData) -> str:
          return json_dumps_pretty(d.obj)
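
Aside (illustrative, not part of the diff): the relocated config-format classes keep their existing API; a round trip through the JSON pair looks like:

    loader = JsonConfigLoader()
    assert loader.match_file('app.json')     # matched via file_exts

    data = loader.load_str('{"a": 1}')       # JsonConfigData(obj={'a': 1})
    assert data.as_map() == {'a': 1}

    renderer = JsonConfigRenderer()
    assert renderer.match_data(data)
    print(renderer.render(data))             # pretty-printed JSON
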
@@ -4951,6 +4906,75 @@ def check_lite_runtime_version() -> None:
          raise OSError(f'Requires python {LITE_REQUIRED_PYTHON_VERSION}, got {sys.version_info} from {sys.executable}')  # noqa
 
 
+ ########################################
+ # ../../../../../omlish/logs/callers.py
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingCaller(ta.NamedTuple):
+     file_path: str
+     line_no: int
+     name: str
+     stack_info: ta.Optional[str]
+
+     @classmethod
+     def is_internal_frame(cls, frame: types.FrameType) -> bool:
+         file_path = os.path.normcase(frame.f_code.co_filename)
+
+         # Yes, really.
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L204
+         # https://github.com/python/cpython/commit/5ca6d7469be53960843df39bb900e9c3359f127f
+         if 'importlib' in file_path and '_bootstrap' in file_path:
+             return True
+
+         return False
+
+     @classmethod
+     def find_frame(cls, ofs: int = 0) -> ta.Optional[types.FrameType]:
+         f: ta.Optional[types.FrameType] = sys._getframe(2 + ofs)  # noqa
+
+         while f is not None:
+             # NOTE: We don't check __file__ like stdlib since we may be running amalgamated - we rely on careful, manual
+             # stack_offset management.
+             if hasattr(f, 'f_code'):
+                 return f
+
+             f = f.f_back
+
+         return None
+
+     @classmethod
+     def find(
+             cls,
+             ofs: int = 0,
+             *,
+             stack_info: bool = False,
+     ) -> ta.Optional['LoggingCaller']:
+         if (f := cls.find_frame(ofs + 1)) is None:
+             return None
+
+         # https://github.com/python/cpython/blob/08e9794517063c8cd92c48714071b1d3c60b71bd/Lib/logging/__init__.py#L1616-L1623  # noqa
+         sinfo = None
+         if stack_info:
+             sio = io.StringIO()
+             traceback.print_stack(f, file=sio)
+             sinfo = sio.getvalue()
+             sio.close()
+             if sinfo[-1] == '\n':
+                 sinfo = sinfo[:-1]
+
+         return cls(
+             f.f_code.co_filename,
+             f.f_lineno or 0,
+             f.f_code.co_name,
+             sinfo,
+         )
+
+
  ########################################
  # ../../../../../omlish/logs/std/json.py
  """
@@ -5009,13 +5033,98 @@ class JsonLoggingFormatter(logging.Formatter):
 
 
  ########################################
- # ../../logs.py
- """
- https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html :
-  - The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26
-    bytes for each log event.
-  - None of the log events in the batch can be more than 2 hours in the future.
-  - None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from
+ # ../../../../../omlish/logs/times.py
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingTimeFields(ta.NamedTuple):
+     """Maps directly to stdlib `logging.LogRecord` fields, and must be kept in sync with it."""
+
+     created: float
+     msecs: float
+     relative_created: float
+
+     @classmethod
+     def get_std_start_time_ns(cls) -> int:
+         x: ta.Any = logging._startTime  # type: ignore[attr-defined]  # noqa
+
+         # Before 3.13.0b1 this will be `time.time()`, a float of seconds. After that, it will be `time.time_ns()`, an
+         # int.
+         #
+         # See:
+         #  - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+         #
+         if isinstance(x, float):
+             return int(x * 1e9)
+         else:
+             return x
+
+     @classmethod
+     def build(
+             cls,
+             time_ns: int,
+             *,
+             start_time_ns: ta.Optional[int] = None,
+     ) -> 'LoggingTimeFields':
+         # https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+         created = time_ns / 1e9  # ns to float seconds
+
+         # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
+         # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
+         # Convert to float by adding 0.0 for historical reasons. See gh-89047
+         msecs = (time_ns % 1_000_000_000) // 1_000_000 + 0.0
+
+         # https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
+         if msecs == 999.0 and int(created) != time_ns // 1_000_000_000:
+             # ns -> sec conversion can round up, e.g:
+             # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
+             msecs = 0.0
+
+         if start_time_ns is None:
+             start_time_ns = cls.get_std_start_time_ns()
+         relative_created = (time_ns - start_time_ns) / 1e6
+
+         return cls(
+             created,
+             msecs,
+             relative_created,
+         )
+
+
+ ##
+
+
+ class UnexpectedLoggingStartTimeWarning(LoggingSetupWarning):
+     pass
+
+
+ def _check_logging_start_time() -> None:
+     if (x := LoggingTimeFields.get_std_start_time_ns()) < (t := time.time()):
+         import warnings  # noqa
+
+         warnings.warn(
+             f'Unexpected logging start time detected: '
+             f'get_std_start_time_ns={x}, '
+             f'time.time()={t}',
+             UnexpectedLoggingStartTimeWarning,
+         )
+
+
+ _check_logging_start_time()
+
+
+ ########################################
+ # ../../logs.py
+ """
+ https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html :
+  - The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26
+    bytes for each log event.
+  - None of the log events in the batch can be more than 2 hours in the future.
+  - None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from
     earlier than the retention period of the log group.
   - The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the
     event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools for PowerShell
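
Aside (worked example, not part of the diff): the `msecs` arithmetic in `LoggingTimeFields.build` mirrors CPython gh-89047 plus the rounding guard from the commit referenced above:

    t_ns = 1_677_903_920_999_998_503
    created = t_ns / 1e9                                 # 1677903920.9999986
    msecs = (t_ns % 1_000_000_000) // 1_000_000 + 0.0    # 999.0
    assert int(created) == t_ns // 1_000_000_000         # no rounding; msecs stays 999.0

    t_ns = 1_677_903_920_999_999_900
    created = t_ns / 1e9                                 # rounds up to 1677903921.0
    # int(created) != t_ns // 1_000_000_000, so build() forces msecs to 0.0
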
@@ -5179,85 +5288,6 @@ class AwsLogMessageBuilder:
          return [post]
 
 
- ########################################
- # ../../../../journald/messages.py
-
-
- log = get_module_logger(globals())  # noqa
-
-
- ##
-
-
- @dc.dataclass(frozen=True)
- class JournalctlMessage:
-     raw: bytes
-     dct: ta.Optional[ta.Mapping[str, ta.Any]] = None
-     cursor: ta.Optional[str] = None
-     ts_us: ta.Optional[int] = None  # microseconds UTC
-
-
- class JournalctlMessageBuilder:
-     def __init__(self) -> None:
-         super().__init__()
-
-         self._buf = DelimitingBuffer(b'\n')
-
-     _cursor_field = '__CURSOR'
-
-     _timestamp_fields: ta.Sequence[str] = [
-         '_SOURCE_REALTIME_TIMESTAMP',
-         '__REALTIME_TIMESTAMP',
-     ]
-
-     def _get_message_timestamp(self, dct: ta.Mapping[str, ta.Any]) -> ta.Optional[int]:
-         for fld in self._timestamp_fields:
-             if (tsv := dct.get(fld)) is None:
-                 continue
-
-             if isinstance(tsv, str):
-                 try:
-                     return int(tsv)
-                 except ValueError:
-                     try:
-                         return int(float(tsv))
-                     except ValueError:
-                         log.exception('Failed to parse timestamp: %r', tsv)
-
-             elif isinstance(tsv, (int, float)):
-                 return int(tsv)
-
-         log.error('Invalid timestamp: %r', dct)
-         return None
-
-     def _make_message(self, raw: bytes) -> JournalctlMessage:
-         dct = None
-         cursor = None
-         ts = None
-
-         try:
-             dct = json.loads(raw.decode('utf-8', 'replace'))
-         except Exception:  # noqa
-             log.exception('Failed to parse raw message: %r', raw)
-
-         else:
-             cursor = dct.get(self._cursor_field)
-             ts = self._get_message_timestamp(dct)
-
-         return JournalctlMessage(
-             raw=raw,
-             dct=dct,
-             cursor=cursor,
-             ts_us=ts,
-         )
-
-     def feed(self, data: bytes) -> ta.Sequence[JournalctlMessage]:
-         ret: ta.List[JournalctlMessage] = []
-         for line in self._buf.feed(data):
-             ret.append(self._make_message(check.isinstance(line, bytes)))
-         return ret
-
-
  ########################################
  # ../../../../../omlish/lite/configs.py
 
@@ -5291,132 +5321,389 @@ def load_config_file_obj(
 
 
  ########################################
- # ../../../../../omlish/logs/standard.py
- """
- TODO:
-  - !! move to std !!
-  - structured
-  - prefixed
-  - debug
-  - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
- """
+ # ../../../../../omlish/logs/contexts.py
 
 
  ##
 
 
- STANDARD_LOG_FORMAT_PARTS = [
-     ('asctime', '%(asctime)-15s'),
-     ('process', 'pid=%(process)s'),
-     ('thread', 'tid=%(thread)x'),
-     ('levelname', '%(levelname)s'),
-     ('name', '%(name)s'),
-     ('separator', '::'),
-     ('message', '%(message)s'),
- ]
+ class LoggingContext(Abstract):
+     @property
+     @abc.abstractmethod
+     def level(self) -> NamedLogLevel:
+         raise NotImplementedError
 
+     #
 
- class StandardLoggingFormatter(logging.Formatter):
-     @staticmethod
-     def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
-         return ' '.join(v for k, v in parts)
+     @property
+     @abc.abstractmethod
+     def time_ns(self) -> int:
+         raise NotImplementedError
 
-     converter = datetime.datetime.fromtimestamp  # type: ignore
+     @property
+     @abc.abstractmethod
+     def times(self) -> LoggingTimeFields:
+         raise NotImplementedError
 
-     def formatTime(self, record, datefmt=None):
-         ct = self.converter(record.created)
-         if datefmt:
-             return ct.strftime(datefmt)  # noqa
-         else:
-             t = ct.strftime('%Y-%m-%d %H:%M:%S')
-             return '%s.%03d' % (t, record.msecs)  # noqa
+     #
 
+     @property
+     @abc.abstractmethod
+     def exc_info(self) -> ta.Optional[LoggingExcInfo]:
+         raise NotImplementedError
 
- ##
+     @property
+     @abc.abstractmethod
+     def exc_info_tuple(self) -> ta.Optional[LoggingExcInfoTuple]:
+         raise NotImplementedError
 
+     #
 
- class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
-     def __init_subclass__(cls, **kwargs):
-         raise TypeError('This class serves only as a marker and should not be subclassed.')
+     @abc.abstractmethod
+     def caller(self) -> ta.Optional[LoggingCaller]:
+         raise NotImplementedError
 
+     @abc.abstractmethod
+     def source_file(self) -> ta.Optional[LoggingSourceFileInfo]:
+         raise NotImplementedError
 
- ##
+     #
 
+     @abc.abstractmethod
+     def thread(self) -> ta.Optional[LoggingThreadInfo]:
+         raise NotImplementedError
 
- @contextlib.contextmanager
- def _locking_logging_module_lock() -> ta.Iterator[None]:
-     if hasattr(logging, '_acquireLock'):
-         logging._acquireLock()  # noqa
-         try:
-             yield
-         finally:
-             logging._releaseLock()  # type: ignore  # noqa
+     @abc.abstractmethod
+     def process(self) -> ta.Optional[LoggingProcessInfo]:
+         raise NotImplementedError
 
-     elif hasattr(logging, '_lock'):
-         # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
-         with logging._lock:  # noqa
-             yield
+     @abc.abstractmethod
+     def multiprocessing(self) -> ta.Optional[LoggingMultiprocessingInfo]:
+         raise NotImplementedError
 
-     else:
-         raise Exception("Can't find lock in logging module")
+     @abc.abstractmethod
+     def asyncio_task(self) -> ta.Optional[LoggingAsyncioTaskInfo]:
+         raise NotImplementedError
 
 
- def configure_standard_logging(
-         level: ta.Union[int, str] = logging.INFO,
-         *,
-         json: bool = False,
-         target: ta.Optional[logging.Logger] = None,
-         force: bool = False,
-         handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
- ) -> ta.Optional[StandardConfiguredLoggingHandler]:
-     with _locking_logging_module_lock():
-         if target is None:
-             target = logging.root
+ ##
 
-         #
 
-         if not force:
-             if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
-                 return None
+ class CaptureLoggingContext(LoggingContext, Abstract):
+     class AlreadyCapturedError(Exception):
+         pass
 
-         #
+     class NotCapturedError(Exception):
+         pass
 
-         if handler_factory is not None:
-             handler = handler_factory()
-         else:
-             handler = logging.StreamHandler()
+     @abc.abstractmethod
+     def capture(self) -> None:
+         """Must be cooperatively called only from the expected locations."""
 
-         #
+         raise NotImplementedError
 
-         formatter: logging.Formatter
-         if json:
-             formatter = JsonLoggingFormatter()
-         else:
-             formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS))
-         handler.setFormatter(formatter)
 
-         #
+ @ta.final
+ class CaptureLoggingContextImpl(CaptureLoggingContext):
+     @ta.final
+     class NOT_SET:  # noqa
+         def __new__(cls, *args, **kwargs):  # noqa
+             raise TypeError
 
-         handler.addFilter(TidLoggingFilter())
+     #
 
-         #
+     def __init__(
+             self,
+             level: LogLevel,
+             *,
+             time_ns: ta.Optional[int] = None,
 
-         target.addHandler(handler)
+             exc_info: LoggingExcInfoArg = False,
+
+             caller: ta.Union[LoggingCaller, ta.Type[NOT_SET], None] = NOT_SET,
+             stack_offset: int = 0,
+             stack_info: bool = False,
+     ) -> None:
+         self._level: NamedLogLevel = level if level.__class__ is NamedLogLevel else NamedLogLevel(level)  # type: ignore[assignment]  # noqa
 
          #
 
-         if level is not None:
-             target.setLevel(level)
+         if time_ns is None:
+             time_ns = time.time_ns()
+         self._time_ns: int = time_ns
 
          #
 
-         return StandardConfiguredLoggingHandler(handler)
+         if exc_info is True:
+             sys_exc_info = sys.exc_info()
+             if sys_exc_info[0] is not None:
+                 exc_info = sys_exc_info
+             else:
+                 exc_info = None
+         elif exc_info is False:
+             exc_info = None
+
+         if exc_info is not None:
+             self._exc_info: ta.Optional[LoggingExcInfo] = exc_info
+             if isinstance(exc_info, BaseException):
+                 self._exc_info_tuple: ta.Optional[LoggingExcInfoTuple] = (type(exc_info), exc_info, exc_info.__traceback__)  # noqa
+             else:
+                 self._exc_info_tuple = exc_info
 
+         #
 
- ########################################
- # ../../../../../omlish/subprocesses/wrap.py
- """
- This bypasses debuggers attaching to spawned subprocess children that look like python processes. See:
+         if caller is not CaptureLoggingContextImpl.NOT_SET:
+             self._caller = caller  # type: ignore[assignment]
+         else:
+             self._stack_offset = stack_offset
+             self._stack_info = stack_info
+
+     ##
+
+     @property
+     def level(self) -> NamedLogLevel:
+         return self._level
+
+     #
+
+     @property
+     def time_ns(self) -> int:
+         return self._time_ns
+
+     _times: LoggingTimeFields
+
+     @property
+     def times(self) -> LoggingTimeFields:
+         try:
+             return self._times
+         except AttributeError:
+             pass
+
+         times = self._times = LoggingTimeFields.build(self.time_ns)
+         return times
+
+     #
+
+     _exc_info: ta.Optional[LoggingExcInfo] = None
+     _exc_info_tuple: ta.Optional[LoggingExcInfoTuple] = None
+
+     @property
+     def exc_info(self) -> ta.Optional[LoggingExcInfo]:
+         return self._exc_info
+
+     @property
+     def exc_info_tuple(self) -> ta.Optional[LoggingExcInfoTuple]:
+         return self._exc_info_tuple
+
+     ##
+
+     _stack_offset: int
+     _stack_info: bool
+
+     def inc_stack_offset(self, ofs: int = 1) -> 'CaptureLoggingContext':
+         if hasattr(self, '_stack_offset'):
+             self._stack_offset += ofs
+         return self
+
+     _has_captured: bool = False
+
+     _caller: ta.Optional[LoggingCaller]
+     _source_file: ta.Optional[LoggingSourceFileInfo]
+
+     _thread: ta.Optional[LoggingThreadInfo]
+     _process: ta.Optional[LoggingProcessInfo]
+     _multiprocessing: ta.Optional[LoggingMultiprocessingInfo]
+     _asyncio_task: ta.Optional[LoggingAsyncioTaskInfo]
+
+     def capture(self) -> None:
+         if self._has_captured:
+             raise CaptureLoggingContextImpl.AlreadyCapturedError
+         self._has_captured = True
+
+         if not hasattr(self, '_caller'):
+             self._caller = LoggingCaller.find(
+                 self._stack_offset + 1,
+                 stack_info=self._stack_info,
+             )
+
+         if (caller := self._caller) is not None:
+             self._source_file = LoggingSourceFileInfo.build(caller.file_path)
+         else:
+             self._source_file = None
+
+         self._thread = LoggingThreadInfo.build()
+         self._process = LoggingProcessInfo.build()
+         self._multiprocessing = LoggingMultiprocessingInfo.build()
+         self._asyncio_task = LoggingAsyncioTaskInfo.build()
+
+     #
+
+     def caller(self) -> ta.Optional[LoggingCaller]:
+         try:
+             return self._caller
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def source_file(self) -> ta.Optional[LoggingSourceFileInfo]:
+         try:
+             return self._source_file
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     #
+
+     def thread(self) -> ta.Optional[LoggingThreadInfo]:
+         try:
+             return self._thread
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def process(self) -> ta.Optional[LoggingProcessInfo]:
+         try:
+             return self._process
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def multiprocessing(self) -> ta.Optional[LoggingMultiprocessingInfo]:
+         try:
+             return self._multiprocessing
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def asyncio_task(self) -> ta.Optional[LoggingAsyncioTaskInfo]:
+         try:
+             return self._asyncio_task
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+
+ ########################################
+ # ../../../../../omlish/logs/standard.py
+ """
+ TODO:
+  - !! move to std !!
+  - structured
+  - prefixed
+  - debug
+  - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
+ """
+
+
+ ##
+
+
+ STANDARD_LOG_FORMAT_PARTS = [
+     ('asctime', '%(asctime)-15s'),
+     ('process', 'pid=%(process)s'),
+     ('thread', 'tid=%(thread)x'),
+     ('levelname', '%(levelname)s'),
+     ('name', '%(name)s'),
+     ('separator', '::'),
+     ('message', '%(message)s'),
+ ]
+
+
+ class StandardLoggingFormatter(logging.Formatter):
+     @staticmethod
+     def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
+         return ' '.join(v for k, v in parts)
+
+     converter = datetime.datetime.fromtimestamp  # type: ignore
+
+     def formatTime(self, record, datefmt=None):
+         ct = self.converter(record.created)
+         if datefmt:
+             return ct.strftime(datefmt)  # noqa
+         else:
+             t = ct.strftime('%Y-%m-%d %H:%M:%S')
+             return '%s.%03d' % (t, record.msecs)  # noqa
+
+
+ ##
+
+
+ class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
+     def __init_subclass__(cls, **kwargs):
+         raise TypeError('This class serves only as a marker and should not be subclassed.')
+
+
+ ##
+
+
+ @contextlib.contextmanager
+ def _locking_logging_module_lock() -> ta.Iterator[None]:
+     if hasattr(logging, '_acquireLock'):
+         logging._acquireLock()  # noqa
+         try:
+             yield
+         finally:
+             logging._releaseLock()  # type: ignore  # noqa
+
+     elif hasattr(logging, '_lock'):
+         # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
+         with logging._lock:  # noqa
+             yield
+
+     else:
+         raise Exception("Can't find lock in logging module")
+
+
+ def configure_standard_logging(
+         level: ta.Union[int, str] = logging.INFO,
+         *,
+         json: bool = False,
+         target: ta.Optional[logging.Logger] = None,
+         force: bool = False,
+         handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
+ ) -> ta.Optional[StandardConfiguredLoggingHandler]:
+     with _locking_logging_module_lock():
+         if target is None:
+             target = logging.root
+
+         #
+
+         if not force:
+             if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
+                 return None
+
+         #
+
+         if handler_factory is not None:
+             handler = handler_factory()
+         else:
+             handler = logging.StreamHandler()
+
+         #
+
+         formatter: logging.Formatter
+         if json:
+             formatter = JsonLoggingFormatter()
+         else:
+             formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS))
+         handler.setFormatter(formatter)
+
+         #
+
+         handler.addFilter(TidLoggingFilter())
+
+         #
+
+         target.addHandler(handler)
+
+         #
+
+         if level is not None:
+             target.setLevel(level)
+
+         #
+
+         return StandardConfiguredLoggingHandler(handler)
+
+
+ ########################################
+ # ../../../../../omlish/subprocesses/wrap.py
+ """
+ This bypasses debuggers attaching to spawned subprocess children that look like python processes. See:
 
  https://github.com/JetBrains/intellij-community/blob/e9d8f126c286acf9df3ff272f440b305bf2ff585/python/helpers/pydev/_pydev_bundle/pydev_monkey.py
  """
@@ -5439,6 +5726,894 @@ def subprocess_maybe_shell_wrap_exec(*cmd: str) -> ta.Tuple[str, ...]:
5439
5726
  return cmd
5440
5727
 
5441
5728
 
5729
+ ########################################
5730
+ # ../../../../../omlish/logs/base.py
5731
+
5732
+
5733
+ ##
5734
+
5735
+
5736
+ class AnyLogger(Abstract, ta.Generic[T]):
5737
+ @ta.final
5738
+ def is_enabled_for(self, level: LogLevel) -> bool:
5739
+ return level >= self.get_effective_level()
5740
+
5741
+ @abc.abstractmethod
5742
+ def get_effective_level(self) -> LogLevel:
5743
+ raise NotImplementedError
5744
+
5745
+ #
5746
+
5747
+ @ta.final
5748
+ def isEnabledFor(self, level: LogLevel) -> bool: # noqa
5749
+ return self.is_enabled_for(level)
5750
+
5751
+ @ta.final
5752
+ def getEffectiveLevel(self) -> LogLevel: # noqa
5753
+ return self.get_effective_level()
5754
+
5755
+ ##
5756
+
5757
+ @ta.overload
5758
+ def log(self, level: LogLevel, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5759
+ ...
5760
+
5761
+ @ta.overload
5762
+ def log(self, level: LogLevel, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5763
+ ...
5764
+
5765
+ @ta.overload
5766
+ def log(self, level: LogLevel, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5767
+ ...
5768
+
5769
+ @ta.final
5770
+ def log(self, level: LogLevel, *args, **kwargs):
5771
+ return self._log(CaptureLoggingContextImpl(level, stack_offset=1), *args, **kwargs)
5772
+
5773
+ #
5774
+
5775
+ @ta.overload
5776
+ def debug(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5777
+ ...
5778
+
5779
+ @ta.overload
5780
+ def debug(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5781
+ ...
5782
+
5783
+ @ta.overload
5784
+ def debug(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5785
+ ...
5786
+
5787
+ @ta.final
5788
+ def debug(self, *args, **kwargs):
5789
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.DEBUG, stack_offset=1), *args, **kwargs)
5790
+
5791
+ #
5792
+
5793
+ @ta.overload
5794
+ def info(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5795
+ ...
5796
+
5797
+ @ta.overload
5798
+ def info(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5799
+ ...
5800
+
5801
+ @ta.overload
5802
+ def info(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5803
+ ...
5804
+
5805
+ @ta.final
5806
+ def info(self, *args, **kwargs):
5807
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.INFO, stack_offset=1), *args, **kwargs)
5808
+
5809
+ #
5810
+
5811
+ @ta.overload
5812
+ def warning(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5813
+ ...
5814
+
5815
+ @ta.overload
5816
+ def warning(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5817
+ ...
5818
+
5819
+ @ta.overload
5820
+ def warning(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5821
+ ...
5822
+
5823
+ @ta.final
5824
+ def warning(self, *args, **kwargs):
5825
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.WARNING, stack_offset=1), *args, **kwargs)
5826
+
5827
+ #
5828
+
5829
+ @ta.overload
5830
+ def error(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5831
+ ...
5832
+
5833
+ @ta.overload
5834
+ def error(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5835
+ ...
5836
+
5837
+ @ta.overload
5838
+ def error(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5839
+ ...
5840
+
5841
+ @ta.final
5842
+ def error(self, *args, **kwargs):
5843
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, stack_offset=1), *args, **kwargs)
5844
+
5845
+ #
5846
+
5847
+ @ta.overload
5848
+ def exception(self, msg: str, *args: ta.Any, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
5849
+ ...
5850
+
5851
+ @ta.overload
5852
+ def exception(self, msg: ta.Tuple[ta.Any, ...], *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
5853
+ ...
5854
+
5855
+ @ta.overload
5856
+ def exception(self, msg_fn: LoggingMsgFn, *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
5857
+ ...
5858
+
5859
+ @ta.final
5860
+ def exception(self, *args, exc_info: LoggingExcInfoArg = True, **kwargs):
5861
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, exc_info=exc_info, stack_offset=1), *args, **kwargs) # noqa
5862
+
5863
+ #
5864
+
5865
+ @ta.overload
5866
+ def critical(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
5867
+ ...
5868
+
5869
+ @ta.overload
5870
+ def critical(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
5871
+ ...
5872
+
5873
+ @ta.overload
5874
+ def critical(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
5875
+ ...
5876
+
5877
+ @ta.final
5878
+ def critical(self, *args, **kwargs):
5879
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.CRITICAL, stack_offset=1), *args, **kwargs)
5880
+
5881
+ ##
5882
+
5883
+ @classmethod
5884
+ def _prepare_msg_args(cls, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> ta.Tuple[str, tuple]:
5885
+ if callable(msg):
5886
+ if args:
5887
+ raise TypeError(f'Must not provide both a message function and args: {msg=} {args=}')
5888
+ x = msg()
5889
+ if isinstance(x, str):
5890
+ return x, ()
5891
+ elif isinstance(x, tuple):
5892
+ if x:
5893
+ return x[0], x[1:]
5894
+ else:
5895
+ return '', ()
5896
+ else:
5897
+ raise TypeError(x)
5898
+
5899
+ elif isinstance(msg, tuple):
5900
+ if args:
5901
+ raise TypeError(f'Must not provide both a tuple message and args: {msg=} {args=}')
5902
+ if msg:
5903
+ return msg[0], msg[1:]
5904
+ else:
5905
+ return '', ()
5906
+
5907
+ elif isinstance(msg, str):
5908
+ return msg, args
5909
+
5910
+ else:
5911
+ raise TypeError(msg)
5912
+
5913
+ @abc.abstractmethod
5914
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> T: # noqa
5915
+ raise NotImplementedError
5916
+
5917
+
5918
+ class Logger(AnyLogger[None], Abstract):
5919
+ @abc.abstractmethod
5920
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
5921
+ raise NotImplementedError
5922
+
5923
+
5924
+ class AsyncLogger(AnyLogger[ta.Awaitable[None]], Abstract):
5925
+ @abc.abstractmethod
5926
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> ta.Awaitable[None]: # noqa
5927
+ raise NotImplementedError
5928
+
5929
+
5930
+ ##
5931
+
5932
+
5933
+ class AnyNopLogger(AnyLogger[T], Abstract):
5934
+ @ta.final
5935
+ def get_effective_level(self) -> LogLevel:
5936
+ return 999
5937
+
5938
+
5939
+ @ta.final
5940
+ class NopLogger(AnyNopLogger[None], Logger):
5941
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
5942
+ pass
5943
+
5944
+
5945
+ @ta.final
5946
+ class AsyncNopLogger(AnyNopLogger[ta.Awaitable[None]], AsyncLogger):
5947
+ async def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
5948
+ pass
5949
+
5950
+
5951
+ ########################################
5952
+ # ../../../../../omlish/logs/std/records.py
5953
+
5954
+
5955
+ ##
5956
+
5957
+
5958
+ # Ref:
5959
+ # - https://docs.python.org/3/library/logging.html#logrecord-attributes
5960
+ #
5961
+ # LogRecord:
5962
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L276 (3.8)
5963
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L286 (~3.14) # noqa
5964
+ #
5965
+ # LogRecord.__init__ args:
5966
+ # - name: str
5967
+ # - level: int
5968
+ # - pathname: str - Confusingly referred to as `fn` before the LogRecord ctor. May be empty or "(unknown file)".
5969
+ # - lineno: int - May be 0.
5970
+ # - msg: str
5971
+ # - args: tuple | dict | 1-tuple[dict]
5972
+ # - exc_info: LoggingExcInfoTuple | None
5973
+ # - func: str | None = None -> funcName
5974
+ # - sinfo: str | None = None -> stack_info
5975
+ #
5976
+ KNOWN_STD_LOGGING_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
5977
+ # Name of the logger used to log the call. Unmodified by ctor.
5978
+ name=str,
5979
+
5980
+ # The format string passed in the original logging call. Merged with args to produce message, or an arbitrary object
5981
+ # (see Using arbitrary objects as messages). Unmodified by ctor.
5982
+ msg=str,
5983
+
5984
+ # The tuple of arguments merged into msg to produce message, or a dict whose values are used for the merge (when
5985
+ # there is only one argument, and it is a dictionary). Ctor will transform a 1-tuple containing a Mapping into just
5986
+ # the mapping, but is otherwise unmodified.
5987
+ args=ta.Union[tuple, dict],
5988
+
5989
+ #
5990
+
5991
+ # Text logging level for the message ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'). Set to
5992
+ # `getLevelName(level)`.
5993
+ levelname=str,
5994
+
5995
+ # Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL). Unmodified by ctor.
5996
+ levelno=int,
5997
+
5998
+ #
5999
+
6000
+ # Full pathname of the source file where the logging call was issued (if available). Unmodified by ctor. May default
6001
+ # to "(unknown file)" by Logger.findCaller / Logger._log.
6002
+ pathname=str,
6003
+
6004
+ # Filename portion of pathname. Set to `os.path.basename(pathname)` if successful, otherwise defaults to pathname.
6005
+ filename=str,
6006
+
6007
+ # Module (name portion of filename). Set to `os.path.splitext(filename)[0]`, otherwise defaults to
6008
+ # "Unknown module".
6009
+ module=str,
6010
+
6011
+ #
6012
+
6013
+ # Exception tuple (à la sys.exc_info) or, if no exception has occurred, None. Unmodified by ctor.
6014
+ exc_info=ta.Optional[LoggingExcInfoTuple],
6015
+
6016
+ # Used to cache the traceback text. Simply set to None by ctor, later set by Formatter.format.
6017
+ exc_text=ta.Optional[str],
6018
+
6019
+ #
6020
+
6021
+ # Stack frame information (where available) from the bottom of the stack in the current thread, up to and including
6022
+ # the stack frame of the logging call which resulted in the creation of this record. Set by ctor to `sinfo` arg,
6023
+ # unmodified. Mostly set, if requested, by `Logger.findCaller`, to `traceback.print_stack(f)`, but prepended with
6024
+ # the literal "Stack (most recent call last):\n", and stripped of exactly one trailing `\n` if present.
6025
+ stack_info=ta.Optional[str],
6026
+
6027
+ # Source line number where the logging call was issued (if available). Unmodified by ctor. May default to 0 by
6028
+ # Logger.findCaller / Logger._log.
6029
+ lineno=int,
6030
+
6031
+ # Name of function containing the logging call. Set by ctor to `func` arg, unmodified. May default to
6032
+ # "(unknown function)" by Logger.findCaller / Logger._log.
6033
+ funcName=str,
6034
+
6035
+ #
6036
+
6037
+ # Time when the LogRecord was created. Set to `time.time_ns() / 1e9` for >=3.13.0b1, otherwise simply `time.time()`.
6038
+ #
6039
+ # See:
6040
+ # - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
6041
+ # - https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
6042
+ #
6043
+ created=float,
6044
+
6045
+ # Millisecond portion of the time when the LogRecord was created.
6046
+ msecs=float,
6047
+
6048
+ # Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded.
6049
+ relativeCreated=float,
6050
+
6051
+ #
6052
+
6053
+ # Thread ID if available, and `logging.logThreads` is truthy.
6054
+ thread=ta.Optional[int],
6055
+
6056
+ # Thread name if available, and `logging.logThreads` is truthy.
6057
+ threadName=ta.Optional[str],
6058
+
6059
+ #
6060
+
6061
+ # Process name if available. Set to None if `logging.logMultiprocessing` is not truthy. Otherwise, set to
6062
+ # 'MainProcess', then `sys.modules.get('multiprocessing').current_process().name` if that works, otherwise remains
6063
+ # as 'MainProcess'.
6064
+ #
6065
+ # As noted by stdlib:
6066
+ #
6067
+ # Errors may occur if multiprocessing has not finished loading yet - e.g. if a custom import hook causes
6068
+ # third-party code to run when multiprocessing calls import. See issue 8200 for an example
6069
+ #
6070
+ processName=ta.Optional[str],
6071
+
6072
+ # Process ID if available - that is, if `hasattr(os, 'getpid')` - and `logging.logProcesses` is truthy, otherwise
6073
+ # None.
6074
+ process=ta.Optional[int],
6075
+
6076
+ #
6077
+
6078
+ # Absent <3.12, otherwise asyncio.Task name if available, and `logging.logAsyncioTasks` is truthy. Set to
6079
+ # `sys.modules.get('asyncio').current_task().get_name()`, otherwise None.
6080
+ taskName=ta.Optional[str],
6081
+ )
6082
+
6083
+ KNOWN_STD_LOGGING_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(KNOWN_STD_LOGGING_RECORD_ATTRS)
6084
+
6085
+
6086
+ # Formatter:
6087
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L514 (3.8)
6088
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L554 (~3.14) # noqa
6089
+ #
6090
+ KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
6091
+ # The logged message, computed as msg % args. Set to `record.getMessage()`.
6092
+ message=str,
6093
+
6094
+ # Human-readable time when the LogRecord was created. By default this is of the form '2003-07-08 16:49:45,896' (the
6095
+ # numbers after the comma are millisecond portion of the time). Set to `self.formatTime(record, self.datefmt)` if
6096
+ # `self.usesTime()`, otherwise unset.
6097
+ asctime=str,
6098
+
6099
+ # Used to cache the traceback text. If unset (falsey) on the record and `exc_info` is truthy, set to
6100
+ # `self.formatException(record.exc_info)` - otherwise unmodified.
6101
+ exc_text=ta.Optional[str],
6102
+ )
6103
+
6104
+ KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS)
6105
+
6106
+
6107
+ ##
6108
+
6109
+
6110
+ class UnknownStdLoggingRecordAttrsWarning(LoggingSetupWarning):
6111
+ pass
6112
+
6113
+
6114
+ def _check_std_logging_record_attrs() -> None:
6115
+ rec_dct = dict(logging.makeLogRecord({}).__dict__)
6116
+
6117
+ if (unk_rec_fields := frozenset(rec_dct) - KNOWN_STD_LOGGING_RECORD_ATTR_SET):
6118
+ import warnings # noqa
6119
+
6120
+ warnings.warn(
6121
+ f'Unknown log record attrs detected: {sorted(unk_rec_fields)!r}',
6122
+ UnknownStdLoggingRecordAttrsWarning,
6123
+ )
6124
+
6125
+
6126
+ _check_std_logging_record_attrs()
6127
+
6128
+
6129
+ ##
6130
+
6131
+
6132
+ class LoggingContextLogRecord(logging.LogRecord):
6133
+ _SHOULD_ADD_TASK_NAME: ta.ClassVar[bool] = sys.version_info >= (3, 12)
6134
+
6135
+ _UNKNOWN_PATH_NAME: ta.ClassVar[str] = '(unknown file)'
6136
+ _UNKNOWN_FUNC_NAME: ta.ClassVar[str] = '(unknown function)'
6137
+ _UNKNOWN_MODULE: ta.ClassVar[str] = 'Unknown module'
6138
+
6139
+ _STACK_INFO_PREFIX: ta.ClassVar[str] = 'Stack (most recent call last):\n'
6140
+
6141
+ def __init__( # noqa
6142
+ self,
6143
+ # name,
6144
+ # level,
6145
+ # pathname,
6146
+ # lineno,
6147
+ # msg,
6148
+ # args,
6149
+ # exc_info,
6150
+ # func=None,
6151
+ # sinfo=None,
6152
+ # **kwargs,
6153
+ *,
6154
+ name: str,
6155
+ msg: str,
6156
+ args: ta.Union[tuple, dict],
6157
+
6158
+ _logging_context: LoggingContext,
6159
+ ) -> None:
6160
+ ctx = _logging_context
6161
+
6162
+ self.name: str = name
6163
+
6164
+ self.msg: str = msg
6165
+
6166
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L307
6167
+ if args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) and args[0]:
6168
+ args = args[0] # type: ignore[assignment]
6169
+ self.args: ta.Union[tuple, dict] = args
6170
+
6171
+ self.levelname: str = logging.getLevelName(ctx.level)
6172
+ self.levelno: int = ctx.level
6173
+
6174
+ if (caller := ctx.caller()) is not None:
6175
+ self.pathname: str = caller.file_path
6176
+ else:
6177
+ self.pathname = self._UNKNOWN_PATH_NAME
6178
+
6179
+ if (src_file := ctx.source_file()) is not None:
6180
+ self.filename: str = src_file.file_name
6181
+ self.module: str = src_file.module
6182
+ else:
6183
+ self.filename = self.pathname
6184
+ self.module = self._UNKNOWN_MODULE
6185
+
6186
+ self.exc_info: ta.Optional[LoggingExcInfoTuple] = ctx.exc_info_tuple
6187
+ self.exc_text: ta.Optional[str] = None
6188
+
6189
+ # If ctx.build_caller() was never called, we simply don't have a stack trace.
6190
+ if caller is not None:
6191
+ if (sinfo := caller.stack_info) is not None:
6192
+ self.stack_info: ta.Optional[str] = ''.join([  # the prefix already ends in '\n'
6193
+ self._STACK_INFO_PREFIX,
6194
+ sinfo[:-1] if sinfo.endswith('\n') else sinfo,  # strip trailing newline, as stdlib does
6195
+ ])
6196
+ else:
6197
+ self.stack_info = None
6198
+
6199
+ self.lineno: int = caller.line_no
6200
+ self.funcName: str = caller.name
6201
+
6202
+ else:
6203
+ self.stack_info = None
6204
+
6205
+ self.lineno = 0
6206
+ self.funcName = self._UNKNOWN_FUNC_NAME
6207
+
6208
+ times = ctx.times
6209
+ self.created: float = times.created
6210
+ self.msecs: float = times.msecs
6211
+ self.relativeCreated: float = times.relative_created
6212
+
6213
+ if logging.logThreads:
6214
+ thread = check.not_none(ctx.thread())
6215
+ self.thread: ta.Optional[int] = thread.ident
6216
+ self.threadName: ta.Optional[str] = thread.name
6217
+ else:
6218
+ self.thread = None
6219
+ self.threadName = None
6220
+
6221
+ if logging.logProcesses:
6222
+ process = check.not_none(ctx.process())
6223
+ self.process: ta.Optional[int] = process.pid
6224
+ else:
6225
+ self.process = None
6226
+
6227
+ if logging.logMultiprocessing:
6228
+ if (mp := ctx.multiprocessing()) is not None:
6229
+ self.processName: ta.Optional[str] = mp.process_name
6230
+ else:
6231
+ self.processName = None
6232
+ else:
6233
+ self.processName = None
6234
+
6235
+ # Absent <3.12
6236
+ if getattr(logging, 'logAsyncioTasks', None):
6237
+ if (at := ctx.asyncio_task()) is not None:
6238
+ self.taskName: ta.Optional[str] = at.name
6239
+ else:
6240
+ self.taskName = None
6241
+ else:
6242
+ self.taskName = None
6243
+
6244
+
6245
+ ########################################
6246
+ # ../../../../../omlish/logs/std/adapters.py
6247
+
6248
+
6249
+ ##
6250
+
6251
+
6252
+ class StdLogger(Logger):
6253
+ def __init__(self, std: logging.Logger) -> None:
6254
+ super().__init__()
6255
+
6256
+ self._std = std
6257
+
6258
+ @property
6259
+ def std(self) -> logging.Logger:
6260
+ return self._std
6261
+
6262
+ def get_effective_level(self) -> LogLevel:
6263
+ return self._std.getEffectiveLevel()
6264
+
6265
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> None:
6266
+ if not self.is_enabled_for(ctx.level):
6267
+ return
6268
+
6269
+ ctx.capture()
6270
+
6271
+ ms, args = self._prepare_msg_args(msg, *args)
6272
+
6273
+ rec = LoggingContextLogRecord(
6274
+ name=self._std.name,
6275
+ msg=ms,
6276
+ args=args,
6277
+
6278
+ _logging_context=ctx,
6279
+ )
6280
+
6281
+ self._std.handle(rec)
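
A minimal usage sketch for the adapter, assuming the abstract Logger base exposes the usual level methods (e.g. .info()) that route through _log():

import logging

logging.basicConfig(level=logging.INFO)

log = StdLogger(logging.getLogger('example'))  # 'example' is a hypothetical logger name
log.info('hello %s', 'world')  # context is captured, then dispatched via logging.Logger.handle()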
6282
+
6283
+
6284
+ ########################################
6285
+ # ../../../../../omlish/logs/modules.py
6286
+
6287
+
6288
+ ##
6289
+
6290
+
6291
+ def get_module_logger(mod_globals: ta.Mapping[str, ta.Any]) -> Logger:
6292
+ return StdLogger(logging.getLogger(mod_globals.get('__name__'))) # noqa
6293
+
6294
+
6295
+ ########################################
6296
+ # ../cursor.py
6297
+
6298
+
6299
+ log = get_module_logger(globals()) # noqa
6300
+
6301
+
6302
+ ##
6303
+
6304
+
6305
+ class JournalctlToAwsCursor:
6306
+ def __init__(
6307
+ self,
6308
+ cursor_file: ta.Optional[str] = None,
6309
+ *,
6310
+ ensure_locked: ta.Optional[ta.Callable[[], None]] = None,
6311
+ ) -> None:
6312
+ super().__init__()
6313
+
6314
+ self._cursor_file = cursor_file
6315
+ self._ensure_locked = ensure_locked
6316
+
6317
+ #
6318
+
6319
+ def get(self) -> ta.Optional[str]:
6320
+ if self._ensure_locked is not None:
6321
+ self._ensure_locked()
6322
+
6323
+ if not (cf := self._cursor_file):
6324
+ return None
6325
+ cf = os.path.expanduser(cf)
6326
+
6327
+ try:
6328
+ with open(cf) as f:
6329
+ return f.read().strip()
6330
+ except FileNotFoundError:
6331
+ return None
6332
+
6333
+ def set(self, cursor: str) -> None:
6334
+ if self._ensure_locked is not None:
6335
+ self._ensure_locked()
6336
+
6337
+ if not (cf := self._cursor_file):
6338
+ return
6339
+ cf = os.path.expanduser(cf)
6340
+
6341
+ log.info('Writing cursor file %s : %s', cf, cursor)
6342
+ with open(ncf := cf + '.next', 'w') as f:
6343
+ f.write(cursor)
6344
+
6345
+ os.rename(ncf, cf)
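
The write-to-'.next'-then-os.rename() sequence makes the update atomic on POSIX filesystems: a concurrent reader sees either the old or the new cursor, never a torn write. A usage sketch (the file path and cursor token are hypothetical):

cursor = JournalctlToAwsCursor('~/.journald2aws/cursor')

if (c := cursor.get()) is not None:
    print('resuming from', c)

cursor.set('s=0123456789abcdef')  # hypothetical journald cursor token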
6346
+
6347
+
6348
+ ########################################
6349
+ # ../../../../journald/messages.py
6350
+
6351
+
6352
+ log = get_module_logger(globals()) # noqa
6353
+
6354
+
6355
+ ##
6356
+
6357
+
6358
+ @dc.dataclass(frozen=True)
6359
+ class JournalctlMessage:
6360
+ raw: bytes
6361
+ dct: ta.Optional[ta.Mapping[str, ta.Any]] = None
6362
+ cursor: ta.Optional[str] = None
6363
+ ts_us: ta.Optional[int] = None # microseconds UTC
6364
+
6365
+
6366
+ class JournalctlMessageBuilder:
6367
+ def __init__(self) -> None:
6368
+ super().__init__()
6369
+
6370
+ self._buf = DelimitingBuffer(b'\n')
6371
+
6372
+ _cursor_field = '__CURSOR'
6373
+
6374
+ _timestamp_fields: ta.Sequence[str] = [
6375
+ '_SOURCE_REALTIME_TIMESTAMP',
6376
+ '__REALTIME_TIMESTAMP',
6377
+ ]
6378
+
6379
+ def _get_message_timestamp(self, dct: ta.Mapping[str, ta.Any]) -> ta.Optional[int]:
6380
+ for fld in self._timestamp_fields:
6381
+ if (tsv := dct.get(fld)) is None:
6382
+ continue
6383
+
6384
+ if isinstance(tsv, str):
6385
+ try:
6386
+ return int(tsv)
6387
+ except ValueError:
6388
+ try:
6389
+ return int(float(tsv))
6390
+ except ValueError:
6391
+ log.exception('Failed to parse timestamp: %r', tsv)
6392
+
6393
+ elif isinstance(tsv, (int, float)):
6394
+ return int(tsv)
6395
+
6396
+ log.error('No usable timestamp field in message: %r', dct)
6397
+ return None
6398
+
6399
+ def _make_message(self, raw: bytes) -> JournalctlMessage:
6400
+ dct = None
6401
+ cursor = None
6402
+ ts = None
6403
+
6404
+ try:
6405
+ dct = json.loads(raw.decode('utf-8', 'replace'))
6406
+ except Exception: # noqa
6407
+ log.exception('Failed to parse raw message: %r', raw)
6408
+
6409
+ else:
6410
+ cursor = dct.get(self._cursor_field)
6411
+ ts = self._get_message_timestamp(dct)
6412
+
6413
+ return JournalctlMessage(
6414
+ raw=raw,
6415
+ dct=dct,
6416
+ cursor=cursor,
6417
+ ts_us=ts,
6418
+ )
6419
+
6420
+ def feed(self, data: bytes) -> ta.Sequence[JournalctlMessage]:
6421
+ ret: ta.List[JournalctlMessage] = []
6422
+ for line in self._buf.feed(data):
6423
+ ret.append(self._make_message(check.isinstance(line, bytes)))
6424
+ return ret
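
A sketch of the feed/parse flow, assuming input arrives as newline-delimited `journalctl -o json` byte chunks (the field values are hypothetical):

mb = JournalctlMessageBuilder()

chunk = b'{"__CURSOR": "s=abc", "__REALTIME_TIMESTAMP": "1700000000000000", "MESSAGE": "hi"}\n'
for m in mb.feed(chunk):
    print(m.cursor, m.ts_us)  # -> s=abc 1700000000000000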
6425
+
6426
+
6427
+ ########################################
6428
+ # ../../../../threadworkers.py
6429
+ """
6430
+ FIXME:
6431
+ - group is racy - revisit has_started() and start/stop synchronization
6432
+
6433
+ TODO:
6434
+ - overhaul stop()
6435
+ - group -> 'context'? :|
6436
+ - shared stop_event?
6437
+ """
6438
+
6439
+
6440
+ log = get_module_logger(globals()) # noqa
6441
+
6442
+
6443
+ ##
6444
+
6445
+
6446
+ class ThreadWorker(ExitStacked, Abstract):
6447
+ def __init__(
6448
+ self,
6449
+ *,
6450
+ stop_event: ta.Optional[threading.Event] = None,
6451
+ worker_groups: ta.Optional[ta.Iterable['ThreadWorkerGroup']] = None,
6452
+ ) -> None:
6453
+ super().__init__()
6454
+
6455
+ if stop_event is None:
6456
+ stop_event = threading.Event()
6457
+ self._stop_event = stop_event
6458
+
6459
+ self._lock = threading.RLock()
6460
+ self._thread: ta.Optional[threading.Thread] = None
6461
+ self._last_heartbeat: ta.Optional[float] = None
6462
+
6463
+ for g in worker_groups or []:
6464
+ g.add(self)
6465
+
6466
+ #
6467
+
6468
+ @contextlib.contextmanager
6469
+ def _exit_stacked_init_wrapper(self) -> ta.Iterator[None]:
6470
+ with self._lock:
6471
+ yield
6472
+
6473
+ #
6474
+
6475
+ def should_stop(self) -> bool:
6476
+ return self._stop_event.is_set()
6477
+
6478
+ class Stopping(Exception): # noqa
6479
+ pass
6480
+
6481
+ #
6482
+
6483
+ @property
6484
+ def last_heartbeat(self) -> ta.Optional[float]:
6485
+ return self._last_heartbeat
6486
+
6487
+ def _heartbeat(
6488
+ self,
6489
+ *,
6490
+ no_stop_check: bool = False,
6491
+ ) -> None:
6492
+ self._last_heartbeat = time.time()
6493
+
6494
+ if not no_stop_check and self.should_stop():
6495
+ log.info('Stopping: %s', self)
6496
+ raise ThreadWorker.Stopping
6497
+
6498
+ #
6499
+
6500
+ def has_started(self) -> bool:
6501
+ return self._thread is not None
6502
+
6503
+ def is_alive(self) -> bool:
6504
+ return (thr := self._thread) is not None and thr.is_alive()
6505
+
6506
+ def start(self) -> None:
6507
+ with self._lock:
6508
+ if self._thread is not None:
6509
+ raise RuntimeError(f'Thread already started: {self!r}')
6510
+
6511
+ thr = threading.Thread(target=self.__thread_main)
6512
+ self._thread = thr
6513
+ thr.start()
6514
+
6515
+ #
6516
+
6517
+ def __thread_main(self) -> None:
6518
+ try:
6519
+ self._run()
6520
+ except ThreadWorker.Stopping:
6521
+ log.exception('Thread worker stopped: %r', self)
6522
+ except Exception: # noqa
6523
+ log.exception('Error in worker thread: %r', self)
6524
+ raise
6525
+
6526
+ @abc.abstractmethod
6527
+ def _run(self) -> None:
6528
+ raise NotImplementedError
6529
+
6530
+ #
6531
+
6532
+ def stop(self) -> None:
6533
+ self._stop_event.set()
6534
+
6535
+ def join(
6536
+ self,
6537
+ timeout: ta.Optional[float] = None,
6538
+ *,
6539
+ unless_not_started: bool = False,
6540
+ ) -> None:
6541
+ with self._lock:
6542
+ if self._thread is None:
6543
+ if not unless_not_started:
6544
+ raise RuntimeError(f'Thread not started: {self!r}')
6545
+ return
6546
+ self._thread.join(timeout)
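
A minimal sketch of a concrete worker: _run() loops and calls _heartbeat() on each iteration so a stop() request surfaces as ThreadWorker.Stopping (TickWorker is a hypothetical example subclass):

class TickWorker(ThreadWorker):
    def _run(self) -> None:
        while True:
            self._heartbeat()  # raises ThreadWorker.Stopping once stop() is called
            time.sleep(1.)

w = TickWorker()
w.start()
# ... later, from another thread ...
w.stop()
w.join(timeout=5.)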
6547
+
6548
+
6549
+ ##
6550
+
6551
+
6552
+ class ThreadWorkerGroup:
6553
+ @dc.dataclass()
6554
+ class _State:
6555
+ worker: ThreadWorker
6556
+
6557
+ last_heartbeat: ta.Optional[float] = None
6558
+
6559
+ def __init__(self) -> None:
6560
+ super().__init__()
6561
+
6562
+ self._lock = threading.RLock()
6563
+ self._states: ta.Dict[ThreadWorker, ThreadWorkerGroup._State] = {}
6564
+ self._last_heartbeat_check: ta.Optional[float] = None
6565
+
6566
+ #
6567
+
6568
+ def add(self, *workers: ThreadWorker) -> 'ThreadWorkerGroup':
6569
+ with self._lock:
6570
+ for w in workers:
6571
+ if w in self._states:
6572
+ raise KeyError(w)
6573
+ self._states[w] = ThreadWorkerGroup._State(w)
6574
+
6575
+ return self
6576
+
6577
+ #
6578
+
6579
+ def start_all(self) -> None:
6580
+ with self._lock:
6581
+ for thr in list(self._states):  # snapshot taken under the lock
6583
+ if not thr.has_started():
6584
+ thr.start()
6585
+
6586
+ def stop_all(self) -> None:
6587
+ for w in reversed(list(self._states)):
6588
+ if w.has_started():
6589
+ w.stop()
6590
+
6591
+ def join_all(self, timeout: ta.Optional[float] = None) -> None:
6592
+ for w in reversed(list(self._states)):
6593
+ if w.has_started():
6594
+ w.join(timeout, unless_not_started=True)
6595
+
6596
+ #
6597
+
6598
+ def get_dead(self) -> ta.List[ThreadWorker]:
6599
+ with self._lock:
6600
+ return [thr for thr in self._states if not thr.is_alive()]
6601
+
6602
+ def check_heartbeats(self) -> ta.Dict[ThreadWorker, float]:
6603
+ with self._lock:
6604
+ dct: ta.Dict[ThreadWorker, float] = {}
6605
+ for thr, st in self._states.items():
6606
+ if not thr.has_started():
6607
+ continue
6608
+ hb = thr.last_heartbeat
6609
+ if hb is None:
6610
+ hb = time.time()
6611
+ st.last_heartbeat = hb
6612
+ dct[st.worker] = time.time() - hb
6613
+ self._last_heartbeat_check = time.time()
6614
+ return dct
6615
+
6616
+
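
A supervision sketch tying the group API together, reusing the hypothetical TickWorker from the sketch above and assuming the module-level log exposes a warning() method:

group = ThreadWorkerGroup().add(TickWorker(), TickWorker())
group.start_all()

# Periodically, from a monitor loop:
for w, age in group.check_heartbeats().items():
    if age > 60.:
        log.warning('Stale worker heartbeat: %r (%.1fs)', w, age)

if group.get_dead():
    group.stop_all()
    group.join_all(timeout=5.)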
5442
6617
  ########################################
5443
6618
  # ../poster.py
5444
6619
  """