omdev 0.0.0.dev438__py3-none-any.whl → 0.0.0.dev440__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -0,0 +1,2031 @@
1
+ #!/usr/bin/env python3
2
+ # noinspection DuplicatedCode
3
+ # @omlish-lite
4
+ # @omlish-script
5
+ # @omlish-generated
6
+ # @omlish-amalg-output ../../../omlish/logs/_amalg.py
7
+ # @omlish-git-diff-omit
8
+ # ruff: noqa: N802 UP006 UP007 UP036 UP045 UP046
9
+ import abc
10
+ import collections.abc
11
+ import contextlib
12
+ import datetime
13
+ import functools
14
+ import io
15
+ import json
16
+ import logging
17
+ import os.path
18
+ import sys
19
+ import threading
20
+ import time
21
+ import traceback
22
+ import types
23
+ import typing as ta
24
+
25
+
26
+ ########################################
27
+
28
+
29
+ if sys.version_info < (3, 8):
30
+ raise OSError(f'Requires python >= (3, 8), got {sys.version_info} from {sys.executable}') # noqa
31
+
32
+
33
+ ########################################
34
+
35
+
36
+ # levels.py
37
+ LogLevel = int # ta.TypeAlias
38
+
39
+ # infos.py
40
+ LoggingMsgFn = ta.Callable[[], ta.Union[str, tuple]] # ta.TypeAlias
41
+ LoggingExcInfoTuple = ta.Tuple[ta.Type[BaseException], BaseException, ta.Optional[types.TracebackType]] # ta.TypeAlias
42
+ LoggingExcInfo = ta.Union[BaseException, LoggingExcInfoTuple] # ta.TypeAlias
43
+ LoggingExcInfoArg = ta.Union[LoggingExcInfo, bool, None] # ta.TypeAlias
44
+ LoggingContextInfo = ta.Any # ta.TypeAlias
45
+
46
+ # contexts.py
47
+ LoggingContextInfoT = ta.TypeVar('LoggingContextInfoT', bound=LoggingContextInfo)
48
+
49
+ # base.py
50
+ T = ta.TypeVar('T')
51
+
52
+
53
+ ########################################
54
+ # ../../lite/abstract.py
55
+
56
+
57
+ ##
58
+
59
+
60
+ _ABSTRACT_METHODS_ATTR = '__abstractmethods__'
61
+ _IS_ABSTRACT_METHOD_ATTR = '__isabstractmethod__'
62
+
63
+
64
+ def is_abstract_method(obj: ta.Any) -> bool:
65
+ return bool(getattr(obj, _IS_ABSTRACT_METHOD_ATTR, False))
66
+
67
+
68
+ def update_abstracts(cls, *, force=False):
69
+ if not force and not hasattr(cls, _ABSTRACT_METHODS_ATTR):
70
+ # Per stdlib: We check for __abstractmethods__ here because cls might be a C implementation or a python
71
+ # implementation (especially during testing), and we want to handle both cases.
72
+ return cls
73
+
74
+ abstracts: ta.Set[str] = set()
75
+
76
+ for scls in cls.__bases__:
77
+ for name in getattr(scls, _ABSTRACT_METHODS_ATTR, ()):
78
+ value = getattr(cls, name, None)
79
+ if getattr(value, _IS_ABSTRACT_METHOD_ATTR, False):
80
+ abstracts.add(name)
81
+
82
+ for name, value in cls.__dict__.items():
83
+ if getattr(value, _IS_ABSTRACT_METHOD_ATTR, False):
84
+ abstracts.add(name)
85
+
86
+ setattr(cls, _ABSTRACT_METHODS_ATTR, frozenset(abstracts))
87
+ return cls
88
+
89
+
90
+ #
91
+
92
+
93
+ class AbstractTypeError(TypeError):
94
+ pass
95
+
96
+
97
+ _FORCE_ABSTRACT_ATTR = '__forceabstract__'
98
+
99
+
100
+ class Abstract:
101
+ """
102
+ Different from, but interoperable with, abc.ABC / abc.ABCMeta:
103
+
104
+ - This raises AbstractTypeError during class creation, not instance instantiation - unless Abstract or abc.ABC are
105
+ explicitly present in the class's direct bases.
106
+ - This will forbid instantiation of classes with Abstract in their direct bases even if there are no
107
+ abstractmethods left on the class.
108
+ - This is a mixin, not a metaclass.
109
+ - As it is not an ABCMeta, this does not support virtual base classes. As a result, operations like `isinstance`
110
+ and `issubclass` are ~7x faster.
111
+ - It additionally enforces a base class order of (Abstract, abc.ABC) to preemptively prevent common mro conflicts.
112
+
113
+ If not mixed-in with an ABCMeta, it will update __abstractmethods__ itself.
114
+ """
115
+
116
+ __slots__ = ()
117
+
118
+ __abstractmethods__: ta.ClassVar[ta.FrozenSet[str]] = frozenset()
119
+
120
+ #
121
+
122
+ def __forceabstract__(self):
123
+ raise TypeError
124
+
125
+ # This is done manually, rather than through @abc.abstractmethod, to mask it from static analysis.
126
+ setattr(__forceabstract__, _IS_ABSTRACT_METHOD_ATTR, True)
127
+
128
+ #
129
+
130
+ def __init_subclass__(cls, **kwargs: ta.Any) -> None:
131
+ setattr(
132
+ cls,
133
+ _FORCE_ABSTRACT_ATTR,
134
+ getattr(Abstract, _FORCE_ABSTRACT_ATTR) if Abstract in cls.__bases__ else False,
135
+ )
136
+
137
+ super().__init_subclass__(**kwargs)
138
+
139
+ if not (Abstract in cls.__bases__ or abc.ABC in cls.__bases__):
140
+ ams = {a: cls for a, o in cls.__dict__.items() if is_abstract_method(o)}
141
+
142
+ seen = set(cls.__dict__)
143
+ for b in cls.__bases__:
144
+ ams.update({a: b for a in set(getattr(b, _ABSTRACT_METHODS_ATTR, [])) - seen}) # noqa
145
+ seen.update(dir(b))
146
+
147
+ if ams:
148
+ raise AbstractTypeError(
149
+ f'Cannot subclass abstract class {cls.__name__} with abstract methods: ' +
150
+ ', '.join(sorted([
151
+ '.'.join([
152
+ *([m] if (m := getattr(c, '__module__')) else []),
153
+ getattr(c, '__qualname__', getattr(c, '__name__')),
154
+ a,
155
+ ])
156
+ for a, c in ams.items()
157
+ ])),
158
+ )
159
+
160
+ xbi = (Abstract, abc.ABC) # , ta.Generic ?
161
+ bis = [(cls.__bases__.index(b), b) for b in xbi if b in cls.__bases__]
162
+ if bis != sorted(bis):
163
+ raise TypeError(
164
+ f'Abstract subclass {cls.__name__} must have proper base class order of '
165
+ f'({", ".join(getattr(b, "__name__") for b in xbi)}), got: '
166
+ f'({", ".join(getattr(b, "__name__") for _, b in sorted(bis))})',
167
+ )
168
+
169
+ if not isinstance(cls, abc.ABCMeta):
170
+ update_abstracts(cls, force=True)
171
+
172
+
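# Usage sketch (illustrative, assuming the names defined above in this amalgamation -
# Abstract, AbstractTypeError - are in scope). It shows the key difference from abc.ABC:
# incomplete subclasses are rejected at class-creation time, not at instantiation.
class ExampleBase(Abstract):
    @abc.abstractmethod
    def run(self) -> None:
        raise NotImplementedError

class ExampleImpl(ExampleBase):
    def run(self) -> None:
        print('ok')

ExampleImpl().run()  # Instantiable: all abstract methods are implemented.

try:
    class ExampleBroken(ExampleBase):  # Still abstract, and Abstract / abc.ABC not in direct bases.
        pass
except AbstractTypeError as e:
    print('rejected at class creation time:', e)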
173
+ ########################################
174
+ # ../../lite/json.py
175
+
176
+
177
+ ##
178
+
179
+
180
+ JSON_PRETTY_INDENT = 2
181
+
182
+ JSON_PRETTY_KWARGS: ta.Mapping[str, ta.Any] = dict(
183
+ indent=JSON_PRETTY_INDENT,
184
+ )
185
+
186
+ json_dump_pretty: ta.Callable[..., None] = functools.partial(json.dump, **JSON_PRETTY_KWARGS)
187
+ json_dumps_pretty: ta.Callable[..., str] = functools.partial(json.dumps, **JSON_PRETTY_KWARGS)
188
+
189
+
190
+ ##
191
+
192
+
193
+ JSON_COMPACT_SEPARATORS = (',', ':')
194
+
195
+ JSON_COMPACT_KWARGS: ta.Mapping[str, ta.Any] = dict(
196
+ indent=None,
197
+ separators=JSON_COMPACT_SEPARATORS,
198
+ )
199
+
200
+ json_dump_compact: ta.Callable[..., None] = functools.partial(json.dump, **JSON_COMPACT_KWARGS)
201
+ json_dumps_compact: ta.Callable[..., str] = functools.partial(json.dumps, **JSON_COMPACT_KWARGS)
202
+
203
+
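# Usage sketch (illustrative): the helpers above are thin functools.partial wrappers over
# the stdlib json module, pre-bound to pretty or compact formatting.
_obj = {'a': 1, 'b': [1, 2, 3]}
print(json_dumps_compact(_obj))  # '{"a":1,"b":[1,2,3]}'
print(json_dumps_pretty(_obj))   # multi-line output with indent=2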
204
+ ########################################
205
+ # ../levels.py
206
+
207
+
208
+ ##
209
+
210
+
211
+ @ta.final
212
+ class NamedLogLevel(int):
213
+ # logging.getLevelNamesMapping (or, as that is unavailable <3.11, logging._nameToLevel) includes the deprecated
214
+ # aliases.
215
+ _NAMES_BY_INT: ta.ClassVar[ta.Mapping[LogLevel, str]] = dict(sorted(logging._levelToName.items(), key=lambda t: -t[0])) # noqa
216
+
217
+ _INTS_BY_NAME: ta.ClassVar[ta.Mapping[str, LogLevel]] = {v: k for k, v in _NAMES_BY_INT.items()}
218
+
219
+ _NAME_INT_PAIRS: ta.ClassVar[ta.Sequence[ta.Tuple[str, LogLevel]]] = list(_INTS_BY_NAME.items())
220
+
221
+ #
222
+
223
+ _CACHE: ta.ClassVar[ta.MutableMapping[int, 'NamedLogLevel']] = {}
224
+
225
+ @ta.overload
226
+ def __new__(cls, name: str, offset: int = 0, /) -> 'NamedLogLevel':
227
+ ...
228
+
229
+ @ta.overload
230
+ def __new__(cls, i: int, /) -> 'NamedLogLevel':
231
+ ...
232
+
233
+ def __new__(cls, x, offset=0, /):
234
+ if isinstance(x, str):
235
+ return cls(cls._INTS_BY_NAME[x.upper()] + offset)
236
+ elif not offset and (c := cls._CACHE.get(x)) is not None:
237
+ return c
238
+ else:
239
+ return super().__new__(cls, x + offset)
240
+
241
+ #
242
+
243
+ _name_and_offset: ta.Tuple[str, int]
244
+
245
+ @property
246
+ def name_and_offset(self) -> ta.Tuple[str, int]:
247
+ try:
248
+ return self._name_and_offset
249
+ except AttributeError:
250
+ pass
251
+
252
+ if (n := self._NAMES_BY_INT.get(self)) is not None:
253
+ t = (n, 0)
254
+ else:
255
+ for n, i in self._NAME_INT_PAIRS: # noqa
256
+ if self >= i:
257
+ t = (n, (self - i))
258
+ break
259
+ else:
260
+ t = ('NOTSET', int(self))
261
+
262
+ self._name_and_offset = t
263
+ return t
264
+
265
+ @property
266
+ def exact_name(self) -> ta.Optional[str]:
267
+ n, o = self.name_and_offset
268
+ return n if not o else None
269
+
270
+ @property
271
+ def effective_name(self) -> str:
272
+ n, _ = self.name_and_offset
273
+ return n
274
+
275
+ #
276
+
277
+ def __str__(self) -> str:
278
+ return self.exact_name or '{}{:+d}'.format(*self.name_and_offset)
279
+
280
+ def __repr__(self) -> str:
281
+ n, o = self.name_and_offset
282
+ return f'{self.__class__.__name__}({n!r}{f", {int(o)}" if o else ""})'
283
+
284
+ #
285
+
286
+ CRITICAL: ta.ClassVar['NamedLogLevel']
287
+ ERROR: ta.ClassVar['NamedLogLevel']
288
+ WARNING: ta.ClassVar['NamedLogLevel']
289
+ INFO: ta.ClassVar['NamedLogLevel']
290
+ DEBUG: ta.ClassVar['NamedLogLevel']
291
+ NOTSET: ta.ClassVar['NamedLogLevel']
292
+
293
+
294
+ NamedLogLevel.CRITICAL = NamedLogLevel(logging.CRITICAL)
295
+ NamedLogLevel.ERROR = NamedLogLevel(logging.ERROR)
296
+ NamedLogLevel.WARNING = NamedLogLevel(logging.WARNING)
297
+ NamedLogLevel.INFO = NamedLogLevel(logging.INFO)
298
+ NamedLogLevel.DEBUG = NamedLogLevel(logging.DEBUG)
299
+ NamedLogLevel.NOTSET = NamedLogLevel(logging.NOTSET)
300
+
301
+
302
+ NamedLogLevel._CACHE.update({i: NamedLogLevel(i) for i in NamedLogLevel._NAMES_BY_INT}) # noqa
303
+
304
+
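# Usage sketch (illustrative): NamedLogLevel is an int subclass, so it compares like a
# plain level number while also exposing a name/offset decomposition.
_lvl = NamedLogLevel('WARNING', 5)          # same value as NamedLogLevel(logging.WARNING + 5)
assert _lvl == logging.WARNING + 5          # still just an int
print(_lvl.name_and_offset)                 # ('WARNING', 5)
print(_lvl.exact_name, _lvl.effective_name) # None WARNING
print(repr(NamedLogLevel('INFO')))          # "NamedLogLevel('INFO')"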
305
+ ########################################
306
+ # ../std/filters.py
307
+
308
+
309
+ ##
310
+
311
+
312
+ class TidLoggingFilter(logging.Filter):
313
+ def filter(self, record):
314
+ # FIXME: handle better - missing from wasm and cosmos
315
+ if hasattr(threading, 'get_native_id'):
316
+ record.tid = threading.get_native_id()
317
+ else:
318
+ record.tid = '?'
319
+ return True
320
+
321
+
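# Usage sketch (illustrative): TidLoggingFilter stamps each record with a 'tid' attribute
# (the OS thread id where available), so a handler's format string can reference %(tid)s.
_th = logging.StreamHandler()
_th.addFilter(TidLoggingFilter())
_th.setFormatter(logging.Formatter('tid=%(tid)s %(levelname)s %(message)s'))
logging.getLogger('tid-example').addHandler(_th)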
322
+ ########################################
323
+ # ../std/proxy.py
324
+
325
+
326
+ ##
327
+
328
+
329
+ class ProxyLoggingFilterer(logging.Filterer):
330
+ def __init__(self, underlying: logging.Filterer) -> None: # noqa
331
+ self._underlying = underlying
332
+
333
+ @property
334
+ def underlying(self) -> logging.Filterer:
335
+ return self._underlying
336
+
337
+ @property
338
+ def filters(self):
339
+ return self._underlying.filters
340
+
341
+ @filters.setter
342
+ def filters(self, filters):
343
+ self._underlying.filters = filters
344
+
345
+ def addFilter(self, filter): # noqa
346
+ self._underlying.addFilter(filter)
347
+
348
+ def removeFilter(self, filter): # noqa
349
+ self._underlying.removeFilter(filter)
350
+
351
+ def filter(self, record):
352
+ return self._underlying.filter(record)
353
+
354
+
355
+ class ProxyLoggingHandler(ProxyLoggingFilterer, logging.Handler):
356
+ def __init__(self, underlying: logging.Handler) -> None: # noqa
357
+ ProxyLoggingFilterer.__init__(self, underlying)
358
+
359
+ _underlying: logging.Handler
360
+
361
+ @property
362
+ def underlying(self) -> logging.Handler:
363
+ return self._underlying
364
+
365
+ def get_name(self):
366
+ return self._underlying.get_name()
367
+
368
+ def set_name(self, name):
369
+ self._underlying.set_name(name)
370
+
371
+ @property
372
+ def name(self): # type: ignore[override]
373
+ return self._underlying.name
374
+
375
+ @property
376
+ def level(self):
377
+ return self._underlying.level
378
+
379
+ @level.setter
380
+ def level(self, level):
381
+ self._underlying.level = level
382
+
383
+ @property
384
+ def formatter(self):
385
+ return self._underlying.formatter
386
+
387
+ @formatter.setter
388
+ def formatter(self, formatter):
389
+ self._underlying.formatter = formatter
390
+
391
+ def createLock(self):
392
+ self._underlying.createLock()
393
+
394
+ def acquire(self):
395
+ self._underlying.acquire()
396
+
397
+ def release(self):
398
+ self._underlying.release()
399
+
400
+ def setLevel(self, level):
401
+ self._underlying.setLevel(level)
402
+
403
+ def format(self, record):
404
+ return self._underlying.format(record)
405
+
406
+ def emit(self, record):
407
+ self._underlying.emit(record)
408
+
409
+ def handle(self, record):
410
+ return self._underlying.handle(record)
411
+
412
+ def setFormatter(self, fmt):
413
+ self._underlying.setFormatter(fmt)
414
+
415
+ def flush(self):
416
+ self._underlying.flush()
417
+
418
+ def close(self):
419
+ self._underlying.close()
420
+
421
+ def handleError(self, record):
422
+ self._underlying.handleError(record)
423
+
424
+
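# Usage sketch (illustrative): ProxyLoggingHandler forwards every Handler/Filterer
# operation to the wrapped handler, so code can mark or wrap an existing handler without
# copying its configuration.
_inner = logging.StreamHandler()
_proxy = ProxyLoggingHandler(_inner)
_proxy.setLevel(logging.INFO)                        # mutates the underlying handler
_proxy.setFormatter(logging.Formatter('%(message)s'))
assert _inner.level == logging.INFO
assert _inner.formatter is _proxy.formatter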
425
+ ########################################
426
+ # ../warnings.py
427
+
428
+
429
+ ##
430
+
431
+
432
+ class LoggingSetupWarning(Warning):
433
+ pass
434
+
435
+
436
+ ########################################
437
+ # ../infos.py
438
+ """
439
+ TODO:
440
+ - remove redundant info fields only present for std adaptation (Level.name, ...)
441
+ """
442
+
443
+
444
+ ##
445
+
446
+
447
+ def logging_context_info(cls):
448
+ return cls
449
+
450
+
451
+ @ta.final
452
+ class LoggingContextInfos:
453
+ def __new__(cls, *args, **kwargs): # noqa
454
+ raise TypeError
455
+
456
+ #
457
+
458
+ @logging_context_info
459
+ @ta.final
460
+ class Name(ta.NamedTuple):
461
+ name: str
462
+
463
+ @logging_context_info
464
+ @ta.final
465
+ class Level(ta.NamedTuple):
466
+ level: NamedLogLevel
467
+ name: str
468
+
469
+ @classmethod
470
+ def build(cls, level: int) -> 'LoggingContextInfos.Level':
471
+ nl: NamedLogLevel = level if level.__class__ is NamedLogLevel else NamedLogLevel(level) # type: ignore[assignment] # noqa
472
+ return cls(
473
+ level=nl,
474
+ name=logging.getLevelName(nl),
475
+ )
476
+
477
+ @logging_context_info
478
+ @ta.final
479
+ class Msg(ta.NamedTuple):
480
+ msg: str
481
+ args: ta.Union[tuple, ta.Mapping[ta.Any, ta.Any], None]
482
+
483
+ @classmethod
484
+ def build(
485
+ cls,
486
+ msg: ta.Union[str, tuple, LoggingMsgFn],
487
+ *args: ta.Any,
488
+ ) -> 'LoggingContextInfos.Msg':
489
+ s: str
490
+ a: ta.Any
491
+
492
+ if callable(msg):
493
+ if args:
494
+ raise TypeError(f'Must not provide both a message function and args: {msg=} {args=}')
495
+ x = msg()
496
+ if isinstance(x, str):
497
+ s, a = x, ()
498
+ elif isinstance(x, tuple):
499
+ if x:
500
+ s, a = x[0], x[1:]
501
+ else:
502
+ s, a = '', ()
503
+ else:
504
+ raise TypeError(x)
505
+
506
+ elif isinstance(msg, tuple):
507
+ if args:
508
+ raise TypeError(f'Must not provide both a tuple message and args: {msg=} {args=}')
509
+ if msg:
510
+ s, a = msg[0], msg[1:]
511
+ else:
512
+ s, a = '', ()
513
+
514
+ elif isinstance(msg, str):
515
+ s, a = msg, args
516
+
517
+ else:
518
+ raise TypeError(msg)
519
+
520
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L307 # noqa
521
+ if a and len(a) == 1 and isinstance(a[0], collections.abc.Mapping) and a[0]:
522
+ a = a[0]
523
+
524
+ return cls(
525
+ msg=s,
526
+ args=a,
527
+ )
528
+
529
+ @logging_context_info
530
+ @ta.final
531
+ class Extra(ta.NamedTuple):
532
+ extra: ta.Mapping[ta.Any, ta.Any]
533
+
534
+ @logging_context_info
535
+ @ta.final
536
+ class Time(ta.NamedTuple):
537
+ ns: int
538
+ secs: float
539
+ msecs: float
540
+ relative_secs: float
541
+
542
+ @classmethod
543
+ def get_std_start_ns(cls) -> int:
544
+ x: ta.Any = logging._startTime # type: ignore[attr-defined] # noqa
545
+
546
+ # Before 3.13.0b1 this will be `time.time()`, a float of seconds. After that, it will be `time.time_ns()`,
547
+ # an int.
548
+ #
549
+ # See:
550
+ # - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
551
+ #
552
+ if isinstance(x, float):
553
+ return int(x * 1e9)
554
+ else:
555
+ return x
556
+
557
+ @classmethod
558
+ def build(
559
+ cls,
560
+ ns: int,
561
+ *,
562
+ start_ns: ta.Optional[int] = None,
563
+ ) -> 'LoggingContextInfos.Time':
564
+ # https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
565
+ secs = ns / 1e9 # ns to float seconds
566
+
567
+ # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
568
+ # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
569
+ # Convert to float by adding 0.0 for historical reasons. See gh-89047
570
+ msecs = (ns % 1_000_000_000) // 1_000_000 + 0.0
571
+
572
+ # https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
573
+ if msecs == 999.0 and int(secs) != ns // 1_000_000_000:
574
+ # ns -> sec conversion can round up, e.g:
575
+ # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
576
+ msecs = 0.0
577
+
578
+ if start_ns is None:
579
+ start_ns = cls.get_std_start_ns()
580
+ relative_secs = (ns - start_ns) / 1e6  # Milliseconds, despite the name - matches LogRecord.relativeCreated
581
+
582
+ return cls(
583
+ ns=ns,
584
+ secs=secs,
585
+ msecs=msecs,
586
+ relative_secs=relative_secs,
587
+ )
588
+
589
+ @logging_context_info
590
+ @ta.final
591
+ class Exc(ta.NamedTuple):
592
+ info: LoggingExcInfo
593
+ info_tuple: LoggingExcInfoTuple
594
+
595
+ @classmethod
596
+ def build(
597
+ cls,
598
+ arg: LoggingExcInfoArg = False,
599
+ ) -> ta.Optional['LoggingContextInfos.Exc']:
600
+ if arg is True:
601
+ sys_exc_info = sys.exc_info()
602
+ if sys_exc_info[0] is not None:
603
+ arg = sys_exc_info
604
+ else:
605
+ arg = None
606
+ elif arg is False:
607
+ arg = None
608
+ if arg is None:
609
+ return None
610
+
611
+ info: LoggingExcInfo = arg
612
+ if isinstance(info, BaseException):
613
+ info_tuple: LoggingExcInfoTuple = (type(info), info, info.__traceback__) # noqa
614
+ else:
615
+ info_tuple = info
616
+
617
+ return cls(
618
+ info=info,
619
+ info_tuple=info_tuple,
620
+ )
621
+
622
+ @logging_context_info
623
+ @ta.final
624
+ class Caller(ta.NamedTuple):
625
+ file_path: str
626
+ line_no: int
627
+ func_name: str
628
+ stack_info: ta.Optional[str]
629
+
630
+ @classmethod
631
+ def is_internal_frame(cls, frame: types.FrameType) -> bool:
632
+ file_path = os.path.normcase(frame.f_code.co_filename)
633
+
634
+ # Yes, really.
635
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L204 # noqa
636
+ # https://github.com/python/cpython/commit/5ca6d7469be53960843df39bb900e9c3359f127f
637
+ if 'importlib' in file_path and '_bootstrap' in file_path:
638
+ return True
639
+
640
+ return False
641
+
642
+ @classmethod
643
+ def find_frame(cls, stack_offset: int = 0) -> ta.Optional[types.FrameType]:
644
+ f: ta.Optional[types.FrameType] = sys._getframe(2 + stack_offset) # noqa
645
+
646
+ while f is not None:
647
+ # NOTE: We don't check __file__ like stdlib since we may be running amalgamated - we rely on careful,
648
+ # manual stack_offset management.
649
+ if hasattr(f, 'f_code'):
650
+ return f
651
+
652
+ f = f.f_back
653
+
654
+ return None
655
+
656
+ @classmethod
657
+ def build(
658
+ cls,
659
+ stack_offset: int = 0,
660
+ *,
661
+ stack_info: bool = False,
662
+ ) -> ta.Optional['LoggingContextInfos.Caller']:
663
+ if (f := cls.find_frame(stack_offset + 1)) is None:
664
+ return None
665
+
666
+ # https://github.com/python/cpython/blob/08e9794517063c8cd92c48714071b1d3c60b71bd/Lib/logging/__init__.py#L1616-L1623 # noqa
667
+ sinfo = None
668
+ if stack_info:
669
+ sio = io.StringIO()
670
+ traceback.print_stack(f, file=sio)
671
+ sinfo = sio.getvalue()
672
+ sio.close()
673
+ if sinfo[-1] == '\n':
674
+ sinfo = sinfo[:-1]
675
+
676
+ return cls(
677
+ file_path=f.f_code.co_filename,
678
+ line_no=f.f_lineno or 0,
679
+ func_name=f.f_code.co_name,
680
+ stack_info=sinfo,
681
+ )
682
+
683
+ @logging_context_info
684
+ @ta.final
685
+ class SourceFile(ta.NamedTuple):
686
+ file_name: str
687
+ module: str
688
+
689
+ @classmethod
690
+ def build(cls, caller_file_path: ta.Optional[str]) -> ta.Optional['LoggingContextInfos.SourceFile']:
691
+ if caller_file_path is None:
692
+ return None
693
+
694
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L331-L336 # noqa
695
+ try:
696
+ file_name = os.path.basename(caller_file_path)
697
+ module = os.path.splitext(file_name)[0]
698
+ except (TypeError, ValueError, AttributeError):
699
+ return None
700
+
701
+ return cls(
702
+ file_name=file_name,
703
+ module=module,
704
+ )
705
+
706
+ @logging_context_info
707
+ @ta.final
708
+ class Thread(ta.NamedTuple):
709
+ ident: int
710
+ native_id: ta.Optional[int]
711
+ name: str
712
+
713
+ @classmethod
714
+ def build(cls) -> 'LoggingContextInfos.Thread':
715
+ return cls(
716
+ ident=threading.get_ident(),
717
+ native_id=threading.get_native_id() if hasattr(threading, 'get_native_id') else None,
718
+ name=threading.current_thread().name,
719
+ )
720
+
721
+ @logging_context_info
722
+ @ta.final
723
+ class Process(ta.NamedTuple):
724
+ pid: int
725
+
726
+ @classmethod
727
+ def build(cls) -> 'LoggingContextInfos.Process':
728
+ return cls(
729
+ pid=os.getpid(),
730
+ )
731
+
732
+ @logging_context_info
733
+ @ta.final
734
+ class Multiprocessing(ta.NamedTuple):
735
+ process_name: str
736
+
737
+ @classmethod
738
+ def build(cls) -> ta.Optional['LoggingContextInfos.Multiprocessing']:
739
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L355-L364 # noqa
740
+ if (mp := sys.modules.get('multiprocessing')) is None:
741
+ return None
742
+
743
+ return cls(
744
+ process_name=mp.current_process().name,
745
+ )
746
+
747
+ @logging_context_info
748
+ @ta.final
749
+ class AsyncioTask(ta.NamedTuple):
750
+ name: str
751
+
752
+ @classmethod
753
+ def build(cls) -> ta.Optional['LoggingContextInfos.AsyncioTask']:
754
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L372-L377 # noqa
755
+ if (asyncio := sys.modules.get('asyncio')) is None:
756
+ return None
757
+
758
+ try:
759
+ task = asyncio.current_task()
760
+ except Exception: # noqa
761
+ return None
762
+
763
+ if task is None:
764
+ return None
765
+
766
+ return cls(
767
+ name=task.get_name(), # Always non-None
768
+ )
769
+
770
+
771
+ ##
772
+
773
+
774
+ class UnexpectedLoggingStartTimeWarning(LoggingSetupWarning):
775
+ pass
776
+
777
+
778
+ def _check_logging_start_time() -> None:
779
+ if (x := LoggingContextInfos.Time.get_std_start_ns()) < (t := time.time()):
780
+ import warnings # noqa
781
+
782
+ warnings.warn(
783
+ f'Unexpected logging start time detected: '
784
+ f'get_std_start_ns={x}, '
785
+ f'time.time()={t}',
786
+ UnexpectedLoggingStartTimeWarning,
787
+ )
788
+
789
+
790
+ _check_logging_start_time()
791
+
792
+
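# Usage sketch (illustrative): each LoggingContextInfos.* is an immutable NamedTuple
# describing one facet of a logging call, and the build() classmethods normalize raw
# arguments much like the stdlib LogRecord constructor does.
print(LoggingContextInfos.Msg.build('hello %s', 'world'))    # Msg(msg='hello %s', args=('world',))
print(LoggingContextInfos.Level.build(logging.INFO).name)    # 'INFO'
print(LoggingContextInfos.Time.build(time.time_ns()).msecs)  # 0.0 .. 999.0
try:
    raise ValueError('boom')
except ValueError:
    _exc = LoggingContextInfos.Exc.build(True)               # True -> capture sys.exc_info()
    print(type(_exc.info_tuple[1]).__name__)                 # 'ValueError'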
793
+ ########################################
794
+ # ../std/json.py
795
+ """
796
+ TODO:
797
+ - translate json keys
798
+ """
799
+
800
+
801
+ ##
802
+
803
+
804
+ class JsonLoggingFormatter(logging.Formatter):
805
+ KEYS: ta.Mapping[str, bool] = {
806
+ 'name': False,
807
+ 'msg': False,
808
+ 'args': False,
809
+ 'levelname': False,
810
+ 'levelno': False,
811
+ 'pathname': False,
812
+ 'filename': False,
813
+ 'module': False,
814
+ 'exc_info': True,
815
+ 'exc_text': True,
816
+ 'stack_info': True,
817
+ 'lineno': False,
818
+ 'funcName': False,
819
+ 'created': False,
820
+ 'msecs': False,
821
+ 'relativeCreated': False,
822
+ 'thread': False,
823
+ 'threadName': False,
824
+ 'processName': False,
825
+ 'process': False,
826
+ }
827
+
828
+ def __init__(
829
+ self,
830
+ *args: ta.Any,
831
+ json_dumps: ta.Optional[ta.Callable[[ta.Any], str]] = None,
832
+ **kwargs: ta.Any,
833
+ ) -> None:
834
+ super().__init__(*args, **kwargs)
835
+
836
+ if json_dumps is None:
837
+ json_dumps = json_dumps_compact
838
+ self._json_dumps = json_dumps
839
+
840
+ def format(self, record: logging.LogRecord) -> str:
841
+ dct = {
842
+ k: v
843
+ for k, o in self.KEYS.items()
844
+ for v in [getattr(record, k)]
845
+ if not (o and v is None)
846
+ }
847
+ return self._json_dumps(dct)
848
+
849
+
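# Usage sketch (illustrative): JsonLoggingFormatter serializes a fixed set of LogRecord
# attributes; keys flagged True in KEYS (exc_info, exc_text, stack_info) are omitted when
# they are None.
_jh = logging.StreamHandler()
_jh.setFormatter(JsonLoggingFormatter())
_jl = logging.getLogger('json-example')
_jl.addHandler(_jh)
_jl.setLevel(logging.INFO)
_jl.info('hello %s', 'world')  # emits one compact JSON object per record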
850
+ ########################################
851
+ # ../contexts.py
852
+
853
+
854
+ ##
855
+
856
+
857
+ class LoggingContext(Abstract):
858
+ @abc.abstractmethod
859
+ def get_info(self, ty: ta.Type[LoggingContextInfoT]) -> ta.Optional[LoggingContextInfoT]:
860
+ raise NotImplementedError
861
+
862
+ @ta.final
863
+ def __getitem__(self, ty: ta.Type[LoggingContextInfoT]) -> ta.Optional[LoggingContextInfoT]:
864
+ return self.get_info(ty)
865
+
866
+ @ta.final
867
+ def must_get_info(self, ty: ta.Type[LoggingContextInfoT]) -> LoggingContextInfoT:
868
+ if (info := self.get_info(ty)) is None:
869
+ raise TypeError(f'LoggingContextInfo absent: {ty}')
870
+ return info
871
+
872
+
873
+ @ta.final
874
+ class SimpleLoggingContext(LoggingContext):
875
+ def __init__(self, *infos: LoggingContextInfo) -> None:
876
+ self._infos: ta.Dict[ta.Type[LoggingContextInfo], LoggingContextInfo] = {type(i): i for i in infos}
877
+
878
+ def get_info(self, ty: ta.Type[LoggingContextInfoT]) -> ta.Optional[LoggingContextInfoT]:
879
+ return self._infos.get(ty)
880
+
881
+
882
+ ##
883
+
884
+
885
+ class CaptureLoggingContext(LoggingContext, Abstract):
886
+ @abc.abstractmethod
887
+ def set_basic(
888
+ self,
889
+ name: str,
890
+
891
+ msg: ta.Union[str, tuple, LoggingMsgFn],
892
+ args: tuple,
893
+ ) -> 'CaptureLoggingContext':
894
+ raise NotImplementedError
895
+
896
+ #
897
+
898
+ class AlreadyCapturedError(Exception):
899
+ pass
900
+
901
+ class NotCapturedError(Exception):
902
+ pass
903
+
904
+ @abc.abstractmethod
905
+ def capture(self) -> None:
906
+ """Must be cooperatively called only from the expected locations."""
907
+
908
+ raise NotImplementedError
909
+
910
+
911
+ @ta.final
912
+ class CaptureLoggingContextImpl(CaptureLoggingContext):
913
+ @ta.final
914
+ class NOT_SET: # noqa
915
+ def __new__(cls, *args, **kwargs): # noqa
916
+ raise TypeError
917
+
918
+ #
919
+
920
+ def __init__(
921
+ self,
922
+ level: LogLevel,
923
+ *,
924
+ time_ns: ta.Optional[int] = None,
925
+
926
+ exc_info: LoggingExcInfoArg = False,
927
+
928
+ caller: ta.Union[LoggingContextInfos.Caller, ta.Type[NOT_SET], None] = NOT_SET,
929
+ stack_offset: int = 0,
930
+ stack_info: bool = False,
931
+ ) -> None:
932
+ if time_ns is None:
933
+ time_ns = time.time_ns()
934
+
935
+ # Done early to not trample on sys.exc_info()
936
+ exc = LoggingContextInfos.Exc.build(exc_info)
937
+
938
+ self._infos: ta.Dict[ta.Type[LoggingContextInfo], LoggingContextInfo] = {}
939
+ self._set_info(
940
+ LoggingContextInfos.Level.build(level),
941
+ exc,
942
+ LoggingContextInfos.Time.build(time_ns),
943
+ )
944
+
945
+ if caller is not CaptureLoggingContextImpl.NOT_SET:
946
+ self._infos[LoggingContextInfos.Caller] = caller
947
+ else:
948
+ self._stack_offset = stack_offset
949
+ self._stack_info = stack_info
950
+
951
+ def _set_info(self, *infos: ta.Optional[LoggingContextInfo]) -> 'CaptureLoggingContextImpl':
952
+ for info in infos:
953
+ if info is not None:
954
+ self._infos[type(info)] = info
955
+ return self
956
+
957
+ def get_info(self, ty: ta.Type[LoggingContextInfoT]) -> ta.Optional[LoggingContextInfoT]:
958
+ return self._infos.get(ty)
959
+
960
+ ##
961
+
962
+ def set_basic(
963
+ self,
964
+ name: str,
965
+
966
+ msg: ta.Union[str, tuple, LoggingMsgFn],
967
+ args: tuple,
968
+ ) -> 'CaptureLoggingContextImpl':
969
+ return self._set_info(
970
+ LoggingContextInfos.Name(name),
971
+ LoggingContextInfos.Msg.build(msg, *args),
972
+ )
973
+
974
+ ##
975
+
976
+ _stack_offset: int
977
+ _stack_info: bool
978
+
979
+ def inc_stack_offset(self, ofs: int = 1) -> 'CaptureLoggingContext':
980
+ if hasattr(self, '_stack_offset'):
981
+ self._stack_offset += ofs
982
+ return self
983
+
984
+ _has_captured: bool = False
985
+
986
+ def capture(self) -> None:
987
+ if self._has_captured:
988
+ raise CaptureLoggingContextImpl.AlreadyCapturedError
989
+ self._has_captured = True
990
+
991
+ if LoggingContextInfos.Caller not in self._infos:
992
+ self._set_info(LoggingContextInfos.Caller.build(
993
+ self._stack_offset + 1,
994
+ stack_info=self._stack_info,
995
+ ))
996
+
997
+ if (caller := self[LoggingContextInfos.Caller]) is not None:
998
+ self._set_info(LoggingContextInfos.SourceFile.build(
999
+ caller.file_path,
1000
+ ))
1001
+
1002
+ self._set_info(
1003
+ LoggingContextInfos.Thread.build(),
1004
+ LoggingContextInfos.Process.build(),
1005
+ LoggingContextInfos.Multiprocessing.build(),
1006
+ LoggingContextInfos.AsyncioTask.build(),
1007
+ )
1008
+
1009
+
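# Usage sketch (illustrative): the rough lifecycle a logger implementation drives -
# construct at the call site, fill in the basics, then capture() exactly once. Passing
# caller=None skips frame introspection here, since this sketch is not running under the
# stack shape AnyLogger.log() normally sets up.
_ctx = CaptureLoggingContextImpl(logging.INFO, caller=None)
_ctx.set_basic(name='example', msg='hello %s', args=('world',))
_ctx.capture()  # a second call would raise AlreadyCapturedError
print(_ctx.must_get_info(LoggingContextInfos.Level).name)  # 'INFO'
print(_ctx[LoggingContextInfos.Msg])                       # Msg(msg='hello %s', args=('world',))
print(_ctx[LoggingContextInfos.Thread] is not None)        # True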
1010
+ ########################################
1011
+ # ../standard.py
1012
+ """
1013
+ TODO:
1014
+ - !! move to std !!
1015
+ - structured
1016
+ - prefixed
1017
+ - debug
1018
+ - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
1019
+ """
1020
+
1021
+
1022
+ ##
1023
+
1024
+
1025
+ STANDARD_LOG_FORMAT_PARTS = [
1026
+ ('asctime', '%(asctime)-15s'),
1027
+ ('process', 'pid=%(process)s'),
1028
+ ('thread', 'tid=%(thread)x'),
1029
+ ('levelname', '%(levelname)s'),
1030
+ ('name', '%(name)s'),
1031
+ ('separator', '::'),
1032
+ ('message', '%(message)s'),
1033
+ ]
1034
+
1035
+
1036
+ class StandardLoggingFormatter(logging.Formatter):
1037
+ @staticmethod
1038
+ def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
1039
+ return ' '.join(v for k, v in parts)
1040
+
1041
+ converter = datetime.datetime.fromtimestamp # type: ignore
1042
+
1043
+ def formatTime(self, record, datefmt=None):
1044
+ ct = self.converter(record.created)
1045
+ if datefmt:
1046
+ return ct.strftime(datefmt) # noqa
1047
+ else:
1048
+ t = ct.strftime('%Y-%m-%d %H:%M:%S')
1049
+ return '%s.%03d' % (t, record.msecs) # noqa
1050
+
1051
+
1052
+ ##
1053
+
1054
+
1055
+ class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
1056
+ def __init_subclass__(cls, **kwargs):
1057
+ raise TypeError('This class serves only as a marker and should not be subclassed.')
1058
+
1059
+
1060
+ ##
1061
+
1062
+
1063
+ @contextlib.contextmanager
1064
+ def _locking_logging_module_lock() -> ta.Iterator[None]:
1065
+ if hasattr(logging, '_acquireLock'):
1066
+ logging._acquireLock() # noqa
1067
+ try:
1068
+ yield
1069
+ finally:
1070
+ logging._releaseLock() # type: ignore # noqa
1071
+
1072
+ elif hasattr(logging, '_lock'):
1073
+ # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
1074
+ with logging._lock: # noqa
1075
+ yield
1076
+
1077
+ else:
1078
+ raise Exception("Can't find lock in logging module")
1079
+
1080
+
1081
+ def configure_standard_logging(
1082
+ level: ta.Union[int, str] = logging.INFO,
1083
+ *,
1084
+ target: ta.Optional[logging.Logger] = None,
1085
+
1086
+ force: bool = False,
1087
+
1088
+ handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
1089
+
1090
+ formatter: ta.Optional[logging.Formatter] = None, # noqa
1091
+ json: bool = False,
1092
+ ) -> ta.Optional[StandardConfiguredLoggingHandler]:
1093
+ with _locking_logging_module_lock():
1094
+ if target is None:
1095
+ target = logging.root
1096
+
1097
+ #
1098
+
1099
+ if not force:
1100
+ if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
1101
+ return None
1102
+
1103
+ #
1104
+
1105
+ if handler_factory is not None:
1106
+ handler = handler_factory()
1107
+ else:
1108
+ handler = logging.StreamHandler()
1109
+
1110
+ #
1111
+
1112
+ if formatter is None:
1113
+ if json:
1114
+ formatter = JsonLoggingFormatter()
1115
+ else:
1116
+ formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS)) # noqa
1117
+ handler.setFormatter(formatter)
1118
+
1119
+ #
1120
+
1121
+ handler.addFilter(TidLoggingFilter())
1122
+
1123
+ #
1124
+
1125
+ target.addHandler(handler)
1126
+
1127
+ #
1128
+
1129
+ if level is not None:
1130
+ target.setLevel(level)
1131
+
1132
+ #
1133
+
1134
+ return StandardConfiguredLoggingHandler(handler)
1135
+
1136
+
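# Usage sketch (illustrative): typical one-call setup installing a handler on the root
# logger, formatted either with the standard line format or (with json=True) as JSON. The
# return value is None if a previous call already configured the target (unless force=True).
_handler = configure_standard_logging('INFO')
if _handler is not None:
    logging.getLogger(__name__).info('standard logging configured')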
1137
+ ########################################
1138
+ # ../base.py
1139
+
1140
+
1141
+ ##
1142
+
1143
+
1144
+ class AnyLogger(Abstract, ta.Generic[T]):
1145
+ def is_enabled_for(self, level: LogLevel) -> bool:
1146
+ return level >= self.get_effective_level()
1147
+
1148
+ @abc.abstractmethod
1149
+ def get_effective_level(self) -> LogLevel:
1150
+ raise NotImplementedError
1151
+
1152
+ #
1153
+
1154
+ @ta.final
1155
+ def isEnabledFor(self, level: LogLevel) -> bool: # noqa
1156
+ return self.is_enabled_for(level)
1157
+
1158
+ @ta.final
1159
+ def getEffectiveLevel(self) -> LogLevel: # noqa
1160
+ return self.get_effective_level()
1161
+
1162
+ ##
1163
+
1164
+ @ta.overload
1165
+ def log(self, level: LogLevel, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
1166
+ ...
1167
+
1168
+ @ta.overload
1169
+ def log(self, level: LogLevel, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
1170
+ ...
1171
+
1172
+ @ta.overload
1173
+ def log(self, level: LogLevel, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
1174
+ ...
1175
+
1176
+ @ta.final
1177
+ def log(self, level: LogLevel, *args, **kwargs):
1178
+ return self._log(CaptureLoggingContextImpl(level, stack_offset=1), *args, **kwargs)
1179
+
1180
+ #
1181
+
1182
+ @ta.overload
1183
+ def debug(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
1184
+ ...
1185
+
1186
+ @ta.overload
1187
+ def debug(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
1188
+ ...
1189
+
1190
+ @ta.overload
1191
+ def debug(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
1192
+ ...
1193
+
1194
+ @ta.final
1195
+ def debug(self, *args, **kwargs):
1196
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.DEBUG, stack_offset=1), *args, **kwargs)
1197
+
1198
+ #
1199
+
1200
+ @ta.overload
1201
+ def info(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
1202
+ ...
1203
+
1204
+ @ta.overload
1205
+ def info(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
1206
+ ...
1207
+
1208
+ @ta.overload
1209
+ def info(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
1210
+ ...
1211
+
1212
+ @ta.final
1213
+ def info(self, *args, **kwargs):
1214
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.INFO, stack_offset=1), *args, **kwargs)
1215
+
1216
+ #
1217
+
1218
+ @ta.overload
1219
+ def warning(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
1220
+ ...
1221
+
1222
+ @ta.overload
1223
+ def warning(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
1224
+ ...
1225
+
1226
+ @ta.overload
1227
+ def warning(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
1228
+ ...
1229
+
1230
+ @ta.final
1231
+ def warning(self, *args, **kwargs):
1232
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.WARNING, stack_offset=1), *args, **kwargs)
1233
+
1234
+ #
1235
+
1236
+ @ta.overload
1237
+ def error(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
1238
+ ...
1239
+
1240
+ @ta.overload
1241
+ def error(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
1242
+ ...
1243
+
1244
+ @ta.overload
1245
+ def error(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
1246
+ ...
1247
+
1248
+ @ta.final
1249
+ def error(self, *args, **kwargs):
1250
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, stack_offset=1), *args, **kwargs)
1251
+
1252
+ #
1253
+
1254
+ @ta.overload
1255
+ def exception(self, msg: str, *args: ta.Any, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
1256
+ ...
1257
+
1258
+ @ta.overload
1259
+ def exception(self, msg: ta.Tuple[ta.Any, ...], *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
1260
+ ...
1261
+
1262
+ @ta.overload
1263
+ def exception(self, msg_fn: LoggingMsgFn, *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
1264
+ ...
1265
+
1266
+ @ta.final
1267
+ def exception(self, *args, exc_info: LoggingExcInfoArg = True, **kwargs):
1268
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, exc_info=exc_info, stack_offset=1), *args, **kwargs) # noqa
1269
+
1270
+ #
1271
+
1272
+ @ta.overload
1273
+ def critical(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
1274
+ ...
1275
+
1276
+ @ta.overload
1277
+ def critical(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
1278
+ ...
1279
+
1280
+ @ta.overload
1281
+ def critical(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
1282
+ ...
1283
+
1284
+ @ta.final
1285
+ def critical(self, *args, **kwargs):
1286
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.CRITICAL, stack_offset=1), *args, **kwargs)
1287
+
1288
+ ##
1289
+
1290
+ @abc.abstractmethod
1291
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> T: # noqa
1292
+ raise NotImplementedError
1293
+
1294
+
1295
+ class Logger(AnyLogger[None], Abstract):
1296
+ @abc.abstractmethod
1297
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
1298
+ raise NotImplementedError
1299
+
1300
+
1301
+ class AsyncLogger(AnyLogger[ta.Awaitable[None]], Abstract):
1302
+ @abc.abstractmethod
1303
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> ta.Awaitable[None]: # noqa
1304
+ raise NotImplementedError
1305
+
1306
+
1307
+ ##
1308
+
1309
+
1310
+ class AnyNopLogger(AnyLogger[T], Abstract):
1311
+ @ta.final
1312
+ def get_effective_level(self) -> LogLevel:
1313
+ return -999
1314
+
1315
+
1316
+ @ta.final
1317
+ class NopLogger(AnyNopLogger[None], Logger):
1318
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
1319
+ pass
1320
+
1321
+
1322
+ @ta.final
1323
+ class AsyncNopLogger(AnyNopLogger[ta.Awaitable[None]], AsyncLogger):
1324
+ async def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
1325
+ pass
1326
+
1327
+
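# Usage sketch (illustrative): a minimal concrete Logger. The public log/debug/info/...
# methods build a CaptureLoggingContextImpl and hand it to _log(), which decides whether
# to capture caller/thread/process info and where to send the result.
class PrintLogger(Logger):
    def get_effective_level(self) -> LogLevel:
        return logging.INFO

    def _log(self, ctx: CaptureLoggingContext, msg, *args, **kwargs) -> None:
        if not self.is_enabled_for(ctx.must_get_info(LoggingContextInfos.Level).level):
            return
        ctx.set_basic(name='print', msg=msg, args=args)
        ctx.capture()
        m = ctx.must_get_info(LoggingContextInfos.Msg)
        print(m.msg % (m.args or ()))

PrintLogger().info('hello %s', 'world')  # prints 'hello world'
PrintLogger().debug('dropped')           # below the effective level, no output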
1328
+ ########################################
1329
+ # ../std/records.py
1330
+ """
1331
+ TODO:
1332
+ - TypedDict?
1333
+ """
1334
+
1335
+
1336
+ ##
1337
+
1338
+
1339
+ class LoggingContextInfoRecordAdapters:
1340
+ # Ref:
1341
+ # - https://docs.python.org/3/library/logging.html#logrecord-attributes
1342
+ #
1343
+ # LogRecord:
1344
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L276 (3.8) # noqa
1345
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L286 (~3.14) # noqa
1346
+ #
1347
+
1348
+ def __new__(cls, *args, **kwargs): # noqa
1349
+ raise TypeError
1350
+
1351
+ class Adapter(Abstract, ta.Generic[T]):
1352
+ @property
1353
+ @abc.abstractmethod
1354
+ def info_cls(self) -> ta.Type[LoggingContextInfo]:
1355
+ raise NotImplementedError
1356
+
1357
+ #
1358
+
1359
+ @ta.final
1360
+ class NOT_SET: # noqa
1361
+ def __new__(cls, *args, **kwargs): # noqa
1362
+ raise TypeError
1363
+
1364
+ class RecordAttr(ta.NamedTuple):
1365
+ name: str
1366
+ type: ta.Any
1367
+ default: ta.Any
1368
+
1369
+ # @abc.abstractmethod
1370
+ record_attrs: ta.ClassVar[ta.Mapping[str, RecordAttr]]
1371
+
1372
+ @property
1373
+ @abc.abstractmethod
1374
+ def _record_attrs(self) -> ta.Union[
1375
+ ta.Mapping[str, ta.Any],
1376
+ ta.Mapping[str, ta.Tuple[ta.Any, ta.Any]],
1377
+ ]:
1378
+ raise NotImplementedError
1379
+
1380
+ #
1381
+
1382
+ @abc.abstractmethod
1383
+ def context_to_record(self, ctx: LoggingContext) -> ta.Mapping[str, ta.Any]:
1384
+ raise NotImplementedError
1385
+
1386
+ #
1387
+
1388
+ @abc.abstractmethod
1389
+ def record_to_info(self, rec: logging.LogRecord) -> ta.Optional[T]:
1390
+ raise NotImplementedError
1391
+
1392
+ #
1393
+
1394
+ def __init_subclass__(cls, **kwargs: ta.Any) -> None:
1395
+ super().__init_subclass__(**kwargs)
1396
+
1397
+ if Abstract in cls.__bases__:
1398
+ return
1399
+
1400
+ if 'record_attrs' in cls.__dict__:
1401
+ raise TypeError(cls)
1402
+ if not isinstance(ra := cls.__dict__['_record_attrs'], collections.abc.Mapping):
1403
+ raise TypeError(ra)
1404
+
1405
+ rd: ta.Dict[str, LoggingContextInfoRecordAdapters.Adapter.RecordAttr] = {}
1406
+ for n, v in ra.items():
1407
+ if not n or not isinstance(n, str) or n in rd:
1408
+ raise AttributeError(n)
1409
+ if isinstance(v, tuple):
1410
+ t, d = v
1411
+ else:
1412
+ t, d = v, cls.NOT_SET
1413
+ rd[n] = cls.RecordAttr(
1414
+ name=n,
1415
+ type=t,
1416
+ default=d,
1417
+ )
1418
+ cls.record_attrs = rd
1419
+
1420
+ class RequiredAdapter(Adapter[T], Abstract):
1421
+ @property
1422
+ @abc.abstractmethod
1423
+ def _record_attrs(self) -> ta.Mapping[str, ta.Any]:
1424
+ raise NotImplementedError
1425
+
1426
+ #
1427
+
1428
+ @ta.final
1429
+ def context_to_record(self, ctx: LoggingContext) -> ta.Mapping[str, ta.Any]:
1430
+ if (info := ctx.get_info(self.info_cls)) is not None:
1431
+ return self._info_to_record(info)
1432
+ else:
1433
+ raise TypeError # FIXME: fallback?
1434
+
1435
+ @abc.abstractmethod
1436
+ def _info_to_record(self, info: T) -> ta.Mapping[str, ta.Any]:
1437
+ raise NotImplementedError
1438
+
1439
+ #
1440
+
1441
+ @abc.abstractmethod
1442
+ def record_to_info(self, rec: logging.LogRecord) -> T:
1443
+ raise NotImplementedError
1444
+
1445
+ #
1446
+
1447
+ def __init_subclass__(cls, **kwargs: ta.Any) -> None:
1448
+ super().__init_subclass__(**kwargs)
1449
+
1450
+ if any(a.default is not cls.NOT_SET for a in cls.record_attrs.values()):
1451
+ raise TypeError(cls.record_attrs)
1452
+
1453
+ class OptionalAdapter(Adapter[T], Abstract, ta.Generic[T]):
1454
+ @property
1455
+ @abc.abstractmethod
1456
+ def _record_attrs(self) -> ta.Mapping[str, ta.Tuple[ta.Any, ta.Any]]:
1457
+ raise NotImplementedError
1458
+
1459
+ record_defaults: ta.ClassVar[ta.Mapping[str, ta.Any]]
1460
+
1461
+ #
1462
+
1463
+ @ta.final
1464
+ def context_to_record(self, ctx: LoggingContext) -> ta.Mapping[str, ta.Any]:
1465
+ if (info := ctx.get_info(self.info_cls)) is not None:
1466
+ return self._info_to_record(info)
1467
+ else:
1468
+ return self.record_defaults
1469
+
1470
+ @abc.abstractmethod
1471
+ def _info_to_record(self, info: T) -> ta.Mapping[str, ta.Any]:
1472
+ raise NotImplementedError
1473
+
1474
+ #
1475
+
1476
+ def __init_subclass__(cls, **kwargs: ta.Any) -> None:
1477
+ super().__init_subclass__(**kwargs)
1478
+
1479
+ dd: ta.Dict[str, ta.Any] = {a.name: a.default for a in cls.record_attrs.values()}
1480
+ if any(d is cls.NOT_SET for d in dd.values()):
1481
+ raise TypeError(cls.record_attrs)
1482
+ cls.record_defaults = dd
1483
+
1484
+ #
1485
+
1486
+ class Name(RequiredAdapter[LoggingContextInfos.Name]):
1487
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Name]] = LoggingContextInfos.Name
1488
+
1489
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Any]] = dict(
1490
+ # Name of the logger used to log the call. Unmodified by ctor.
1491
+ name=str,
1492
+ )
1493
+
1494
+ def _info_to_record(self, info: LoggingContextInfos.Name) -> ta.Mapping[str, ta.Any]:
1495
+ return dict(
1496
+ name=info.name,
1497
+ )
1498
+
1499
+ def record_to_info(self, rec: logging.LogRecord) -> LoggingContextInfos.Name:
1500
+ return LoggingContextInfos.Name(
1501
+ name=rec.name,
1502
+ )
1503
+
1504
+ class Level(RequiredAdapter[LoggingContextInfos.Level]):
1505
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Level]] = LoggingContextInfos.Level
1506
+
1507
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Any]] = dict(
1508
+ # Text logging level for the message ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'). Set to
1509
+ # `getLevelName(level)`.
1510
+ levelname=str,
1511
+
1512
+ # Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL). Unmodified by ctor.
1513
+ levelno=int,
1514
+ )
1515
+
1516
+ def _info_to_record(self, info: LoggingContextInfos.Level) -> ta.Mapping[str, ta.Any]:
1517
+ return dict(
1518
+ levelname=info.name,
1519
+ levelno=int(info.level),
1520
+ )
1521
+
1522
+ def record_to_info(self, rec: logging.LogRecord) -> LoggingContextInfos.Level:
1523
+ return LoggingContextInfos.Level.build(rec.levelno)
1524
+
1525
+ class Msg(RequiredAdapter[LoggingContextInfos.Msg]):
1526
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Msg]] = LoggingContextInfos.Msg
1527
+
1528
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Any]] = dict(
1529
+ # The format string passed in the original logging call. Merged with args to produce message, or an
1530
+ # arbitrary object (see Using arbitrary objects as messages). Unmodified by ctor.
1531
+ msg=str,
1532
+
1533
+ # The tuple of arguments merged into msg to produce message, or a dict whose values are used for the merge
1534
+ # (when there is only one argument, and it is a dictionary). Ctor will transform a 1-tuple containing a
1535
+ # Mapping into just the mapping, but is otherwise unmodified.
1536
+ args=ta.Union[tuple, dict, None],
1537
+ )
1538
+
1539
+ def _info_to_record(self, info: LoggingContextInfos.Msg) -> ta.Mapping[str, ta.Any]:
1540
+ return dict(
1541
+ msg=info.msg,
1542
+ args=info.args,
1543
+ )
1544
+
1545
+ def record_to_info(self, rec: logging.LogRecord) -> LoggingContextInfos.Msg:
1546
+ return LoggingContextInfos.Msg(
1547
+ msg=rec.msg,
1548
+ args=rec.args,
1549
+ )
1550
+
1551
+ class Time(RequiredAdapter[LoggingContextInfos.Time]):
1552
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Time]] = LoggingContextInfos.Time
1553
+
1554
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Any]] = dict(
1555
+ # Time when the LogRecord was created. Set to `time.time_ns() / 1e9` for >=3.13.0b1, otherwise simply
1556
+ # `time.time()`.
1557
+ #
1558
+ # See:
1559
+ # - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
1560
+ # - https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
1561
+ #
1562
+ created=float,
1563
+
1564
+ # Millisecond portion of the time when the LogRecord was created.
1565
+ msecs=float,
1566
+
1567
+ # Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded.
1568
+ relativeCreated=float,
1569
+ )
1570
+
1571
+ def _info_to_record(self, info: LoggingContextInfos.Time) -> ta.Mapping[str, ta.Any]:
1572
+ return dict(
1573
+ created=info.secs,
1574
+ msecs=info.msecs,
1575
+ relativeCreated=info.relative_secs,
1576
+ )
1577
+
1578
+ def record_to_info(self, rec: logging.LogRecord) -> LoggingContextInfos.Time:
1579
+ return LoggingContextInfos.Time.build(
1580
+ int(rec.created * 1e9),
1581
+ )
1582
+
1583
+ class Exc(OptionalAdapter[LoggingContextInfos.Exc]):
1584
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Exc]] = LoggingContextInfos.Exc
1585
+
1586
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Tuple[ta.Any, ta.Any]]] = dict(
1587
+ # Exception tuple (à la sys.exc_info) or, if no exception has occurred, None. Unmodified by ctor.
1588
+ exc_info=(ta.Optional[LoggingExcInfoTuple], None),
1589
+
1590
+ # Used to cache the traceback text. Simply set to None by ctor, later set by Formatter.format.
1591
+ exc_text=(ta.Optional[str], None),
1592
+ )
1593
+
1594
+ def _info_to_record(self, info: LoggingContextInfos.Exc) -> ta.Mapping[str, ta.Any]:
1595
+ return dict(
1596
+ exc_info=info.info_tuple,
1597
+ exc_text=None,
1598
+ )
1599
+
1600
+ def record_to_info(self, rec: logging.LogRecord) -> ta.Optional[LoggingContextInfos.Exc]:
1601
+ # FIXME:
1602
+ # error: Argument 1 to "build" of "Exc" has incompatible type
1603
+ # "tuple[type[BaseException], BaseException, TracebackType | None] | tuple[None, None, None] | None"; expected # noqa
1604
+ # "BaseException | tuple[type[BaseException], BaseException, TracebackType | None] | bool | None" [arg-type] # noqa
1605
+ return LoggingContextInfos.Exc.build(rec.exc_info) # type: ignore[arg-type]
1606
+
1607
+ class Caller(OptionalAdapter[LoggingContextInfos.Caller]):
1608
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Caller]] = LoggingContextInfos.Caller
1609
+
1610
+ _UNKNOWN_PATH_NAME: ta.ClassVar[str] = '(unknown file)'
1611
+ _UNKNOWN_FUNC_NAME: ta.ClassVar[str] = '(unknown function)'
1612
+
1613
+ _STACK_INFO_PREFIX: ta.ClassVar[str] = 'Stack (most recent call last):\n'
1614
+
1615
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Tuple[ta.Any, ta.Any]]] = dict(
1616
+ # Full pathname of the source file where the logging call was issued (if available). Unmodified by ctor. May
1617
+ # default to "(unknown file)" by Logger.findCaller / Logger._log.
1618
+ pathname=(str, _UNKNOWN_PATH_NAME),
1619
+
1620
+ # Source line number where the logging call was issued (if available). Unmodified by ctor. May default to 0
1621
+ # by Logger.findCaller / Logger._log.
1622
+ lineno=(int, 0),
1623
+
1624
+ # Name of function containing the logging call. Set by ctor to `func` arg, unmodified. May default to
1625
+ # "(unknown function)" by Logger.findCaller / Logger._log.
1626
+ funcName=(str, _UNKNOWN_FUNC_NAME),
1627
+
1628
+ # Stack frame information (where available) from the bottom of the stack in the current thread, up to and
1629
+ # including the stack frame of the logging call which resulted in the creation of this record. Set by ctor
1630
+ # to `sinfo` arg, unmodified. Mostly set, if requested, by `Logger.findCaller`, to
1631
+ # `traceback.print_stack(f)`, but prepended with the literal "Stack (most recent call last):\n", and
1632
+ # stripped of exactly one trailing `\n` if present.
1633
+ stack_info=(ta.Optional[str], None),
1634
+ )
1635
+
1636
+ def _info_to_record(self, caller: LoggingContextInfos.Caller) -> ta.Mapping[str, ta.Any]:
1637
+ if (sinfo := caller.stack_info) is not None:
1638
+ stack_info: ta.Optional[str] = '\n'.join([
1639
+ self._STACK_INFO_PREFIX,
1640
+ sinfo[1:] if sinfo.endswith('\n') else sinfo,
1641
+ ])
1642
+ else:
1643
+ stack_info = None
1644
+
1645
+ return dict(
1646
+ pathname=caller.file_path,
1647
+
1648
+ lineno=caller.line_no,
1649
+ funcName=caller.func_name,
1650
+
1651
+ stack_info=stack_info,
1652
+ )
1653
+
1654
+ def record_to_info(self, rec: logging.LogRecord) -> ta.Optional[LoggingContextInfos.Caller]:
1655
+ # FIXME: piecemeal?
1656
+ if (
1657
+ rec.pathname != self._UNKNOWN_PATH_NAME and
1658
+ rec.lineno != 0 and
1659
+ rec.funcName != self._UNKNOWN_FUNC_NAME
1660
+ ):
1661
+ if (sinfo := rec.stack_info) is not None and sinfo.startswith(self._STACK_INFO_PREFIX):
1662
+ sinfo = sinfo[len(self._STACK_INFO_PREFIX):]
1663
+ return LoggingContextInfos.Caller(
1664
+ file_path=rec.pathname,
1665
+
1666
+ line_no=rec.lineno,
1667
+ func_name=rec.funcName,
1668
+
1669
+ stack_info=sinfo,
1670
+ )
1671
+
1672
+ return None
1673
+
1674
+ class SourceFile(Adapter[LoggingContextInfos.SourceFile]):
1675
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.SourceFile]] = LoggingContextInfos.SourceFile
1676
+
1677
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Any]] = dict(
1678
+ # Filename portion of pathname. Set to `os.path.basename(pathname)` if successful, otherwise defaults to
1679
+ # pathname.
1680
+ filename=str,
1681
+
1682
+ # Module (name portion of filename). Set to `os.path.splitext(filename)[0]`, otherwise defaults to
1683
+ # "Unknown module".
1684
+ module=str,
1685
+ )
1686
+
1687
+ _UNKNOWN_MODULE: ta.ClassVar[str] = 'Unknown module'
1688
+
1689
+ def context_to_record(self, ctx: LoggingContext) -> ta.Mapping[str, ta.Any]:
1690
+ if (info := ctx.get_info(LoggingContextInfos.SourceFile)) is not None:
1691
+ return dict(
1692
+ filename=info.file_name,
1693
+ module=info.module,
1694
+ )
1695
+
1696
+ if (caller := ctx.get_info(LoggingContextInfos.Caller)) is not None:
1697
+ return dict(
1698
+ filename=caller.file_path,
1699
+ module=self._UNKNOWN_MODULE,
1700
+ )
1701
+
1702
+ return dict(
1703
+ filename=LoggingContextInfoRecordAdapters.Caller._UNKNOWN_PATH_NAME, # noqa
1704
+ module=self._UNKNOWN_MODULE,
1705
+ )
1706
+
1707
+ def record_to_info(self, rec: logging.LogRecord) -> ta.Optional[LoggingContextInfos.SourceFile]:
1708
+ if (
1709
+ rec.module is not None and
1710
+ rec.module != self._UNKNOWN_MODULE
1711
+ ):
1712
+ return LoggingContextInfos.SourceFile(
1713
+ file_name=rec.filename,
1714
+ module=rec.module, # FIXME: piecemeal?
1715
+ )
1716
+
1717
+ return None
1718
+
1719
+ class Thread(OptionalAdapter[LoggingContextInfos.Thread]):
1720
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Thread]] = LoggingContextInfos.Thread
1721
+
1722
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Tuple[ta.Any, ta.Any]]] = dict(
1723
+ # Thread ID if available, and `logging.logThreads` is truthy.
1724
+ thread=(ta.Optional[int], None),
1725
+
1726
+ # Thread name if available, and `logging.logThreads` is truthy.
1727
+ threadName=(ta.Optional[str], None),
1728
+ )
1729
+
1730
+ def _info_to_record(self, info: LoggingContextInfos.Thread) -> ta.Mapping[str, ta.Any]:
1731
+ if logging.logThreads:
1732
+ return dict(
1733
+ thread=info.ident,
1734
+ threadName=info.name,
1735
+ )
1736
+
1737
+ return self.record_defaults
1738
+
1739
+ def record_to_info(self, rec: logging.LogRecord) -> ta.Optional[LoggingContextInfos.Thread]:
1740
+ if (
1741
+ (ident := rec.thread) is not None and
1742
+ (name := rec.threadName) is not None
1743
+ ):
1744
+ return LoggingContextInfos.Thread(
1745
+ ident=ident,
1746
+ native_id=None,
1747
+ name=name,
1748
+ )
1749
+
1750
+ return None
1751
+
1752
+ class Process(OptionalAdapter[LoggingContextInfos.Process]):
1753
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Process]] = LoggingContextInfos.Process
1754
+
1755
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Tuple[ta.Any, ta.Any]]] = dict(
1756
+ # Process ID if available - that is, if `hasattr(os, 'getpid')` - and `logging.logProcesses` is truthy,
1757
+ # otherwise None.
1758
+ process=(ta.Optional[int], None),
1759
+ )
1760
+
1761
+ def _info_to_record(self, info: LoggingContextInfos.Process) -> ta.Mapping[str, ta.Any]:
1762
+ if logging.logProcesses:
1763
+ return dict(
1764
+ process=info.pid,
1765
+ )
1766
+
1767
+ return self.record_defaults
1768
+
1769
+ def record_to_info(self, rec: logging.LogRecord) -> ta.Optional[LoggingContextInfos.Process]:
1770
+ if (
1771
+ (pid := rec.process) is not None
1772
+ ):
1773
+ return LoggingContextInfos.Process(
1774
+ pid=pid,
1775
+ )
1776
+
1777
+ return None
1778
+
1779
+ class Multiprocessing(OptionalAdapter[LoggingContextInfos.Multiprocessing]):
1780
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.Multiprocessing]] = LoggingContextInfos.Multiprocessing
1781
+
1782
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Tuple[ta.Any, ta.Any]]] = dict(
1783
+ # Process name if available. Set to None if `logging.logMultiprocessing` is not truthy. Otherwise, set to
1784
+ # 'MainProcess', then `sys.modules.get('multiprocessing').current_process().name` if that works, otherwise
1785
+ # remains as 'MainProcess'.
1786
+ #
1787
+ # As noted by stdlib:
1788
+ #
1789
+ # Errors may occur if multiprocessing has not finished loading yet - e.g. if a custom import hook causes
1790
+ # third-party code to run when multiprocessing calls import. See issue 8200 for an example
1791
+ #
1792
+ processName=(ta.Optional[str], None),
1793
+ )
1794
+
1795
+ def _info_to_record(self, info: LoggingContextInfos.Multiprocessing) -> ta.Mapping[str, ta.Any]:
1796
+ if logging.logMultiprocessing:
1797
+ return dict(
1798
+ processName=info.process_name,
1799
+ )
1800
+
1801
+ return self.record_defaults
1802
+
1803
+ def record_to_info(self, rec: logging.LogRecord) -> ta.Optional[LoggingContextInfos.Multiprocessing]:
1804
+ if (
1805
+ (process_name := rec.processName) is not None
1806
+ ):
1807
+ return LoggingContextInfos.Multiprocessing(
1808
+ process_name=process_name,
1809
+ )
1810
+
1811
+ return None
1812
+
1813
+ class AsyncioTask(OptionalAdapter[LoggingContextInfos.AsyncioTask]):
1814
+ info_cls: ta.ClassVar[ta.Type[LoggingContextInfos.AsyncioTask]] = LoggingContextInfos.AsyncioTask
1815
+
1816
+ _record_attrs: ta.ClassVar[ta.Mapping[str, ta.Union[ta.Any, ta.Tuple[ta.Any, ta.Any]]]] = dict(
1817
+ # Absent <3.12, otherwise asyncio.Task name if available, and `logging.logAsyncioTasks` is truthy. Set to
1818
+ # `sys.modules.get('asyncio').current_task().get_name()`, otherwise None.
1819
+ taskName=(ta.Optional[str], None),
1820
+ )
1821
+
1822
+ def _info_to_record(self, info: LoggingContextInfos.AsyncioTask) -> ta.Mapping[str, ta.Any]:
1823
+ if getattr(logging, 'logAsyncioTasks', None): # Absent <3.12
1824
+ return dict(
1825
+ taskName=info.name,
1826
+ )
1827
+
1828
+ return self.record_defaults
1829
+
1830
+ def record_to_info(self, rec: logging.LogRecord) -> ta.Optional[LoggingContextInfos.AsyncioTask]:
1831
+ if (
1832
+ (name := getattr(rec, 'taskName', None)) is not None
1833
+ ):
1834
+ return LoggingContextInfos.AsyncioTask(
1835
+ name=name,
1836
+ )
1837
+
1838
+ return None
1839
+
1840
+
1841
+ _LOGGING_CONTEXT_INFO_RECORD_ADAPTERS_: ta.Sequence[LoggingContextInfoRecordAdapters.Adapter] = [ # noqa
1842
+ LoggingContextInfoRecordAdapters.Name(),
1843
+ LoggingContextInfoRecordAdapters.Level(),
1844
+ LoggingContextInfoRecordAdapters.Msg(),
1845
+ LoggingContextInfoRecordAdapters.Time(),
1846
+ LoggingContextInfoRecordAdapters.Exc(),
1847
+ LoggingContextInfoRecordAdapters.Caller(),
1848
+ LoggingContextInfoRecordAdapters.SourceFile(),
1849
+ LoggingContextInfoRecordAdapters.Thread(),
1850
+ LoggingContextInfoRecordAdapters.Process(),
1851
+ LoggingContextInfoRecordAdapters.Multiprocessing(),
1852
+ LoggingContextInfoRecordAdapters.AsyncioTask(),
1853
+ ]
1854
+
1855
+ _LOGGING_CONTEXT_INFO_RECORD_ADAPTERS: ta.Mapping[ta.Type[LoggingContextInfo], LoggingContextInfoRecordAdapters.Adapter] = { # noqa
1856
+ ad.info_cls: ad for ad in _LOGGING_CONTEXT_INFO_RECORD_ADAPTERS_
1857
+ }
1858
+
1859
+
1860
+ ##
1861
+
1862
+
1863
+ # Formatter:
1864
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L514 (3.8)
1865
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L554 (~3.14) # noqa
1866
+ #
1867
+ _KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
1868
+ # The logged message, computed as msg % args. Set to `record.getMessage()`.
1869
+ message=str,
1870
+
1871
+ # Human-readable time when the LogRecord was created. By default this is of the form '2003-07-08 16:49:45,896' (the
1872
+ # numbers after the comma are millisecond portion of the time). Set to `self.formatTime(record, self.datefmt)` if
1873
+ # `self.usesTime()`, otherwise unset.
1874
+ asctime=str,
1875
+
1876
+ # Used to cache the traceback text. If unset (falsey) on the record and `exc_info` is truthy, set to
1877
+ # `self.formatException(record.exc_info)` - otherwise unmodified.
1878
+ exc_text=ta.Optional[str],
1879
+ )
1880
+
1881
+
1882
+ ##
1883
+
1884
+
1885
+ _KNOWN_STD_LOGGING_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(
1886
+ a for ad in _LOGGING_CONTEXT_INFO_RECORD_ADAPTERS.values() for a in ad.record_attrs
1887
+ )
1888
+
1889
+ _KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(_KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS)
1890
+
1891
+
1892
+ class UnknownStdLoggingRecordAttrsWarning(LoggingSetupWarning):
1893
+ pass
1894
+
1895
+
1896
+ def _check_std_logging_record_attrs() -> None:
1897
+ if (
1898
+ len([a for ad in _LOGGING_CONTEXT_INFO_RECORD_ADAPTERS.values() for a in ad.record_attrs]) !=
1899
+ len(_KNOWN_STD_LOGGING_RECORD_ATTR_SET)
1900
+ ):
1901
+ raise RuntimeError('Duplicate LoggingContextInfoRecordAdapter record attrs')
1902
+
1903
+ rec_dct = dict(logging.makeLogRecord({}).__dict__)
1904
+
1905
+ if (unk_rec_fields := frozenset(rec_dct) - _KNOWN_STD_LOGGING_RECORD_ATTR_SET):
1906
+ import warnings # noqa
1907
+
1908
+ warnings.warn(
1909
+ f'Unknown log record attrs detected: {sorted(unk_rec_fields)!r}',
1910
+ UnknownStdLoggingRecordAttrsWarning,
1911
+ )
1912
+
1913
+
1914
+ _check_std_logging_record_attrs()
1915
+
1916
+
1917
+ ##
1918
+
1919
+
1920
+ class LoggingContextLogRecord(logging.LogRecord):
1921
+ # LogRecord.__init__ args:
1922
+ # - name: str
1923
+ # - level: int
1924
+ # - pathname: str - Confusingly referred to as `fn` before the LogRecord ctor. May be empty or "(unknown file)".
1925
+ # - lineno: int - May be 0.
1926
+ # - msg: str
1927
+ # - args: tuple | dict | 1-tuple[dict]
1928
+ # - exc_info: LoggingExcInfoTuple | None
1929
+ # - func: str | None = None -> funcName
1930
+ # - sinfo: str | None = None -> stack_info
1931
+ #
1932
+
1933
+ def __init__(self, *, _logging_context: LoggingContext) -> None: # noqa
1934
+ self.__dict__.update(_logging_context=_logging_context)
1935
+
1936
+ for ad in _LOGGING_CONTEXT_INFO_RECORD_ADAPTERS_:
1937
+ self.__dict__.update(ad.context_to_record(_logging_context))
1938
+
1939
+ _logging_context: LoggingContext
1940
+
1941
+ # FIXME: track extra
1942
+ # def __setattr__(self, key, value):
1943
+ # super().__setattr__(key, value)
1944
+
1945
+
1946
+ ##
1947
+
1948
+
1949
+ @ta.final
1950
+ class LogRecordLoggingContext(LoggingContext):
1951
+ def __init__(self, rec: logging.LogRecord) -> None:
1952
+ if isinstance(rec, LoggingContextLogRecord):
1953
+ raise TypeError(rec)
1954
+
1955
+ self._rec = rec
1956
+
1957
+ infos: ta.List[LoggingContextInfo] = [
1958
+ info
1959
+ for ad in _LOGGING_CONTEXT_INFO_RECORD_ADAPTERS_
1960
+ if (info := ad.record_to_info(rec)) is not None
1961
+ ]
1962
+
1963
+ # FIXME:
1964
+ # if extra is not None:
1965
+ # for key in extra:
1966
+ # if (key in ["message", "asctime"]) or (key in rv.__dict__):
1967
+ # raise KeyError("Attempt to overwrite %r in LogRecord" % key)
1968
+ # rv.__dict__[key] = extra[key]
1969
+
1970
+ if (extra := {
1971
+ a: v
1972
+ for a, v in rec.__dict__.items()
1973
+ if a not in _KNOWN_STD_LOGGING_RECORD_ATTR_SET
1974
+ }):
1975
+ infos.append(LoggingContextInfos.Extra(extra))
1976
+
1977
+ self._infos: ta.Dict[ta.Type[LoggingContextInfo], LoggingContextInfo] = {
1978
+ type(info): info
1979
+ for info in infos
1980
+ }
1981
+
1982
+ def get_info(self, ty: ta.Type[LoggingContextInfoT]) -> ta.Optional[LoggingContextInfoT]:
1983
+ return self._infos.get(ty)
1984
+
1985
+
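# Usage sketch (illustrative): the round trip in the other direction - wrap an ordinary
# stdlib LogRecord and read the same structured infos back out through the adapters above.
_rec = logging.LogRecord('example', logging.WARNING, __file__, 42, 'disk %s%% full', (93,), None)
_rctx = LogRecordLoggingContext(_rec)
print(_rctx.must_get_info(LoggingContextInfos.Level).level)  # WARNING (== logging.WARNING)
print(_rctx.must_get_info(LoggingContextInfos.Msg).msg)      # 'disk %s%% full'
print(_rctx[LoggingContextInfos.Caller].line_no)             # 42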
1986
+ ########################################
1987
+ # ../std/loggers.py
1988
+
1989
+
1990
+ ##
1991
+
1992
+
1993
+ class StdLogger(Logger):
1994
+ def __init__(self, std: logging.Logger) -> None:
1995
+ super().__init__()
1996
+
1997
+ self._std = std
1998
+
1999
+ @property
2000
+ def std(self) -> logging.Logger:
2001
+ return self._std
2002
+
2003
+ def is_enabled_for(self, level: LogLevel) -> bool:
2004
+ return self._std.isEnabledFor(level)
2005
+
2006
+ def get_effective_level(self) -> LogLevel:
2007
+ return self._std.getEffectiveLevel()
2008
+
2009
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> None:
2010
+ if not self.is_enabled_for(ctx.must_get_info(LoggingContextInfos.Level).level):
2011
+ return
2012
+
2013
+ ctx.set_basic(
2014
+ name=self._std.name,
2015
+
2016
+ msg=msg,
2017
+ args=args,
2018
+ )
2019
+
2020
+ ctx.capture()
2021
+
2022
+ rec = LoggingContextLogRecord(_logging_context=ctx)
2023
+
2024
+ self._std.handle(rec)
2025
+
2026
+
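# Usage sketch (illustrative): StdLogger is the bridge back into the stdlib - records
# built from a captured LoggingContext are handed to an ordinary logging.Logger's handlers.
_std = logging.getLogger('std-example')
_std.addHandler(logging.StreamHandler())
_std.setLevel(logging.INFO)

_slog = StdLogger(_std)
_slog.info('hello %s', 'world')                   # emitted through _std's handlers
_slog.debug(lambda: ('expensive %r', object()))   # lazy: the msg function is never called when disabled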
2027
+ ########################################
2028
+ # _amalg.py
2029
+
2030
+
2031
+ ##