ominfra 0.0.0.dev427__py3-none-any.whl → 0.0.0.dev428__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,7 +5,7 @@
  # @omlish-generated
  # @omlish-amalg-output ../supervisor/main.py
  # @omlish-git-diff-omit
- # ruff: noqa: N802 UP006 UP007 UP012 UP036 UP043 UP045
+ # ruff: noqa: N802 UP006 UP007 UP012 UP036 UP043 UP045 UP046
  #
  # Supervisor is Copyright (c) 2006-2015 Agendaless Consulting and Contributors.
  # (http://www.agendaless.com), All Rights Reserved
@@ -131,6 +131,9 @@ A0 = ta.TypeVar('A0')
  A1 = ta.TypeVar('A1')
  A2 = ta.TypeVar('A2')

+ # ../../omlish/logs/levels.py
+ LogLevel = int # ta.TypeAlias
+
  # ../../omlish/sockets/addresses.py
  SocketAddress = ta.Any

@@ -161,9 +164,17 @@ InjectorProviderFn = ta.Callable[['Injector'], ta.Any]
  InjectorProviderFnMap = ta.Mapping['InjectorKey', 'InjectorProviderFn']
  InjectorBindingOrBindings = ta.Union['InjectorBinding', 'InjectorBindings']

+ # ../../omlish/logs/contexts.py
+ LoggingExcInfoTuple = ta.Tuple[ta.Type[BaseException], BaseException, ta.Optional[types.TracebackType]] # ta.TypeAlias
+ LoggingExcInfo = ta.Union[BaseException, LoggingExcInfoTuple] # ta.TypeAlias
+ LoggingExcInfoArg = ta.Union[LoggingExcInfo, bool, None] # ta.TypeAlias
+
  # ../../omlish/http/coro/server/server.py
  CoroHttpServerFactory = ta.Callable[[SocketAddress], 'CoroHttpServer']

+ # ../../omlish/logs/base.py
+ LoggingMsgFn = ta.Callable[[], ta.Union[str, tuple]] # ta.TypeAlias
+

 ########################################
  # ../errors.py
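
Note: these three new aliases cover the shapes an `exc_info` argument can take at a logging call site. As a minimal hypothetical sketch (not part of the package) of how such an argument collapses to a concrete tuple - mirroring what `CaptureLoggingContextImpl` does further down in this diff:

    import sys
    import types
    import typing as ta

    LoggingExcInfoTuple = ta.Tuple[ta.Type[BaseException], BaseException, ta.Optional[types.TracebackType]]
    LoggingExcInfo = ta.Union[BaseException, LoggingExcInfoTuple]
    LoggingExcInfoArg = ta.Union[LoggingExcInfo, bool, None]

    def normalize_exc_info(arg: LoggingExcInfoArg) -> ta.Optional[LoggingExcInfoTuple]:
        # Hypothetical helper, for illustration only.
        if arg is True:
            # True means "use the exception currently being handled, if any" - stdlib behavior.
            cur = sys.exc_info()
            return cur if cur[0] is not None else None
        if arg is False or arg is None:
            return None
        if isinstance(arg, BaseException):
            # A bare exception expands to the familiar (type, value, traceback) triple.
            return (type(arg), arg, arg.__traceback__)
        return arg
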
@@ -3059,14 +3070,188 @@ def typing_annotations_attr() -> str:


 ########################################
- # ../../../omlish/logs/modules.py
+ # ../../../omlish/logs/infos.py
+
+
+ ##
+
+
+ class _LoggingContextInfo:
+     def __mro_entries__(self, bases):
+         return ()
+
+
+ LoggingContextInfo: type = ta.cast(ta.Any, _LoggingContextInfo())
+
+
+ ##
+
+
+ @ta.final
+ class LoggingSourceFileInfo(LoggingContextInfo, ta.NamedTuple): # type: ignore[misc]
+     file_name: str
+     module: str
+
+     @classmethod
+     def build(cls, file_path: ta.Optional[str]) -> ta.Optional['LoggingSourceFileInfo']:
+         if file_path is None:
+             return None
+
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L331-L336 # noqa
+         try:
+             file_name = os.path.basename(file_path)
+             module = os.path.splitext(file_name)[0]
+         except (TypeError, ValueError, AttributeError):
+             return None
+
+         return cls(
+             file_name,
+             module,
+         )
+
+
+ ##
+
+
+ @ta.final
+ class LoggingThreadInfo(LoggingContextInfo, ta.NamedTuple): # type: ignore[misc]
+     ident: int
+     native_id: ta.Optional[int]
+     name: str
+
+     @classmethod
+     def build(cls) -> 'LoggingThreadInfo':
+         return cls(
+             threading.get_ident(),
+             threading.get_native_id() if hasattr(threading, 'get_native_id') else None,
+             threading.current_thread().name,
+         )
+
+
+ ##
+
+
+ @ta.final
+ class LoggingProcessInfo(LoggingContextInfo, ta.NamedTuple): # type: ignore[misc]
+     pid: int
+
+     @classmethod
+     def build(cls) -> 'LoggingProcessInfo':
+         return cls(
+             os.getpid(),
+         )
+
+
+ ##
+
+
+ @ta.final
+ class LoggingMultiprocessingInfo(LoggingContextInfo, ta.NamedTuple): # type: ignore[misc]
+     process_name: str
+
+     @classmethod
+     def build(cls) -> ta.Optional['LoggingMultiprocessingInfo']:
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L355-L364 # noqa
+         if (mp := sys.modules.get('multiprocessing')) is None:
+             return None
+
+         return cls(
+             mp.current_process().name,
+         )
+
+
+ ##
+
+
+ @ta.final
+ class LoggingAsyncioTaskInfo(LoggingContextInfo, ta.NamedTuple): # type: ignore[misc]
+     name: str
+
+     @classmethod
+     def build(cls) -> ta.Optional['LoggingAsyncioTaskInfo']:
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L372-L377 # noqa
+         if (asyncio := sys.modules.get('asyncio')) is None:
+             return None
+
+         try:
+             task = asyncio.current_task()
+         except Exception: # noqa
+             return None
+
+         if task is None:
+             return None
+
+         return cls(
+             task.get_name(), # Always non-None
+         )
+
+
+ ########################################
+ # ../../../omlish/logs/levels.py


  ##


- def get_module_logger(mod_globals: ta.Mapping[str, ta.Any]) -> logging.Logger:
-     return logging.getLogger(mod_globals.get('__name__'))
+ @ta.final
+ class NamedLogLevel(int):
+     # logging.getLevelNamesMapping (or, as that is unavailable <3.11, logging._nameToLevel) includes the deprecated
+     # aliases.
+     _NAMES_BY_INT: ta.ClassVar[ta.Mapping[LogLevel, str]] = dict(sorted(logging._levelToName.items(), key=lambda t: -t[0])) # noqa
+
+     _INTS_BY_NAME: ta.ClassVar[ta.Mapping[str, LogLevel]] = {v: k for k, v in _NAMES_BY_INT.items()}
+
+     _NAME_INT_PAIRS: ta.ClassVar[ta.Sequence[ta.Tuple[str, LogLevel]]] = list(_INTS_BY_NAME.items())
+
+     #
+
+     @property
+     def exact_name(self) -> ta.Optional[str]:
+         return self._NAMES_BY_INT.get(self)
+
+     _effective_name: ta.Optional[str]
+
+     @property
+     def effective_name(self) -> ta.Optional[str]:
+         try:
+             return self._effective_name
+         except AttributeError:
+             pass
+
+         if (n := self.exact_name) is None:
+             for n, i in self._NAME_INT_PAIRS: # noqa
+                 if self >= i:
+                     break
+             else:
+                 n = None
+
+         self._effective_name = n
+         return n
+
+     #
+
+     def __repr__(self) -> str:
+         return f'{self.__class__.__name__}({int(self)})'
+
+     def __str__(self) -> str:
+         return self.exact_name or f'{self.effective_name or "INVALID"}:{int(self)}'
+
+     #
+
+     CRITICAL: ta.ClassVar['NamedLogLevel']
+     ERROR: ta.ClassVar['NamedLogLevel']
+     WARNING: ta.ClassVar['NamedLogLevel']
+     INFO: ta.ClassVar['NamedLogLevel']
+     DEBUG: ta.ClassVar['NamedLogLevel']
+     NOTSET: ta.ClassVar['NamedLogLevel']
+
+
+ NamedLogLevel.CRITICAL = NamedLogLevel(logging.CRITICAL)
+ NamedLogLevel.ERROR = NamedLogLevel(logging.ERROR)
+ NamedLogLevel.WARNING = NamedLogLevel(logging.WARNING)
+ NamedLogLevel.INFO = NamedLogLevel(logging.INFO)
+ NamedLogLevel.DEBUG = NamedLogLevel(logging.DEBUG)
+ NamedLogLevel.NOTSET = NamedLogLevel(logging.NOTSET)


  ########################################
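
Note: `NamedLogLevel` stays a plain `int`, so it compares and hashes like one; `exact_name` only hits for exact stdlib values, while `effective_name` falls back to the nearest named level at or below. A quick illustration, assuming the default stdlib level table:

    assert NamedLogLevel(30).exact_name == 'WARNING'    # 30 is exactly logging.WARNING
    assert str(NamedLogLevel(30)) == 'WARNING'

    custom = NamedLogLevel(35)                          # sits between WARNING (30) and ERROR (40)
    assert custom.exact_name is None                    # no exact entry in logging._levelToName
    assert custom.effective_name == 'WARNING'           # highest named level <= 35
    assert str(custom) == 'WARNING:35'
    assert repr(custom) == 'NamedLogLevel(35)'

    assert NamedLogLevel.ERROR > NamedLogLevel.WARNING  # ordinary int comparison still applies
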
@@ -3189,6 +3374,17 @@ class ProxyLoggingHandler(ProxyLoggingFilterer, logging.Handler):
          self._underlying.handleError(record)


+ ########################################
+ # ../../../omlish/logs/warnings.py
+
+
+ ##
+
+
+ class LoggingSetupWarning(Warning):
+     pass
+
+
  ########################################
  # ../../../omlish/sockets/addresses.py
  """
@@ -3650,87 +3846,6 @@ def get_open_fds(limit: int) -> ta.FrozenSet[Fd]:
      return frozenset(fd for i in range(limit) if is_fd_open(fd := Fd(i)))


- ########################################
- # ../utils/os.py
-
-
- ##
-
-
- def real_exit(code: Rc) -> None:
-     os._exit(code) # noqa
-
-
- ##
-
-
- def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
-     """
-     Decode the status returned by wait() or waitpid().
-
-     Return a tuple (exitstatus, message) where exitstatus is the exit status, or -1 if the process was killed by a
-     signal; and message is a message telling what happened. It is the caller's responsibility to display the message.
-     """
-
-     if os.WIFEXITED(sts):
-         es = os.WEXITSTATUS(sts) & 0xffff
-         msg = f'exit status {es}'
-         return Rc(es), msg
-
-     elif os.WIFSIGNALED(sts):
-         sig = os.WTERMSIG(sts)
-         msg = f'terminated by {sig_name(sig)}'
-         if hasattr(os, 'WCOREDUMP'):
-             iscore = os.WCOREDUMP(sts)
-         else:
-             iscore = bool(sts & 0x80)
-         if iscore:
-             msg += ' (core dumped)'
-         return Rc(-1), msg
-
-     else:
-         msg = 'unknown termination cause 0x%04x' % sts # noqa
-         return Rc(-1), msg
-
-
- ##
-
-
- class WaitedPid(ta.NamedTuple):
-     pid: Pid
-     sts: Rc
-
-
- def waitpid(
-         *,
-         log: ta.Optional[logging.Logger] = None,
- ) -> ta.Optional[WaitedPid]:
-     # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
-     # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
-     # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
-     # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
-     # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
-     # lying around.
-     try:
-         pid, sts = os.waitpid(-1, os.WNOHANG)
-
-     except OSError as exc:
-         code = exc.args[0]
-
-         if code not in (errno.ECHILD, errno.EINTR):
-             if log is not None:
-                 log.critical('waitpid error %r; a process may not be cleaned up properly', code)
-
-         if code == errno.EINTR:
-             if log is not None:
-                 log.debug('EINTR during reap')
-
-         return None
-
-     else:
-         return WaitedPid(pid, sts) # type: ignore
-
-
  ########################################
  # ../utils/users.py

@@ -6055,6 +6170,104 @@ def check_lite_runtime_version() -> None:
          raise OSError(f'Requires python {LITE_REQUIRED_PYTHON_VERSION}, got {sys.version_info} from {sys.executable}') # noqa


+ ########################################
+ # ../../../omlish/logs/callers.py
+
+
+ ##
+
+
+ class LoggingCaller(LoggingContextInfo, ta.NamedTuple): # type: ignore[misc]
+     file_path: str
+     line_no: int
+     name: str
+     stack_info: ta.Optional[str]
+
+     @classmethod
+     def is_internal_frame(cls, frame: types.FrameType) -> bool:
+         file_path = os.path.normcase(frame.f_code.co_filename)
+
+         # Yes, really.
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L204
+         # https://github.com/python/cpython/commit/5ca6d7469be53960843df39bb900e9c3359f127f
+         if 'importlib' in file_path and '_bootstrap' in file_path:
+             return True
+
+         return False
+
+     @classmethod
+     def find_frame(cls, ofs: int = 0) -> ta.Optional[types.FrameType]:
+         f: ta.Optional[types.FrameType] = sys._getframe(2 + ofs) # noqa
+
+         while f is not None:
+             # NOTE: We don't check __file__ like stdlib since we may be running amalgamated - we rely on careful, manual
+             # stack_offset management.
+             if hasattr(f, 'f_code'):
+                 return f
+
+             f = f.f_back
+
+         return None
+
+     @classmethod
+     def find(
+             cls,
+             ofs: int = 0,
+             *,
+             stack_info: bool = False,
+     ) -> ta.Optional['LoggingCaller']:
+         if (f := cls.find_frame(ofs + 1)) is None:
+             return None
+
+         # https://github.com/python/cpython/blob/08e9794517063c8cd92c48714071b1d3c60b71bd/Lib/logging/__init__.py#L1616-L1623 # noqa
+         sinfo = None
+         if stack_info:
+             sio = io.StringIO()
+             traceback.print_stack(f, file=sio)
+             sinfo = sio.getvalue()
+             sio.close()
+             if sinfo[-1] == '\n':
+                 sinfo = sinfo[:-1]
+
+         return cls(
+             f.f_code.co_filename,
+             f.f_lineno or 0,
+             f.f_code.co_name,
+             sinfo,
+         )
+
+
+ ########################################
+ # ../../../omlish/logs/protocols.py
+
+
+ ##
+
+
+ class LoggerLike(ta.Protocol):
+     """Satisfied by both our Logger and stdlib logging.Logger."""
+
+     def isEnabledFor(self, level: LogLevel) -> bool: ... # noqa
+
+     def getEffectiveLevel(self) -> LogLevel: ... # noqa
+
+     #
+
+     def log(self, level: LogLevel, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ... # noqa
+
+     def debug(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ... # noqa
+
+     def info(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ... # noqa
+
+     def warning(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ... # noqa
+
+     def error(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ... # noqa
+
+     def exception(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ... # noqa
+
+     def critical(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ... # noqa
+
+
  ########################################
  # ../../../omlish/logs/std/json.py
  """
@@ -6113,20 +6326,103 @@ class JsonLoggingFormatter(logging.Formatter):


  ########################################
- # ../../../omlish/os/journald.py
+ # ../../../omlish/logs/times.py


  ##


- class sd_iovec(ct.Structure): # noqa
-     pass
+ class LoggingTimeFields(LoggingContextInfo, ta.NamedTuple): # type: ignore[misc]
+     """Maps directly to stdlib `logging.LogRecord` fields, and must be kept in sync with it."""

+     created: float
+     msecs: float
+     relative_created: float

- sd_iovec._fields_ = [
-     ('iov_base', ct.c_void_p), # Pointer to data.
-     ('iov_len', ct.c_size_t), # Length of data.
- ]
+     @classmethod
+     def get_std_start_time_ns(cls) -> int:
+         x: ta.Any = logging._startTime # type: ignore[attr-defined] # noqa
+
+         # Before 3.13.0b1 this will be `time.time()`, a float of seconds. After that, it will be `time.time_ns()`, an
+         # int.
+         #
+         # See:
+         # - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+         #
+         if isinstance(x, float):
+             return int(x * 1e9)
+         else:
+             return x
+
+     @classmethod
+     def build(
+             cls,
+             time_ns: int,
+             *,
+             start_time_ns: ta.Optional[int] = None,
+     ) -> 'LoggingTimeFields':
+         # https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+         created = time_ns / 1e9 # ns to float seconds
+
+         # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
+         # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
+         # Convert to float by adding 0.0 for historical reasons. See gh-89047
+         msecs = (time_ns % 1_000_000_000) // 1_000_000 + 0.0
+
+         # https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
+         if msecs == 999.0 and int(created) != time_ns // 1_000_000_000:
+             # ns -> sec conversion can round up, e.g:
+             # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
+             msecs = 0.0
+
+         if start_time_ns is None:
+             start_time_ns = cls.get_std_start_time_ns()
+         relative_created = (time_ns - start_time_ns) / 1e6
+
+         return cls(
+             created,
+             msecs,
+             relative_created,
+         )
+
+
+ ##
+
+
+ class UnexpectedLoggingStartTimeWarning(LoggingSetupWarning):
+     pass
+
+
+ def _check_logging_start_time() -> None:
+     if (x := LoggingTimeFields.get_std_start_time_ns()) < (t := time.time()):
+         import warnings # noqa
+
+         warnings.warn(
+             f'Unexpected logging start time detected: '
+             f'get_std_start_time_ns={x}, '
+             f'time.time()={t}',
+             UnexpectedLoggingStartTimeWarning,
+         )
+
+
+ _check_logging_start_time()
+
+
+ ########################################
+ # ../../../omlish/os/journald.py
+
+
+ ##
+
+
+ class sd_iovec(ct.Structure): # noqa
+     pass
+
+
+ sd_iovec._fields_ = [
+     ('iov_base', ct.c_void_p), # Pointer to data.
+     ('iov_len', ct.c_size_t), # Length of data.
+ ]


  ##
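
Note: working `LoggingTimeFields.build` through the nanosecond timestamp its own comments cite, with `start_time_ns` pinned so the arithmetic is deterministic:

    t_ns = 1_677_903_920_999_998_503

    fields = LoggingTimeFields.build(t_ns, start_time_ns=t_ns - 5_000_000_000)
    assert fields.created == t_ns / 1e9        # float seconds, ~1677903920.9999986
    assert fields.msecs == 999.0               # 999_998_503 ns of fraction -> 999 whole ms
    assert fields.relative_created == 5000.0   # 5 s after the pinned start time, in ms

    # The rounding guard: a timestamp whose float-seconds form rounds up to the
    # next whole second gets msecs forced back to 0.0.
    edge = LoggingTimeFields.build(1_677_903_920_999_999_900, start_time_ns=0)
    assert edge.msecs == 0.0
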
@@ -6688,6 +6984,87 @@ class SupervisorSetup(Abstract):
          raise NotImplementedError


+ ########################################
+ # ../utils/os.py
+
+
+ ##
+
+
+ def real_exit(code: Rc) -> None:
+     os._exit(code) # noqa
+
+
+ ##
+
+
+ def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
+     """
+     Decode the status returned by wait() or waitpid().
+
+     Return a tuple (exitstatus, message) where exitstatus is the exit status, or -1 if the process was killed by a
+     signal; and message is a message telling what happened. It is the caller's responsibility to display the message.
+     """
+
+     if os.WIFEXITED(sts):
+         es = os.WEXITSTATUS(sts) & 0xffff
+         msg = f'exit status {es}'
+         return Rc(es), msg
+
+     elif os.WIFSIGNALED(sts):
+         sig = os.WTERMSIG(sts)
+         msg = f'terminated by {sig_name(sig)}'
+         if hasattr(os, 'WCOREDUMP'):
+             iscore = os.WCOREDUMP(sts)
+         else:
+             iscore = bool(sts & 0x80)
+         if iscore:
+             msg += ' (core dumped)'
+         return Rc(-1), msg
+
+     else:
+         msg = 'unknown termination cause 0x%04x' % sts # noqa
+         return Rc(-1), msg
+
+
+ ##
+
+
+ class WaitedPid(ta.NamedTuple):
+     pid: Pid
+     sts: Rc
+
+
+ def waitpid(
+         *,
+         log: ta.Optional[LoggerLike] = None,
+ ) -> ta.Optional[WaitedPid]:
+     # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
+     # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
+     # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
+     # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
+     # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
+     # lying around.
+     try:
+         pid, sts = os.waitpid(-1, os.WNOHANG)
+
+     except OSError as exc:
+         code = exc.args[0]
+
+         if code not in (errno.ECHILD, errno.EINTR):
+             if log is not None:
+                 log.critical('waitpid error %r; a process may not be cleaned up properly', code)
+
+         if code == errno.EINTR:
+             if log is not None:
+                 log.debug('EINTR during reap')
+
+         return None
+
+     else:
+         return WaitedPid(pid, sts) # type: ignore
+
+
  ########################################
  # ../../../omlish/http/handlers.py

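Note: on POSIX systems a wait status packs a normal exit code N as N << 8, and death by signal S as S itself (plus, on some platforms, a core-dump bit at 0x80). A quick check of `decode_wait_status` under that encoding (Linux/macOS semantics assumed):

    import signal

    assert decode_wait_status(3 << 8) == (3, 'exit status 3')  # normal exit, code 3

    rc, msg = decode_wait_status(int(signal.SIGTERM))          # killed by SIGTERM
    assert rc == -1
    assert msg.startswith('terminated by')
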
@@ -6747,7 +7124,7 @@ class HttpHandler_(Abstract): # noqa
  @dc.dataclass(frozen=True)
  class LoggingHttpHandler(HttpHandler_):
      handler: HttpHandler
-     log: logging.Logger
+     log: LoggerLike
      level: int = logging.DEBUG

      def __call__(self, req: HttpHandlerRequest) -> HttpHandlerResponse:
@@ -6760,7 +7137,7 @@ class LoggingHttpHandler(HttpHandler_):
  @dc.dataclass(frozen=True)
  class ExceptionLoggingHttpHandler(HttpHandler_):
      handler: HttpHandler
-     log: logging.Logger
+     log: LoggerLike
      message: ta.Union[str, ta.Callable[[HttpHandlerRequest, BaseException], str]] = 'Error in http handler'

      def __call__(self, req: HttpHandlerRequest) -> HttpHandlerResponse:
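
Note: with the `log` field widened from `logging.Logger` to the `LoggerLike` protocol, either a stdlib logger or one of the package's own `Logger` implementations can now be injected. A hypothetical construction (the `my_http_handler` callable is illustrative only):

    import logging

    wrapped = LoggingHttpHandler(
        handler=my_http_handler,        # hypothetical HttpHandler callable
        log=logging.getLogger('http'),  # any LoggerLike is accepted here
        level=logging.INFO,
    )
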
@@ -8062,130 +8439,387 @@ inj = InjectionApi()


  ########################################
- # ../../../omlish/logs/standard.py
- """
- TODO:
-  - !! move to std !!
-  - structured
-  - prefixed
-  - debug
-  - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
- """
+ # ../../../omlish/logs/contexts.py


  ##


- STANDARD_LOG_FORMAT_PARTS = [
-     ('asctime', '%(asctime)-15s'),
-     ('process', 'pid=%(process)s'),
-     ('thread', 'tid=%(thread)x'),
-     ('levelname', '%(levelname)s'),
-     ('name', '%(name)s'),
-     ('separator', '::'),
-     ('message', '%(message)s'),
- ]
+ class LoggingContext(Abstract):
+     @property
+     @abc.abstractmethod
+     def level(self) -> NamedLogLevel:
+         raise NotImplementedError

+     #

- class StandardLoggingFormatter(logging.Formatter):
-     @staticmethod
-     def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
-         return ' '.join(v for k, v in parts)
+     @property
+     @abc.abstractmethod
+     def time_ns(self) -> int:
+         raise NotImplementedError

-     converter = datetime.datetime.fromtimestamp # type: ignore
+     @property
+     @abc.abstractmethod
+     def times(self) -> LoggingTimeFields:
+         raise NotImplementedError

-     def formatTime(self, record, datefmt=None):
-         ct = self.converter(record.created)
-         if datefmt:
-             return ct.strftime(datefmt) # noqa
-         else:
-             t = ct.strftime('%Y-%m-%d %H:%M:%S')
-             return '%s.%03d' % (t, record.msecs) # noqa
+     #

+     @property
+     @abc.abstractmethod
+     def exc_info(self) -> ta.Optional[LoggingExcInfo]:
+         raise NotImplementedError

- ##
+     @property
+     @abc.abstractmethod
+     def exc_info_tuple(self) -> ta.Optional[LoggingExcInfoTuple]:
+         raise NotImplementedError

+     #

- class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
-     def __init_subclass__(cls, **kwargs):
-         raise TypeError('This class serves only as a marker and should not be subclassed.')
+     @abc.abstractmethod
+     def caller(self) -> ta.Optional[LoggingCaller]:
+         raise NotImplementedError

+     @abc.abstractmethod
+     def source_file(self) -> ta.Optional[LoggingSourceFileInfo]:
+         raise NotImplementedError

- ##
+     #

+     @abc.abstractmethod
+     def thread(self) -> ta.Optional[LoggingThreadInfo]:
+         raise NotImplementedError

- @contextlib.contextmanager
- def _locking_logging_module_lock() -> ta.Iterator[None]:
-     if hasattr(logging, '_acquireLock'):
-         logging._acquireLock() # noqa
-         try:
-             yield
-         finally:
-             logging._releaseLock() # type: ignore # noqa
+     @abc.abstractmethod
+     def process(self) -> ta.Optional[LoggingProcessInfo]:
+         raise NotImplementedError

-     elif hasattr(logging, '_lock'):
-         # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
-         with logging._lock: # noqa
-             yield
+     @abc.abstractmethod
+     def multiprocessing(self) -> ta.Optional[LoggingMultiprocessingInfo]:
+         raise NotImplementedError

-     else:
-         raise Exception("Can't find lock in logging module")
+     @abc.abstractmethod
+     def asyncio_task(self) -> ta.Optional[LoggingAsyncioTaskInfo]:
+         raise NotImplementedError


- def configure_standard_logging(
-         level: ta.Union[int, str] = logging.INFO,
-         *,
-         json: bool = False,
-         target: ta.Optional[logging.Logger] = None,
-         force: bool = False,
-         handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
- ) -> ta.Optional[StandardConfiguredLoggingHandler]:
-     with _locking_logging_module_lock():
-         if target is None:
-             target = logging.root
+ ##

-         #

-         if not force:
-             if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
-                 return None
+ class CaptureLoggingContext(LoggingContext, Abstract):
+     class AlreadyCapturedError(Exception):
+         pass

-         #
+     class NotCapturedError(Exception):
+         pass

-         if handler_factory is not None:
-             handler = handler_factory()
-         else:
-             handler = logging.StreamHandler()
+     @abc.abstractmethod
+     def capture(self) -> None:
+         """Must be cooperatively called only from the expected locations."""

-         #
+         raise NotImplementedError

-         formatter: logging.Formatter
-         if json:
-             formatter = JsonLoggingFormatter()
-         else:
-             formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS))
-         handler.setFormatter(formatter)

-         #
+ @ta.final
+ class CaptureLoggingContextImpl(CaptureLoggingContext):
+     @ta.final
+     class NOT_SET: # noqa
+         def __new__(cls, *args, **kwargs): # noqa
+             raise TypeError

-         handler.addFilter(TidLoggingFilter())
+     #
+
+     def __init__(
+             self,
+             level: LogLevel,
+             *,
+             time_ns: ta.Optional[int] = None,
+
+             exc_info: LoggingExcInfoArg = False,
+
+             caller: ta.Union[LoggingCaller, ta.Type[NOT_SET], None] = NOT_SET,
+             stack_offset: int = 0,
+             stack_info: bool = False,
+     ) -> None:
+         self._level: NamedLogLevel = level if level.__class__ is NamedLogLevel else NamedLogLevel(level) # type: ignore[assignment] # noqa

          #

-         target.addHandler(handler)
+         if time_ns is None:
+             time_ns = time.time_ns()
+         self._time_ns: int = time_ns

          #

-         if level is not None:
-             target.setLevel(level)
+         if exc_info is True:
+             sys_exc_info = sys.exc_info()
+             if sys_exc_info[0] is not None:
+                 exc_info = sys_exc_info
+             else:
+                 exc_info = None
+         elif exc_info is False:
+             exc_info = None
+
+         if exc_info is not None:
+             self._exc_info: ta.Optional[LoggingExcInfo] = exc_info
+             if isinstance(exc_info, BaseException):
+                 self._exc_info_tuple: ta.Optional[LoggingExcInfoTuple] = (type(exc_info), exc_info, exc_info.__traceback__) # noqa
+             else:
+                 self._exc_info_tuple = exc_info

          #

-         return StandardConfiguredLoggingHandler(handler)
+         if caller is not CaptureLoggingContextImpl.NOT_SET:
+             self._caller = caller # type: ignore[assignment]
+         else:
+             self._stack_offset = stack_offset
+             self._stack_info = stack_info

+     ##

- ########################################
- # ../types.py
+     @property
+     def level(self) -> NamedLogLevel:
+         return self._level
+
+     #
+
+     @property
+     def time_ns(self) -> int:
+         return self._time_ns
+
+     _times: LoggingTimeFields
+
+     @property
+     def times(self) -> LoggingTimeFields:
+         try:
+             return self._times
+         except AttributeError:
+             pass
+
+         times = self._times = LoggingTimeFields.build(self.time_ns)
+         return times
+
+     #
+
+     _exc_info: ta.Optional[LoggingExcInfo] = None
+     _exc_info_tuple: ta.Optional[LoggingExcInfoTuple] = None
+
+     @property
+     def exc_info(self) -> ta.Optional[LoggingExcInfo]:
+         return self._exc_info
+
+     @property
+     def exc_info_tuple(self) -> ta.Optional[LoggingExcInfoTuple]:
+         return self._exc_info_tuple
+
+     ##
+
+     _stack_offset: int
+     _stack_info: bool
+
+     def inc_stack_offset(self, ofs: int = 1) -> 'CaptureLoggingContext':
+         if hasattr(self, '_stack_offset'):
+             self._stack_offset += ofs
+         return self
+
+     _has_captured: bool = False
+
+     _caller: ta.Optional[LoggingCaller]
+     _source_file: ta.Optional[LoggingSourceFileInfo]
+
+     _thread: ta.Optional[LoggingThreadInfo]
+     _process: ta.Optional[LoggingProcessInfo]
+     _multiprocessing: ta.Optional[LoggingMultiprocessingInfo]
+     _asyncio_task: ta.Optional[LoggingAsyncioTaskInfo]
+
+     def capture(self) -> None:
+         if self._has_captured:
+             raise CaptureLoggingContextImpl.AlreadyCapturedError
+         self._has_captured = True
+
+         if not hasattr(self, '_caller'):
+             self._caller = LoggingCaller.find(
+                 self._stack_offset + 1,
+                 stack_info=self._stack_info,
+             )
+
+         if (caller := self._caller) is not None:
+             self._source_file = LoggingSourceFileInfo.build(caller.file_path)
+         else:
+             self._source_file = None
+
+         self._thread = LoggingThreadInfo.build()
+         self._process = LoggingProcessInfo.build()
+         self._multiprocessing = LoggingMultiprocessingInfo.build()
+         self._asyncio_task = LoggingAsyncioTaskInfo.build()
+
+     #
+
+     def caller(self) -> ta.Optional[LoggingCaller]:
+         try:
+             return self._caller
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def source_file(self) -> ta.Optional[LoggingSourceFileInfo]:
+         try:
+             return self._source_file
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     #
+
+     def thread(self) -> ta.Optional[LoggingThreadInfo]:
+         try:
+             return self._thread
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def process(self) -> ta.Optional[LoggingProcessInfo]:
+         try:
+             return self._process
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def multiprocessing(self) -> ta.Optional[LoggingMultiprocessingInfo]:
+         try:
+             return self._multiprocessing
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def asyncio_task(self) -> ta.Optional[LoggingAsyncioTaskInfo]:
+         try:
+             return self._asyncio_task
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+
+ ########################################
+ # ../../../omlish/logs/standard.py
+ """
+ TODO:
+  - !! move to std !!
+  - structured
+  - prefixed
+  - debug
+  - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
+ """
+
+
+ ##
+
+
+ STANDARD_LOG_FORMAT_PARTS = [
+     ('asctime', '%(asctime)-15s'),
+     ('process', 'pid=%(process)s'),
+     ('thread', 'tid=%(thread)x'),
+     ('levelname', '%(levelname)s'),
+     ('name', '%(name)s'),
+     ('separator', '::'),
+     ('message', '%(message)s'),
+ ]
+
+
+ class StandardLoggingFormatter(logging.Formatter):
+     @staticmethod
+     def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
+         return ' '.join(v for k, v in parts)
+
+     converter = datetime.datetime.fromtimestamp # type: ignore
+
+     def formatTime(self, record, datefmt=None):
+         ct = self.converter(record.created)
+         if datefmt:
+             return ct.strftime(datefmt) # noqa
+         else:
+             t = ct.strftime('%Y-%m-%d %H:%M:%S')
+             return '%s.%03d' % (t, record.msecs) # noqa
+
+
+ ##
+
+
+ class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
+     def __init_subclass__(cls, **kwargs):
+         raise TypeError('This class serves only as a marker and should not be subclassed.')
+
+
+ ##
+
+
+ @contextlib.contextmanager
+ def _locking_logging_module_lock() -> ta.Iterator[None]:
+     if hasattr(logging, '_acquireLock'):
+         logging._acquireLock() # noqa
+         try:
+             yield
+         finally:
+             logging._releaseLock() # type: ignore # noqa
+
+     elif hasattr(logging, '_lock'):
+         # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
+         with logging._lock: # noqa
+             yield
+
+     else:
+         raise Exception("Can't find lock in logging module")
+
+
+ def configure_standard_logging(
+         level: ta.Union[int, str] = logging.INFO,
+         *,
+         json: bool = False,
+         target: ta.Optional[logging.Logger] = None,
+         force: bool = False,
+         handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
+ ) -> ta.Optional[StandardConfiguredLoggingHandler]:
+     with _locking_logging_module_lock():
+         if target is None:
+             target = logging.root
+
+         #
+
+         if not force:
+             if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
+                 return None
+
+         #
+
+         if handler_factory is not None:
+             handler = handler_factory()
+         else:
+             handler = logging.StreamHandler()
+
+         #
+
+         formatter: logging.Formatter
+         if json:
+             formatter = JsonLoggingFormatter()
+         else:
+             formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS))
+         handler.setFormatter(formatter)
+
+         #
+
+         handler.addFilter(TidLoggingFilter())
+
+         #
+
+         target.addHandler(handler)
+
+         #
+
+         if level is not None:
+             target.setLevel(level)
+
+         #
+
+         return StandardConfiguredLoggingHandler(handler)
+
+
+ ########################################
+ # ../types.py


  class ExitNow(Exception): # noqa
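
Note: the capture context is deliberately two-phase - the constructor records level, time, and exc-info eagerly, while the frame-dependent fields wait for `capture()` to be called from the expected stack depth. A rough sketch of its lifecycle, assuming the defaults above:

    import logging

    ctx = CaptureLoggingContextImpl(logging.INFO, stack_offset=0)

    try:
        ctx.caller()                   # frame info has not been gathered yet
    except CaptureLoggingContext.NotCapturedError:
        pass

    ctx.capture()                      # walks the stack once; snapshots thread/process/task info
    assert ctx.level == NamedLogLevel.INFO
    assert ctx.times.created == ctx.time_ns / 1e9

    try:
        ctx.capture()                  # a second capture is rejected
    except CaptureLoggingContextImpl.AlreadyCapturedError:
        pass
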
@@ -8932,718 +9566,653 @@ class CoroHttpServer:
8932
9566
 
8933
9567
 
8934
9568
  ########################################
8935
- # ../dispatchers.py
9569
+ # ../../../omlish/logs/base.py
8936
9570
 
8937
9571
 
8938
9572
  ##
8939
9573
 
8940
9574
 
8941
- class Dispatchers(KeyedCollection[Fd, FdioHandler]):
8942
- def _key(self, v: FdioHandler) -> Fd:
8943
- return Fd(v.fd())
8944
-
8945
- #
9575
+ class AnyLogger(Abstract, ta.Generic[T]):
9576
+ def is_enabled_for(self, level: LogLevel) -> bool:
9577
+ return self.get_effective_level() >= level
8946
9578
 
8947
- def drain(self) -> None:
8948
- for d in self:
8949
- # note that we *must* call readable() for every dispatcher, as it may have side effects for a given
8950
- # dispatcher (eg. call handle_listener_state_change for event listener processes)
8951
- if d.readable():
8952
- d.on_readable()
8953
- if d.writable():
8954
- d.on_writable()
9579
+ @abc.abstractmethod
9580
+ def get_effective_level(self) -> LogLevel:
9581
+ raise NotImplementedError
8955
9582
 
8956
9583
  #
8957
9584
 
8958
- def remove_logs(self) -> None:
8959
- for d in self:
8960
- if isinstance(d, ProcessOutputDispatcher):
8961
- d.remove_logs()
9585
+ @ta.final
9586
+ def isEnabledFor(self, level: LogLevel) -> bool: # noqa
9587
+ return self.is_enabled_for(level)
8962
9588
 
8963
- def reopen_logs(self) -> None:
8964
- for d in self:
8965
- if isinstance(d, ProcessOutputDispatcher):
8966
- d.reopen_logs()
9589
+ @ta.final
9590
+ def getEffectiveLevel(self) -> LogLevel: # noqa
9591
+ return self.get_effective_level()
8967
9592
 
9593
+ ##
8968
9594
 
8969
- ########################################
8970
- # ../dispatchersimpl.py
9595
+ @ta.overload
9596
+ def log(self, level: LogLevel, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
9597
+ ...
8971
9598
 
9599
+ @ta.overload
9600
+ def log(self, level: LogLevel, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
9601
+ ...
8972
9602
 
8973
- log = get_module_logger(globals()) # noqa
9603
+ @ta.overload
9604
+ def log(self, level: LogLevel, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
9605
+ ...
8974
9606
 
9607
+ @ta.final
9608
+ def log(self, level: LogLevel, *args, **kwargs):
9609
+ return self._log(CaptureLoggingContextImpl(level, stack_offset=1), *args, **kwargs)
8975
9610
 
8976
- ##
9611
+ #
8977
9612
 
9613
+ @ta.overload
9614
+ def debug(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
9615
+ ...
8978
9616
 
8979
- class BaseProcessDispatcherImpl(ProcessDispatcher, Abstract):
8980
- def __init__(
8981
- self,
8982
- process: Process,
8983
- channel: ProcessOutputChannel,
8984
- fd: Fd,
8985
- *,
8986
- event_callbacks: EventCallbacks,
8987
- server_config: ServerConfig,
8988
- ) -> None:
8989
- super().__init__()
9617
+ @ta.overload
9618
+ def debug(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
9619
+ ...
8990
9620
 
8991
- self._process = process # process which "owns" this dispatcher
8992
- self._channel = channel # 'stderr' or 'stdout'
8993
- self._fd = fd
8994
- self._event_callbacks = event_callbacks
8995
- self._server_config = server_config
9621
+ @ta.overload
9622
+ def debug(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
9623
+ ...
8996
9624
 
8997
- self._closed = False # True if close() has been called
9625
+ @ta.final
9626
+ def debug(self, *args, **kwargs):
9627
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.DEBUG, stack_offset=1), *args, **kwargs)
8998
9628
 
8999
9629
  #
9000
9630
 
9001
- def __repr__(self) -> str:
9002
- return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
9631
+ @ta.overload
9632
+ def info(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
9633
+ ...
9003
9634
 
9004
- #
9635
+ @ta.overload
9636
+ def info(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
9637
+ ...
9005
9638
 
9006
- @property
9007
- def process(self) -> Process:
9008
- return self._process
9639
+ @ta.overload
9640
+ def info(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
9641
+ ...
9009
9642
 
9010
- @property
9011
- def channel(self) -> ProcessOutputChannel:
9012
- return self._channel
9643
+ @ta.final
9644
+ def info(self, *args, **kwargs):
9645
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.INFO, stack_offset=1), *args, **kwargs)
9013
9646
 
9014
- def fd(self) -> Fd:
9015
- return self._fd
9647
+ #
9016
9648
 
9017
- @property
9018
- def closed(self) -> bool:
9019
- return self._closed
9649
+ @ta.overload
9650
+ def warning(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
9651
+ ...
9020
9652
 
9021
- #
9653
+ @ta.overload
9654
+ def warning(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
9655
+ ...
9022
9656
 
9023
- def close(self) -> None:
9024
- if not self._closed:
9025
- log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
9026
- self._closed = True
9657
+ @ta.overload
9658
+ def warning(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
9659
+ ...
9027
9660
 
9028
- def on_error(self, exc: ta.Optional[BaseException] = None) -> None:
9029
- nil, t, v, tbinfo = compact_traceback()
9661
+ @ta.final
9662
+ def warning(self, *args, **kwargs):
9663
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.WARNING, stack_offset=1), *args, **kwargs)
9030
9664
 
9031
- log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
9032
- self.close()
9665
+ #
9033
9666
 
9667
+ @ta.overload
9668
+ def error(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
9669
+ ...
9034
9670
 
9035
- class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispatcher):
9036
- """
9037
- Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
9671
+ @ta.overload
9672
+ def error(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
9673
+ ...
9038
9674
 
9039
- - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
9040
- ProcessCommunicationEvent by calling notify_event(event).
9041
- - route the output to the appropriate log handlers as specified in the config.
9042
- """
9675
+ @ta.overload
9676
+ def error(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
9677
+ ...
9043
9678
 
9044
- def __init__(
9045
- self,
9046
- process: Process,
9047
- event_type: ta.Type[ProcessCommunicationEvent],
9048
- fd: Fd,
9049
- *,
9050
- event_callbacks: EventCallbacks,
9051
- server_config: ServerConfig,
9052
- ) -> None:
9053
- super().__init__(
9054
- process,
9055
- event_type.channel,
9056
- fd,
9057
- event_callbacks=event_callbacks,
9058
- server_config=server_config,
9059
- )
9679
+ @ta.final
9680
+ def error(self, *args, **kwargs):
9681
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, stack_offset=1), *args, **kwargs)
9060
9682
 
9061
- self._event_type = event_type
9683
+ #
9062
9684
 
9063
- self._lc: ProcessConfig.Log = getattr(process.config, self._channel)
9685
+ @ta.overload
9686
+ def exception(self, msg: str, *args: ta.Any, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
9687
+ ...
9064
9688
 
9065
- self._init_normal_log()
9066
- self._init_capture_log()
9689
+ @ta.overload
9690
+ def exception(self, msg: ta.Tuple[ta.Any, ...], *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
9691
+ ...
9067
9692
 
9068
- self._child_log = self._normal_log
9693
+ @ta.overload
9694
+ def exception(self, msg_fn: LoggingMsgFn, *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
9695
+ ...
9069
9696
 
9070
- self._capture_mode = False # are we capturing process event data
9071
- self._output_buffer = b'' # data waiting to be logged
9697
+ @ta.final
9698
+ def exception(self, *args, exc_info: LoggingExcInfoArg = True, **kwargs):
9699
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, exc_info=exc_info, stack_offset=1), *args, **kwargs) # noqa
9072
9700
 
9073
- # all code below is purely for minor speedups
9701
+ #
9074
9702
 
9075
- begin_token = self._event_type.BEGIN_TOKEN
9076
- end_token = self._event_type.END_TOKEN
9077
- self._begin_token_data = (begin_token, len(begin_token))
9078
- self._end_token_data = (end_token, len(end_token))
9703
+ @ta.overload
9704
+ def critical(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
9705
+ ...
9079
9706
 
9080
- self._main_log_level = logging.DEBUG
9707
+ @ta.overload
9708
+ def critical(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
9709
+ ...
9081
9710
 
9082
- self._log_to_main_log = self._server_config.loglevel <= self._main_log_level
9711
+ @ta.overload
9712
+ def critical(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
9713
+ ...
9083
9714
 
9084
- self._stdout_events_enabled = self._process.config.stdout.events_enabled
9085
- self._stderr_events_enabled = self._process.config.stderr.events_enabled
9715
+ @ta.final
9716
+ def critical(self, *args, **kwargs):
9717
+ return self._log(CaptureLoggingContextImpl(NamedLogLevel.CRITICAL, stack_offset=1), *args, **kwargs)
9086
9718
 
9087
- _child_log: ta.Optional[logging.Logger] = None # the current logger (normal_log or capture_log)
9088
- _normal_log: ta.Optional[logging.Logger] = None # the "normal" (non-capture) logger
9089
- _capture_log: ta.Optional[logging.Logger] = None # the logger used while we're in capture_mode
9719
+ ##
9090
9720
 
9091
- def _init_normal_log(self) -> None:
9092
- """
9093
- Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
9094
- enabled.
9095
- """
9721
+ @classmethod
9722
+ def _prepare_msg_args(cls, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> ta.Tuple[str, tuple]:
9723
+ if callable(msg):
9724
+ if args:
9725
+ raise TypeError(f'Must not provide both a message function and args: {msg=} {args=}')
9726
+ x = msg()
9727
+ if isinstance(x, str):
9728
+ return x, ()
9729
+ elif isinstance(x, tuple):
9730
+ if x:
9731
+ return x[0], x[1:]
9732
+ else:
9733
+ return '', ()
9734
+ else:
9735
+ raise TypeError(x)
9096
9736
 
9097
- config = self._process.config # noqa
9098
- channel = self._channel # noqa
9737
+ elif isinstance(msg, tuple):
9738
+ if args:
9739
+ raise TypeError(f'Must not provide both a tuple message and args: {msg=} {args=}')
9740
+ if msg:
9741
+ return msg[0], msg[1:]
9742
+ else:
9743
+ return '', ()
9099
9744
 
9100
- logfile = self._lc.file
9101
- max_bytes = self._lc.max_bytes # noqa
9102
- backups = self._lc.backups # noqa
9103
- to_syslog = self._lc.syslog
9745
+ elif isinstance(msg, str):
9746
+ return msg, args
9104
9747
 
9105
- if logfile or to_syslog:
9106
- self._normal_log = logging.getLogger(__name__)
9748
+ else:
9749
+ raise TypeError(msg)
9107
9750
 
9108
- # if logfile:
9109
- # loggers.handle_file(
9110
- # self.normal_log,
9111
- # filename=logfile,
9112
- # fmt='%(message)s',
9113
- # rotating=bool(max_bytes), # optimization
9114
- # max_bytes=max_bytes,
9115
- # backups=backups,
9116
- # )
9751
+ @abc.abstractmethod
9752
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> T: # noqa
9753
+ raise NotImplementedError
9117
9754
 
9118
- # if to_syslog:
9119
- # loggers.handle_syslog(
9120
- # self.normal_log,
9121
- # fmt=config.name + ' %(message)s',
9122
- # )
9123
9755
 
9124
- def _init_capture_log(self) -> None:
9125
- """
9126
- Configure the capture log for this process. This log is used to temporarily capture output when special output
9127
- is detected. Sets self.capture_log if capturing is enabled.
9128
- """
9756
+ class Logger(AnyLogger[None], Abstract):
9757
+ @abc.abstractmethod
9758
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
9759
+ raise NotImplementedError
9129
9760
 
9130
- capture_max_bytes = self._lc.capture_max_bytes
9131
- if capture_max_bytes:
9132
- self._capture_log = logging.getLogger(__name__)
9133
- # loggers.handle_boundIO(
9134
- # self._capture_log,
9135
- # fmt='%(message)s',
9136
- # max_bytes=capture_max_bytes,
9137
- # )
9138
9761
 
9139
- def remove_logs(self) -> None:
9140
- for l in (self._normal_log, self._capture_log):
9141
- if l is not None:
9142
- for handler in l.handlers:
9143
- handler.remove() # type: ignore
9144
- handler.reopen() # type: ignore
9762
+ class AsyncLogger(AnyLogger[ta.Awaitable[None]], Abstract):
9763
+ @abc.abstractmethod
9764
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> ta.Awaitable[None]: # noqa
9765
+ raise NotImplementedError
9145
9766
 
9146
- def reopen_logs(self) -> None:
9147
- for l in (self._normal_log, self._capture_log):
9148
- if l is not None:
9149
- for handler in l.handlers:
9150
- handler.reopen() # type: ignore
9151
9767
 
9152
- def _log(self, data: ta.Union[str, bytes, None]) -> None:
9153
- if not data:
9154
- return
9768
+ ##
9155
9769
 
9156
- if self._server_config.strip_ansi:
9157
- data = strip_escapes(as_bytes(data))
9158
9770
 
9159
- if self._child_log:
9160
- self._child_log.info(data)
9771
+ class AnyNopLogger(AnyLogger[T], Abstract):
9772
+ @ta.final
9773
+ def get_effective_level(self) -> LogLevel:
9774
+ return 999
9161
9775
 
9162
- if self._log_to_main_log:
9163
- if not isinstance(data, bytes):
9164
- text = data
9165
- else:
9166
- try:
9167
- text = data.decode('utf-8')
9168
- except UnicodeDecodeError:
9169
- text = f'Undecodable: {data!r}'
9170
- log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text) # noqa
9171
9776
 
9172
- if self._channel == 'stdout':
9173
- if self._stdout_events_enabled:
9174
- self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
9777
+ @ta.final
9778
+ class NopLogger(AnyNopLogger[None], Logger):
9779
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
9780
+ pass
9175
9781
 
9176
- elif self._stderr_events_enabled:
9177
- self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
9178
9782
 
9179
- def record_output(self) -> None:
9180
- if self._capture_log is None:
9181
- # shortcut trying to find capture data
9182
- data = self._output_buffer
9183
- self._output_buffer = b''
9184
- self._log(data)
9185
- return
9783
+ @ta.final
9784
+ class AsyncNopLogger(AnyNopLogger[ta.Awaitable[None]], AsyncLogger):
9785
+ async def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None: # noqa
9786
+ pass
9186
9787
 
9187
- if self._capture_mode:
9188
- token, token_len = self._end_token_data
9189
- else:
9190
- token, token_len = self._begin_token_data
9191
9788
 
9192
- if len(self._output_buffer) <= token_len:
9193
- return # not enough data
9789
+ ########################################
9790
+ # ../../../omlish/logs/std/records.py
9194
9791
 
9195
- data = self._output_buffer
9196
- self._output_buffer = b''
9197
9792
 
9198
- try:
9199
- before, after = data.split(token, 1)
9200
- except ValueError:
9201
- after = None
9202
- index = find_prefix_at_end(data, token)
9203
- if index:
9204
- self._output_buffer = self._output_buffer + data[-index:]
9205
- data = data[:-index]
9206
- self._log(data)
9207
- else:
9208
- self._log(before)
9209
- self.toggle_capture_mode()
9210
- self._output_buffer = after # type: ignore
9793
+ ##
9211
9794
 
9212
- if after:
9213
- self.record_output()
9214
9795
 
9215
- def toggle_capture_mode(self) -> None:
9216
- self._capture_mode = not self._capture_mode
9796
+ # Ref:
9797
+ # - https://docs.python.org/3/library/logging.html#logrecord-attributes
9798
+ #
9799
+ # LogRecord:
9800
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L276 (3.8)
9801
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L286 (~3.14) # noqa
9802
+ #
9803
+ # LogRecord.__init__ args:
9804
+ # - name: str
9805
+ # - level: int
9806
+ # - pathname: str - Confusingly referred to as `fn` before the LogRecord ctor. May be empty or "(unknown file)".
9807
+ # - lineno: int - May be 0.
9808
+ # - msg: str
9809
+ # - args: tuple | dict | 1-tuple[dict]
9810
+ # - exc_info: LoggingExcInfoTuple | None
9811
+ # - func: str | None = None -> funcName
9812
+ # - sinfo: str | None = None -> stack_info
9813
+ #
9814
+ KNOWN_STD_LOGGING_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
9815
+ # Name of the logger used to log the call. Unmodified by ctor.
9816
+ name=str,
9217
9817
 
9218
- if self._capture_log is not None:
9219
- if self._capture_mode:
9220
- self._child_log = self._capture_log
9221
- else:
9222
- for handler in self._capture_log.handlers:
9223
- handler.flush()
9224
- data = self._capture_log.getvalue() # type: ignore
9225
- channel = self._channel
9226
- procname = self._process.config.name
9227
- event = self._event_type(self._process, self._process.pid, data)
9228
- self._event_callbacks.notify(event)
9818
+ # The format string passed in the original logging call. Merged with args to produce message, or an arbitrary object
9819
+ # (see Using arbitrary objects as messages). Unmodified by ctor.
9820
+ msg=str,
9229
9821
 
9230
- log.debug('%r %s emitted a comm event', procname, channel)
9231
- for handler in self._capture_log.handlers:
9232
- handler.remove() # type: ignore
9233
- handler.reopen() # type: ignore
9234
- self._child_log = self._normal_log
9822
+ # The tuple of arguments merged into msg to produce message, or a dict whose values are used for the merge (when
9823
+ # there is only one argument, and it is a dictionary). Ctor will transform a 1-tuple containing a Mapping into just
9824
+ # the mapping, but is otherwise unmodified.
9825
+ args=ta.Union[tuple, dict],
9235
9826
 
9236
- def writable(self) -> bool:
9237
- return False
9827
+ #
9238
9828
 
9239
- def readable(self) -> bool:
9240
- if self._closed:
9241
- return False
9242
- return True
9829
+ # Text logging level for the message ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'). Set to
9830
+ # `getLevelName(level)`.
9831
+ levelname=str,
9243
9832
 
9244
- def on_readable(self) -> None:
9245
- data = read_fd(self._fd)
9246
- self._output_buffer += data
9247
- self.record_output()
9248
- if not data:
9249
- # if we get no data back from the pipe, it means that the child process has ended. See
9250
- # mail.python.org/pipermail/python-dev/2004-August/046850.html
9251
- self.close()
9833
+ # Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL). Unmodified by ctor.
9834
+ levelno=int,
9252
9835
 
9836
+ #
9253
9837
 
9254
- class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatcher):
9255
- def __init__(
9256
- self,
9257
- process: Process,
9258
- channel: ProcessOutputChannel,
9259
- fd: Fd,
9260
- *,
9261
- event_callbacks: EventCallbacks,
9262
- server_config: ServerConfig,
9263
- ) -> None:
9264
- super().__init__(
9265
- process,
9266
- channel,
9267
- fd,
9268
- event_callbacks=event_callbacks,
9269
- server_config=server_config,
9270
- )
9838
+ # Full pathname of the source file where the logging call was issued (if available). Unmodified by ctor. May default
9839
+ # to "(unknown file)" by Logger.findCaller / Logger._log.
9840
+ pathname=str,
9271
9841
 
9272
- self._input_buffer = b''
9842
+ # Filename portion of pathname. Set to `os.path.basename(pathname)` if successful, otherwise defaults to pathname.
9843
+ filename=str,
9273
9844
 
9274
- def write(self, chars: ta.Union[bytes, str]) -> None:
9275
- self._input_buffer += as_bytes(chars)
9845
+ # Module (name portion of filename). Set to `os.path.splitext(filename)[0]`, otherwise defaults to
9846
+ # "Unknown module".
9847
+ module=str,
9276
9848
 
9277
- def writable(self) -> bool:
9278
- if self._input_buffer and not self._closed:
9279
- return True
9280
- return False
9849
+ #
9281
9850
 
9282
- def flush(self) -> None:
9283
- # other code depends on this raising EPIPE if the pipe is closed
9284
- sent = os.write(self._fd, as_bytes(self._input_buffer))
9285
- self._input_buffer = self._input_buffer[sent:]
9851
+ # Exception tuple (à la sys.exc_info) or, if no exception has occurred, None. Unmodified by ctor.
9852
+ exc_info=ta.Optional[LoggingExcInfoTuple],
9286
9853
 
9287
- def on_writable(self) -> None:
9288
- if self._input_buffer:
9289
- try:
9290
- self.flush()
9291
- except OSError as why:
9292
- if why.args[0] == errno.EPIPE:
9293
- self._input_buffer = b''
9294
- self.close()
9295
- else:
9296
- raise
9854
+ # Used to cache the traceback text. Simply set to None by ctor, later set by Formatter.format.
9855
+ exc_text=ta.Optional[str],
9297
9856
 
9857
+ #
9298
9858
 
9299
- ########################################
9300
- # ../groupsimpl.py
9859
+ # Stack frame information (where available) from the bottom of the stack in the current thread, up to and including
9860
+ # the stack frame of the logging call which resulted in the creation of this record. Set by ctor to `sinfo` arg,
9861
+ # unmodified. Mostly set, if requested, by `Logger.findCaller`, to `traceback.print_stack(f)`, but prepended with
9862
+ # the literal "Stack (most recent call last):\n", and stripped of exactly one trailing `\n` if present.
9863
+ stack_info=ta.Optional[str],
9301
9864
 
9865
+ # Source line number where the logging call was issued (if available). Unmodified by ctor. May default to 0 by
9866
+ # Logger.findCaller / Logger._log.
9867
+ lineno=int,
9302
9868
 
9303
- ##
9869
+ # Name of function containing the logging call. Set by ctor to `func` arg, unmodified. May default to
9870
+ # "(unknown function)" by Logger.findCaller / Logger._log.
9871
+ funcName=str,
9304
9872
 
9873
+ #
9305
9874
 
9306
- class ProcessFactory(Func2[ProcessConfig, ProcessGroup, Process]):
9307
- pass
9875
+ # Time when the LogRecord was created. Set to `time.time_ns() / 1e9` for >=3.13.0b1, otherwise simply `time.time()`.
9876
+ #
9877
+ # See:
9878
+ # - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
9879
+ # - https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
9880
+ #
9881
+ created=float,
9308
9882
 
9883
+ # Millisecond portion of the time when the LogRecord was created.
9884
+ msecs=float,
9309
9885
 
9310
- class ProcessGroupImpl(ProcessGroup):
9311
- def __init__(
9312
- self,
9313
- config: ProcessGroupConfig,
9314
- *,
9315
- process_factory: ProcessFactory,
9316
- ):
9317
- super().__init__()
9886
+ # Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded.
9887
+ relativeCreated=float,
9318
9888
 
9319
- self._config = config
9320
- self._process_factory = process_factory
9889
+ #
9321
9890
 
9322
- by_name: ta.Dict[str, Process] = {}
9323
- for pconfig in self._config.processes or []:
9324
- p = check.isinstance(self._process_factory(pconfig, self), Process)
9325
- if p.name in by_name:
9326
- raise KeyError(f'name {p.name} of process {p} already registered by {by_name[p.name]}')
9327
- by_name[pconfig.name] = p
9328
- self._by_name = by_name
9891
+ # Thread ID if available, and `logging.logThreads` is truthy.
9892
+ thread=ta.Optional[int],
9329
9893
 
9330
- @property
9331
- def _by_key(self) -> ta.Mapping[str, Process]:
9332
- return self._by_name
9894
+ # Thread name if available, and `logging.logThreads` is truthy.
9895
+ threadName=ta.Optional[str],
9333
9896
 
9334
9897
  #
9335
9898
 
9336
- def __repr__(self) -> str:
9337
- return f'<{self.__class__.__name__} instance at {id(self)} named {self._config.name}>'
9899
+ # Process name if available. Set to None if `logging.logMultiprocessing` is not truthy. Otherwise, set to
9900
+ # 'MainProcess', then `sys.modules.get('multiprocessing').current_process().name` if that works, otherwise remains
9901
+ # as 'MainProcess'.
9902
+ #
9903
+ # As noted by stdlib:
9904
+ #
9905
+ # Errors may occur if multiprocessing has not finished loading yet - e.g. if a custom import hook causes
9906
+ # third-party code to run when multiprocessing calls import. See issue 8200 for an example
9907
+ #
9908
+ processName=ta.Optional[str],
9909
+
9910
+ # Process ID if available - that is, if `hasattr(os, 'getpid')` - and `logging.logProcesses` is truthy, otherwise
9911
+ # None.
9912
+ process=ta.Optional[int],
9338
9913
 
9339
9914
  #
9340
9915
 
9341
- @property
9342
- def name(self) -> str:
9343
- return self._config.name
9916
+ # Absent <3.12, otherwise asyncio.Task name if available, and `logging.logAsyncioTasks` is truthy. Set to
9917
+ # `sys.modules.get('asyncio').current_task().get_name()`, otherwise None.
9918
+ taskName=ta.Optional[str],
9919
+ )
9344
9920
 
9345
- @property
9346
- def config(self) -> ProcessGroupConfig:
9347
- return self._config
9921
+ KNOWN_STD_LOGGING_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(KNOWN_STD_LOGGING_RECORD_ATTRS)
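These attribute names are exactly what stdlib %-style format strings resolve against; a minimal stdlib-only sketch of that relationship:

    import logging

    # Every %(...)s key must be one of the known LogRecord attributes above.
    logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s: %(message)s')
    logging.getLogger('demo').warning('disk at %d%%', 93)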
9348
9922
 
9349
- @property
9350
- def by_name(self) -> ta.Mapping[str, Process]:
9351
- return self._by_name
9352
9923
 
9353
- #
9924
+ # Formatter:
9925
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L514 (3.8)
9926
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L554 (~3.14) # noqa
9927
+ #
9928
+ KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
9929
+ # The logged message, computed as msg % args. Set to `record.getMessage()`.
9930
+ message=str,
9931
+
9932
+ # Human-readable time when the LogRecord was created. By default this is of the form '2003-07-08 16:49:45,896' (the
9933
+ # numbers after the comma are the millisecond portion of the time). Set to `self.formatTime(record, self.datefmt)` if
9934
+ # `self.usesTime()`, otherwise unset.
9935
+ asctime=str,
9936
+
9937
+ # Used to cache the traceback text. If unset (falsey) on the record and `exc_info` is truthy, set to
9938
+ # `self.formatException(record.exc_info)` - otherwise unmodified.
9939
+ exc_text=ta.Optional[str],
9940
+ )
9354
9941
 
9355
- def get_unstopped_processes(self) -> ta.List[Process]:
9356
- return [x for x in self if not x.state.stopped]
9942
+ KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS)
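Unlike the ctor-set attributes, these only appear on a record once a Formatter has run over it; a stdlib-only sketch:

    import logging

    rec = logging.makeLogRecord({'msg': 'hello %s', 'args': ('world',)})
    assert not hasattr(rec, 'message')  # not set by the LogRecord ctor
    logging.Formatter('%(asctime)s %(message)s').format(rec)
    assert rec.message == 'hello world'  # set by Formatter.format
    assert hasattr(rec, 'asctime')      # set because the format string uses time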
9357
9943
 
9358
- def stop_all(self) -> None:
9359
- processes = list(self._by_name.values())
9360
- processes.sort()
9361
- processes.reverse() # stop in desc priority order
9362
9944
 
9363
- for proc in processes:
9364
- state = proc.state
9365
- if state == ProcessState.RUNNING:
9366
- # RUNNING -> STOPPING
9367
- proc.stop()
9945
+ ##
9368
9946
 
9369
- elif state == ProcessState.STARTING:
9370
- # STARTING -> STOPPING
9371
- proc.stop()
9372
9947
 
9373
- elif state == ProcessState.BACKOFF:
9374
- # BACKOFF -> FATAL
9375
- proc.give_up()
9948
+ class UnknownStdLoggingRecordAttrsWarning(LoggingSetupWarning):
9949
+ pass
9376
9950
 
9377
- def before_remove(self) -> None:
9378
- pass
9379
9951
 
9952
+ def _check_std_logging_record_attrs() -> None:
9953
+ rec_dct = dict(logging.makeLogRecord({}).__dict__)
9380
9954
 
9381
- ########################################
9382
- # ../process.py
9955
+ if (unk_rec_fields := frozenset(rec_dct) - KNOWN_STD_LOGGING_RECORD_ATTR_SET):
9956
+ import warnings # noqa
9957
+
9958
+ warnings.warn(
9959
+ f'Unknown log record attrs detected: {sorted(unk_rec_fields)!r}',
9960
+ UnknownStdLoggingRecordAttrsWarning,
9961
+ )
9962
+
9963
+
9964
+ _check_std_logging_record_attrs()
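The check fires once at import time, so any suppression has to be registered first; a hypothetical sketch for environments where an instrumentation patch legitimately injects extra record attributes:

    import warnings

    # Hypothetical: register before this module is imported, since the check
    # above runs at module load. Matching by message avoids needing the class.
    warnings.filterwarnings('ignore', message=r'Unknown log record attrs detected.*')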
9383
9965
 
9384
9966
 
9385
9967
  ##
9386
9968
 
9387
9969
 
9388
- class ProcessStateError(RuntimeError):
9389
- pass
9970
+ class LoggingContextLogRecord(logging.LogRecord):
9971
+ _SHOULD_ADD_TASK_NAME: ta.ClassVar[bool] = sys.version_info >= (3, 12)
9390
9972
 
9973
+ _UNKNOWN_PATH_NAME: ta.ClassVar[str] = '(unknown file)'
9974
+ _UNKNOWN_FUNC_NAME: ta.ClassVar[str] = '(unknown function)'
9975
+ _UNKNOWN_MODULE: ta.ClassVar[str] = 'Unknown module'
9391
9976
 
9392
- ##
9977
+ _STACK_INFO_PREFIX: ta.ClassVar[str] = 'Stack (most recent call last):\n'
9393
9978
 
9979
+ def __init__( # noqa
9980
+ self,
9981
+ # name,
9982
+ # level,
9983
+ # pathname,
9984
+ # lineno,
9985
+ # msg,
9986
+ # args,
9987
+ # exc_info,
9988
+ # func=None,
9989
+ # sinfo=None,
9990
+ # **kwargs,
9991
+ *,
9992
+ name: str,
9993
+ msg: str,
9994
+ args: ta.Union[tuple, dict],
9394
9995
 
9395
- class PidHistory(ta.Dict[Pid, Process]):
9396
- pass
9996
+ _logging_context: LoggingContext,
9997
+ ) -> None:
9998
+ ctx = _logging_context
9397
9999
 
10000
+ self.name: str = name
9398
10001
 
9399
- ########################################
9400
- # ../setupimpl.py
10002
+ self.msg: str = msg
9401
10003
 
10004
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L307
10005
+ if args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) and args[0]:
10006
+ args = args[0] # type: ignore[assignment]
10007
+ self.args: ta.Union[tuple, dict] = args
9402
10008
 
9403
- log = get_module_logger(globals()) # noqa
10009
+ self.levelname: str = logging.getLevelName(ctx.level)
10010
+ self.levelno: int = ctx.level
9404
10011
 
10012
+ if (caller := ctx.caller()) is not None:
10013
+ self.pathname: str = caller.file_path
10014
+ else:
10015
+ self.pathname = self._UNKNOWN_PATH_NAME
9405
10016
 
9406
- ##
10017
+ if (src_file := ctx.source_file()) is not None:
10018
+ self.filename: str = src_file.file_name
10019
+ self.module: str = src_file.module
10020
+ else:
10021
+ self.filename = self.pathname
10022
+ self.module = self._UNKNOWN_MODULE
10023
+
10024
+ self.exc_info: ta.Optional[LoggingExcInfoTuple] = ctx.exc_info_tuple
10025
+ self.exc_text: ta.Optional[str] = None
10026
+
10027
+ # If ctx.build_caller() was never called, we simply don't have a stack trace.
10028
+ if caller is not None:
10029
+ if (sinfo := caller.stack_info) is not None:
10030
+ self.stack_info: ta.Optional[str] = ''.join([  # the prefix already ends in '\n'
10031
+ self._STACK_INFO_PREFIX,
10032
+ sinfo[:-1] if sinfo.endswith('\n') else sinfo,  # strip exactly one trailing newline
10033
+ ])
10034
+ else:
10035
+ self.stack_info = None
9407
10036
 
10037
+ self.lineno: int = caller.line_no
10038
+ self.funcName: str = caller.name
9408
10039
 
9409
- class SupervisorSetupImpl(SupervisorSetup):
9410
- def __init__(
9411
- self,
9412
- *,
9413
- config: ServerConfig,
9414
- user: ta.Optional[SupervisorUser] = None,
9415
- epoch: ServerEpoch = ServerEpoch(0),
9416
- daemonize_listeners: DaemonizeListeners = DaemonizeListeners([]),
9417
- ) -> None:
9418
- super().__init__()
10040
+ else:
10041
+ self.stack_info = None
9419
10042
 
9420
- self._config = config
9421
- self._user = user
9422
- self._epoch = epoch
9423
- self._daemonize_listeners = daemonize_listeners
10043
+ self.lineno = 0
10044
+ self.funcName = self._UNKNOWN_FUNC_NAME
9424
10045
 
9425
- #
10046
+ times = ctx.times
10047
+ self.created: float = times.created
10048
+ self.msecs: float = times.msecs
10049
+ self.relativeCreated: float = times.relative_created
9426
10050
 
9427
- @property
9428
- def first(self) -> bool:
9429
- return not self._epoch
10051
+ if logging.logThreads:
10052
+ thread = check.not_none(ctx.thread())
10053
+ self.thread: ta.Optional[int] = thread.ident
10054
+ self.threadName: ta.Optional[str] = thread.name
10055
+ else:
10056
+ self.thread = None
10057
+ self.threadName = None
9430
10058
 
9431
- #
10059
+ if logging.logProcesses:
10060
+ process = check.not_none(ctx.process())
10061
+ self.process: ta.Optional[int] = process.pid
10062
+ else:
10063
+ self.process = None
9432
10064
 
9433
- @cached_nullary
9434
- def setup(self) -> None:
9435
- if not self.first:
9436
- # prevent crash on libdispatch-based systems, at least for the first request
9437
- self._cleanup_fds()
10065
+ if logging.logMultiprocessing:
10066
+ if (mp := ctx.multiprocessing()) is not None:
10067
+ self.processName: ta.Optional[str] = mp.process_name
10068
+ else:
10069
+ self.processName = None
10070
+ else:
10071
+ self.processName = None
9438
10072
 
9439
- self._set_uid_or_exit()
10073
+ # Absent <3.12
10074
+ if getattr(logging, 'logAsyncioTasks', None):
10075
+ if (at := ctx.asyncio_task()) is not None:
10076
+ self.taskName: ta.Optional[str] = at.name
10077
+ else:
10078
+ self.taskName = None
10079
+ else:
10080
+ self.taskName = None
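The stdlib switches consulted throughout this constructor are plain module-level globals; turning them off skips the corresponding per-record lookups, and the fields above come out as None. A stdlib-only sketch:

    import logging

    # Disable per-record thread/process/asyncio introspection globally.
    logging.logThreads = False
    logging.logProcesses = False
    logging.logMultiprocessing = False
    if hasattr(logging, 'logAsyncioTasks'):  # only exists on 3.12+
        logging.logAsyncioTasks = False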
9440
10081
 
9441
- if self.first:
9442
- self._set_rlimits_or_exit()
9443
10082
 
9444
- # this sets the options.logger object delay logger instantiation until after setuid
9445
- if not self._config.nocleanup:
9446
- # clean up old automatic logs
9447
- self._clear_auto_child_logdir()
10083
+ ########################################
10084
+ # ../dispatchers.py
10085
+
10086
+
10087
+ ##
10088
+
10089
+
10090
+ class Dispatchers(KeyedCollection[Fd, FdioHandler]):
10091
+ def _key(self, v: FdioHandler) -> Fd:
10092
+ return Fd(v.fd())
10093
+
10094
+ #
10095
+
10096
+ def drain(self) -> None:
10097
+ for d in self:
10098
+ # note that we *must* call readable() for every dispatcher, as it may have side effects for a given
10099
+ # dispatcher (e.g. call handle_listener_state_change for event listener processes)
10100
+ if d.readable():
10101
+ d.on_readable()
10102
+ if d.writable():
10103
+ d.on_writable()
10104
+
10105
+ #
10106
+
10107
+ def remove_logs(self) -> None:
10108
+ for d in self:
10109
+ if isinstance(d, ProcessOutputDispatcher):
10110
+ d.remove_logs()
9448
10111
 
9449
- if not self._config.nodaemon and self.first:
9450
- self._daemonize()
10112
+ def reopen_logs(self) -> None:
10113
+ for d in self:
10114
+ if isinstance(d, ProcessOutputDispatcher):
10115
+ d.reopen_logs()
9451
10116
 
9452
- # writing pid file needs to come *after* daemonizing or pid will be wrong
9453
- self._write_pidfile()
9454
10117
 
9455
- @cached_nullary
9456
- def cleanup(self) -> None:
9457
- self._cleanup_pidfile()
10118
+ ########################################
10119
+ # ../groupsimpl.py
9458
10120
 
9459
- #
9460
10121
 
9461
- def _cleanup_fds(self) -> None:
9462
- # try to close any leaked file descriptors (for reload)
9463
- start = 5
9464
- os.closerange(start, self._config.min_fds)
10122
+ ##
9465
10123
 
9466
- #
9467
10124
 
9468
- def _set_uid_or_exit(self) -> None:
9469
- """
9470
- Set the uid of the supervisord process. Called during supervisord startup only. No return value. Exits the
9471
- process via usage() if privileges could not be dropped.
9472
- """
10125
+ class ProcessFactory(Func2[ProcessConfig, ProcessGroup, Process]):
10126
+ pass
9473
10127
 
9474
- if self._user is None:
9475
- if os.getuid() == 0:
9476
- warnings.warn(
9477
- 'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
9478
- 'config file. If you intend to run as root, you can set user=root in the config file to avoid '
9479
- 'this message.',
9480
- )
9481
- else:
9482
- msg = drop_privileges(self._user.uid)
9483
- if msg is None:
9484
- log.info('Set uid to user %s succeeded', self._user.uid)
9485
- else: # failed to drop privileges
9486
- raise RuntimeError(msg)
9487
10128
 
9488
- #
10129
+ class ProcessGroupImpl(ProcessGroup):
10130
+ def __init__(
10131
+ self,
10132
+ config: ProcessGroupConfig,
10133
+ *,
10134
+ process_factory: ProcessFactory,
10135
+ ):
10136
+ super().__init__()
9489
10137
 
9490
- def _set_rlimits_or_exit(self) -> None:
9491
- """
9492
- Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits the
9493
- process via usage() if any rlimits could not be set.
9494
- """
10138
+ self._config = config
10139
+ self._process_factory = process_factory
9495
10140
 
9496
- limits = []
10141
+ by_name: ta.Dict[str, Process] = {}
10142
+ for pconfig in self._config.processes or []:
10143
+ p = check.isinstance(self._process_factory(pconfig, self), Process)
10144
+ if p.name in by_name:
10145
+ raise KeyError(f'name {p.name} of process {p} already registered by {by_name[p.name]}')
10146
+ by_name[pconfig.name] = p
10147
+ self._by_name = by_name
9497
10148
 
9498
- if hasattr(resource, 'RLIMIT_NOFILE'):
9499
- limits.append({
9500
- 'msg': (
9501
- 'The minimum number of file descriptors required to run this process is %(min_limit)s as per the '
9502
- '"min_fds" command-line argument or config file setting. The current environment will only allow '
9503
- 'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
9504
- 'your environment (see README.rst) or lower the min_fds setting in the config file to allow the '
9505
- 'process to start.'
9506
- ),
9507
- 'min': self._config.min_fds,
9508
- 'resource': resource.RLIMIT_NOFILE,
9509
- 'name': 'RLIMIT_NOFILE',
9510
- })
10149
+ @property
10150
+ def _by_key(self) -> ta.Mapping[str, Process]:
10151
+ return self._by_name
9511
10152
 
9512
- if hasattr(resource, 'RLIMIT_NPROC'):
9513
- limits.append({
9514
- 'msg': (
9515
- 'The minimum number of available processes required to run this program is %(min_limit)s as per '
9516
- 'the "minprocs" command-line argument or config file setting. The current environment will only '
9517
- 'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
9518
- 'environment (see README.rst) or lower the minprocs setting in the config file to allow the '
9519
- 'program to start.'
9520
- ),
9521
- 'min': self._config.min_procs,
9522
- 'resource': resource.RLIMIT_NPROC,
9523
- 'name': 'RLIMIT_NPROC',
9524
- })
10153
+ #
9525
10154
 
9526
- for limit in limits:
9527
- min_limit = limit['min']
9528
- res = limit['resource']
9529
- msg = limit['msg']
9530
- name = limit['name']
10155
+ def __repr__(self) -> str:
10156
+ return f'<{self.__class__.__name__} instance at {id(self)} named {self._config.name}>'
9531
10157
 
9532
- soft, hard = resource.getrlimit(res) # type: ignore
10158
+ #
9533
10159
 
9534
- # -1 means unlimited
9535
- if soft < min_limit and soft != -1: # type: ignore
9536
- if hard < min_limit and hard != -1: # type: ignore
9537
- # setrlimit should increase the hard limit if we are root, if not then setrlimit raises and we print
9538
- # usage
9539
- hard = min_limit # type: ignore
10160
+ @property
10161
+ def name(self) -> str:
10162
+ return self._config.name
9540
10163
 
9541
- try:
9542
- resource.setrlimit(res, (min_limit, hard)) # type: ignore
9543
- log.info('Increased %s limit to %s', name, min_limit)
9544
- except (OSError, ValueError):
9545
- raise RuntimeError(msg % dict( # type: ignore # noqa
9546
- min_limit=min_limit,
9547
- res=res,
9548
- name=name,
9549
- soft=soft,
9550
- hard=hard,
9551
- ))
10164
+ @property
10165
+ def config(self) -> ProcessGroupConfig:
10166
+ return self._config
10167
+
10168
+ @property
10169
+ def by_name(self) -> ta.Mapping[str, Process]:
10170
+ return self._by_name
9552
10171
 
9553
10172
  #
9554
10173
 
9555
- _unlink_pidfile = False
10174
+ def get_unstopped_processes(self) -> ta.List[Process]:
10175
+ return [x for x in self if not x.state.stopped]
9556
10176
 
9557
- def _write_pidfile(self) -> None:
9558
- pid = os.getpid()
9559
- try:
9560
- with open(self._config.pidfile, 'w') as f:
9561
- f.write(f'{pid}\n')
9562
- except OSError:
9563
- log.critical('could not write pidfile %s', self._config.pidfile)
9564
- else:
9565
- self._unlink_pidfile = True
9566
- log.info('supervisord started with pid %s', pid)
10177
+ def stop_all(self) -> None:
10178
+ processes = list(self._by_name.values())
10179
+ processes.sort()
10180
+ processes.reverse() # stop in desc priority order
9567
10181
 
9568
- def _cleanup_pidfile(self) -> None:
9569
- if self._unlink_pidfile:
9570
- try_unlink(self._config.pidfile)
10182
+ for proc in processes:
10183
+ state = proc.state
10184
+ if state == ProcessState.RUNNING:
10185
+ # RUNNING -> STOPPING
10186
+ proc.stop()
9571
10187
 
9572
- #
10188
+ elif state == ProcessState.STARTING:
10189
+ # STARTING -> STOPPING
10190
+ proc.stop()
9573
10191
 
9574
- def _clear_auto_child_logdir(self) -> None:
9575
- # must be called after realize()
9576
- child_logdir = self._config.child_logdir
9577
- if child_logdir == '/dev/null':
9578
- return
10192
+ elif state == ProcessState.BACKOFF:
10193
+ # BACKOFF -> FATAL
10194
+ proc.give_up()
9579
10195
 
9580
- fnre = re.compile(rf'.+?---{self._config.identifier}-\S+\.log\.?\d{{0,4}}')
9581
- try:
9582
- filenames = os.listdir(child_logdir)
9583
- except OSError:
9584
- log.warning('Could not clear child_log dir')
9585
- return
10196
+ def before_remove(self) -> None:
10197
+ pass
9586
10198
 
9587
- for filename in filenames:
9588
- if fnre.match(filename):
9589
- pathname = os.path.join(child_logdir, filename)
9590
- try:
9591
- os.remove(pathname)
9592
- except OSError:
9593
- log.warning('Failed to clean up %r', pathname)
9594
10199
 
9595
- #
10200
+ ########################################
10201
+ # ../process.py
9596
10202
 
9597
- def _daemonize(self) -> None:
9598
- for dl in self._daemonize_listeners:
9599
- dl.before_daemonize()
9600
10203
 
9601
- self._do_daemonize()
10204
+ ##
9602
10205
 
9603
- for dl in self._daemonize_listeners:
9604
- dl.after_daemonize()
9605
10206
 
9606
- def _do_daemonize(self) -> None:
9607
- # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
9608
- # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
9609
- # our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
9610
- # terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
9611
- # use os.setsid. It means "set session id", which has the effect of disassociating a process from its current
9612
- # session and process group and setting itself up as a new session leader.
9613
- #
9614
- # Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
9615
- # of ourselves that is guaranteed to not be a session group leader.
9616
- #
9617
- # We also change directories, set stderr and stdout to null, and change our umask.
9618
- #
9619
- # This explanation was (gratefully) garnered from
9620
- # http://www.cems.uwe.ac.uk/~irjohnso/coursenotes/lrc/system/daemons/d3.htm
10207
+ class ProcessStateError(RuntimeError):
10208
+ pass
9621
10209
 
9622
- pid = os.fork()
9623
- if pid != 0:
9624
- # Parent
9625
- log.debug('supervisord forked; parent exiting')
9626
- real_exit(Rc(0))
9627
10210
 
9628
- # Child
9629
- log.info('daemonizing the supervisord process')
9630
- if self._config.directory:
9631
- try:
9632
- os.chdir(self._config.directory)
9633
- except OSError as err:
9634
- log.critical("can't chdir into %r: %s", self._config.directory, err)
9635
- else:
9636
- log.info('set current directory: %r', self._config.directory)
10211
+ ##
9637
10212
 
9638
- os.dup2(os.open('/dev/null', os.O_RDONLY), 0)
9639
- os.dup2(os.open('/dev/null', os.O_WRONLY), 1)
9640
- os.dup2(os.open('/dev/null', os.O_WRONLY), 2)
9641
10213
 
9642
- # XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
9643
- # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
9644
- # again after the setsid() call, for obscure SVR4 reasons.
9645
- os.setsid()
9646
- os.umask(self._config.umask)
10214
+ class PidHistory(ta.Dict[Pid, Process]):
10215
+ pass
9647
10216
 
9648
10217
 
9649
10218
  ########################################
@@ -9778,10 +10347,49 @@ class CoroHttpServerConnectionFdioHandler(SocketFdioHandler):
9778
10347
  if not wb.write(send):
9779
10348
  break
9780
10349
 
9781
- if wb.rem < 1:
9782
- self._write_buf = None
9783
- self._cur_io = None
9784
- self._next_io()
10350
+ if wb.rem < 1:
10351
+ self._write_buf = None
10352
+ self._cur_io = None
10353
+ self._next_io()
10354
+
10355
+
10356
+ ########################################
10357
+ # ../../../omlish/logs/std/adapters.py
10358
+
10359
+
10360
+ ##
10361
+
10362
+
10363
+ class StdLogger(Logger):
10364
+ def __init__(self, std: logging.Logger) -> None:
10365
+ super().__init__()
10366
+
10367
+ self._std = std
10368
+
10369
+ @property
10370
+ def std(self) -> logging.Logger:
10371
+ return self._std
10372
+
10373
+ def get_effective_level(self) -> LogLevel:
10374
+ return self._std.getEffectiveLevel()
10375
+
10376
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> None:
10377
+ if not self.is_enabled_for(ctx.level):
10378
+ return
10379
+
10380
+ ctx.capture()
10381
+
10382
+ ms, args = self._prepare_msg_args(msg, *args)
10383
+
10384
+ rec = LoggingContextLogRecord(
10385
+ name=self._std.name,
10386
+ msg=ms,
10387
+ args=args,
10388
+
10389
+ _logging_context=ctx,
10390
+ )
10391
+
10392
+ self._std.handle(rec)
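A minimal usage sketch, assuming the abstract Logger base class exposes the usual level-named helpers (info(), error(), ...) on top of _log():

    import logging

    logging.basicConfig(level=logging.INFO)
    log = StdLogger(logging.getLogger('demo'))
    log.info('spawned %s', 'worker-1')  # routed through logging.Logger.handle()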
9785
10393
 
9786
10394
 
9787
10395
  ########################################
@@ -9874,128 +10482,375 @@ class ProcessGroupManager(
9874
10482
 
9875
10483
 
9876
10484
  ########################################
9877
- # ../io.py
10485
+ # ../spawning.py
9878
10486
 
9879
10487
 
9880
- log = get_module_logger(globals()) # noqa
10488
+ ##
10489
+
10490
+
10491
+ @dc.dataclass(frozen=True)
10492
+ class SpawnedProcess:
10493
+ pid: Pid
10494
+ pipes: ProcessPipes
10495
+ dispatchers: Dispatchers
10496
+
10497
+
10498
+ class ProcessSpawnError(RuntimeError):
10499
+ pass
10500
+
10501
+
10502
+ class ProcessSpawning:
10503
+ @property
10504
+ @abc.abstractmethod
10505
+ def process(self) -> Process:
10506
+ raise NotImplementedError
10507
+
10508
+ #
10509
+
10510
+ @abc.abstractmethod
10511
+ def spawn(self) -> SpawnedProcess: # Raises[ProcessSpawnError]
10512
+ raise NotImplementedError
10513
+
10514
+
10515
+ ########################################
10516
+ # ../../../omlish/logs/modules.py
9881
10517
 
9882
10518
 
9883
10519
  ##
9884
10520
 
9885
10521
 
9886
- HasDispatchersList = ta.NewType('HasDispatchersList', ta.Sequence[HasDispatchers])
10522
+ def get_module_logger(mod_globals: ta.Mapping[str, ta.Any]) -> Logger:
10523
+ return StdLogger(logging.getLogger(mod_globals.get('__name__'))) # noqa
9887
10524
 
9888
10525
 
9889
- class IoManager(HasDispatchers):
10526
+ ########################################
10527
+ # ../dispatchersimpl.py
10528
+
10529
+
10530
+ log = get_module_logger(globals()) # noqa
10531
+
10532
+
10533
+ ##
10534
+
10535
+
10536
+ class BaseProcessDispatcherImpl(ProcessDispatcher, Abstract):
9890
10537
  def __init__(
9891
10538
  self,
10539
+ process: Process,
10540
+ channel: ProcessOutputChannel,
10541
+ fd: Fd,
9892
10542
  *,
9893
- poller: FdioPoller,
9894
- has_dispatchers_list: HasDispatchersList,
10543
+ event_callbacks: EventCallbacks,
10544
+ server_config: ServerConfig,
9895
10545
  ) -> None:
9896
10546
  super().__init__()
9897
10547
 
9898
- self._poller = poller
9899
- self._has_dispatchers_list = has_dispatchers_list
10548
+ self._process = process # process which "owns" this dispatcher
10549
+ self._channel = channel # 'stderr' or 'stdout'
10550
+ self._fd = fd
10551
+ self._event_callbacks = event_callbacks
10552
+ self._server_config = server_config
9900
10553
 
9901
- def get_dispatchers(self) -> Dispatchers:
9902
- return Dispatchers(
9903
- d
9904
- for hd in self._has_dispatchers_list
9905
- for d in hd.get_dispatchers()
10554
+ self._closed = False # True if close() has been called
10555
+
10556
+ #
10557
+
10558
+ def __repr__(self) -> str:
10559
+ return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
10560
+
10561
+ #
10562
+
10563
+ @property
10564
+ def process(self) -> Process:
10565
+ return self._process
10566
+
10567
+ @property
10568
+ def channel(self) -> ProcessOutputChannel:
10569
+ return self._channel
10570
+
10571
+ def fd(self) -> Fd:
10572
+ return self._fd
10573
+
10574
+ @property
10575
+ def closed(self) -> bool:
10576
+ return self._closed
10577
+
10578
+ #
10579
+
10580
+ def close(self) -> None:
10581
+ if not self._closed:
10582
+ log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
10583
+ self._closed = True
10584
+
10585
+ def on_error(self, exc: ta.Optional[BaseException] = None) -> None:
10586
+ nil, t, v, tbinfo = compact_traceback()
10587
+
10588
+ log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
10589
+ self.close()
10590
+
10591
+
10592
+ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispatcher):
10593
+ """
10594
+ Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
10595
+
10596
+ - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
10597
+ ProcessCommunicationEvent by calling notify_event(event).
10598
+ - route the output to the appropriate log handlers as specified in the config.
10599
+ """
10600
+
10601
+ def __init__(
10602
+ self,
10603
+ process: Process,
10604
+ event_type: ta.Type[ProcessCommunicationEvent],
10605
+ fd: Fd,
10606
+ *,
10607
+ event_callbacks: EventCallbacks,
10608
+ server_config: ServerConfig,
10609
+ ) -> None:
10610
+ super().__init__(
10611
+ process,
10612
+ event_type.channel,
10613
+ fd,
10614
+ event_callbacks=event_callbacks,
10615
+ server_config=server_config,
9906
10616
  )
9907
10617
 
9908
- def poll(self) -> None:
9909
- dispatchers = self.get_dispatchers()
10618
+ self._event_type = event_type
10619
+
10620
+ self._lc: ProcessConfig.Log = getattr(process.config, self._channel)
10621
+
10622
+ self._init_normal_log()
10623
+ self._init_capture_log()
10624
+
10625
+ self._child_log = self._normal_log
10626
+
10627
+ self._capture_mode = False # are we capturing process event data
10628
+ self._output_buffer = b'' # data waiting to be logged
10629
+
10630
+ # all code below is purely for minor speedups
10631
+
10632
+ begin_token = self._event_type.BEGIN_TOKEN
10633
+ end_token = self._event_type.END_TOKEN
10634
+ self._begin_token_data = (begin_token, len(begin_token))
10635
+ self._end_token_data = (end_token, len(end_token))
10636
+
10637
+ self._main_log_level = logging.DEBUG
10638
+
10639
+ self._log_to_main_log = self._server_config.loglevel <= self._main_log_level
10640
+
10641
+ self._stdout_events_enabled = self._process.config.stdout.events_enabled
10642
+ self._stderr_events_enabled = self._process.config.stderr.events_enabled
10643
+
10644
+ _child_log: ta.Optional[logging.Logger] = None # the current logger (normal_log or capture_log)
10645
+ _normal_log: ta.Optional[logging.Logger] = None # the "normal" (non-capture) logger
10646
+ _capture_log: ta.Optional[logging.Logger] = None # the logger used while we're in capture_mode
10647
+
10648
+ def _init_normal_log(self) -> None:
10649
+ """
10650
+ Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
10651
+ enabled.
10652
+ """
10653
+
10654
+ config = self._process.config # noqa
10655
+ channel = self._channel # noqa
10656
+
10657
+ logfile = self._lc.file
10658
+ max_bytes = self._lc.max_bytes # noqa
10659
+ backups = self._lc.backups # noqa
10660
+ to_syslog = self._lc.syslog
10661
+
10662
+ if logfile or to_syslog:
10663
+ self._normal_log = logging.getLogger(__name__)
10664
+
10665
+ # if logfile:
10666
+ # loggers.handle_file(
10667
+ # self.normal_log,
10668
+ # filename=logfile,
10669
+ # fmt='%(message)s',
10670
+ # rotating=bool(max_bytes), # optimization
10671
+ # max_bytes=max_bytes,
10672
+ # backups=backups,
10673
+ # )
10674
+
10675
+ # if to_syslog:
10676
+ # loggers.handle_syslog(
10677
+ # self.normal_log,
10678
+ # fmt=config.name + ' %(message)s',
10679
+ # )
10680
+
10681
+ def _init_capture_log(self) -> None:
10682
+ """
10683
+ Configure the capture log for this process. This log is used to temporarily capture output when special output
10684
+ is detected. Sets self.capture_log if capturing is enabled.
10685
+ """
10686
+
10687
+ capture_max_bytes = self._lc.capture_max_bytes
10688
+ if capture_max_bytes:
10689
+ self._capture_log = logging.getLogger(__name__)
10690
+ # loggers.handle_boundIO(
10691
+ # self._capture_log,
10692
+ # fmt='%(message)s',
10693
+ # max_bytes=capture_max_bytes,
10694
+ # )
10695
+
10696
+ def remove_logs(self) -> None:
10697
+ for l in (self._normal_log, self._capture_log):
10698
+ if l is not None:
10699
+ for handler in l.handlers:
10700
+ handler.remove() # type: ignore
10701
+ handler.reopen() # type: ignore
10702
+
10703
+ def reopen_logs(self) -> None:
10704
+ for l in (self._normal_log, self._capture_log):
10705
+ if l is not None:
10706
+ for handler in l.handlers:
10707
+ handler.reopen() # type: ignore
10708
+
10709
+ def _log(self, data: ta.Union[str, bytes, None]) -> None:
10710
+ if not data:
10711
+ return
10712
+
10713
+ if self._server_config.strip_ansi:
10714
+ data = strip_escapes(as_bytes(data))
10715
+
10716
+ if self._child_log:
10717
+ self._child_log.info(data)
10718
+
10719
+ if self._log_to_main_log:
10720
+ if not isinstance(data, bytes):
10721
+ text = data
10722
+ else:
10723
+ try:
10724
+ text = data.decode('utf-8')
10725
+ except UnicodeDecodeError:
10726
+ text = f'Undecodable: {data!r}'
10727
+ log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text) # noqa
10728
+
10729
+ if self._channel == 'stdout':
10730
+ if self._stdout_events_enabled:
10731
+ self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
10732
+
10733
+ elif self._stderr_events_enabled:
10734
+ self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
10735
+
10736
+ def record_output(self) -> None:
10737
+ if self._capture_log is None:
10738
+ # shortcut trying to find capture data
10739
+ data = self._output_buffer
10740
+ self._output_buffer = b''
10741
+ self._log(data)
10742
+ return
10743
+
10744
+ if self._capture_mode:
10745
+ token, token_len = self._end_token_data
10746
+ else:
10747
+ token, token_len = self._begin_token_data
9910
10748
 
9911
- self._poller.update(
9912
- {fd for fd, d in dispatchers.items() if d.readable()},
9913
- {fd for fd, d in dispatchers.items() if d.writable()},
9914
- )
10749
+ if len(self._output_buffer) <= token_len:
10750
+ return # not enough data
9915
10751
 
9916
- timeout = 1 # this cannot be greater than the smallest TickEvent (5)
10752
+ data = self._output_buffer
10753
+ self._output_buffer = b''
9917
10754
 
9918
- polled = self._poller.poll(timeout)
10755
+ try:
10756
+ before, after = data.split(token, 1)
10757
+ except ValueError:
10758
+ after = None
10759
+ index = find_prefix_at_end(data, token)
10760
+ if index:
10761
+ self._output_buffer = self._output_buffer + data[-index:]
10762
+ data = data[:-index]
10763
+ self._log(data)
10764
+ else:
10765
+ self._log(before)
10766
+ self.toggle_capture_mode()
10767
+ self._output_buffer = after # type: ignore
9919
10768
 
9920
- if polled.msg is not None:
9921
- log.error(polled.msg)
9922
- if polled.exc is not None:
9923
- log.error('Poll exception: %r', polled.exc)
10769
+ if after:
10770
+ self.record_output()
9924
10771
 
9925
- for r in polled.r:
9926
- fd = Fd(r)
9927
- if fd in dispatchers:
9928
- dispatcher = dispatchers[fd]
9929
- try:
9930
- log.debug('read event caused by %r', dispatcher)
9931
- dispatcher.on_readable()
9932
- if not dispatcher.readable():
9933
- self._poller.unregister_readable(fd)
9934
- except ExitNow:
9935
- raise
9936
- except Exception as exc: # noqa
9937
- log.exception('Error in dispatcher: %r', dispatcher)
9938
- dispatcher.on_error(exc)
9939
- else:
9940
- # if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
9941
- # time, which may cause 100% cpu usage
9942
- log.debug('unexpected read event from fd %r', fd)
9943
- try:
9944
- self._poller.unregister_readable(fd)
9945
- except Exception: # noqa
9946
- pass
10772
+ def toggle_capture_mode(self) -> None:
10773
+ self._capture_mode = not self._capture_mode
9947
10774
 
9948
- for w in polled.w:
9949
- fd = Fd(w)
9950
- if fd in dispatchers:
9951
- dispatcher = dispatchers[fd]
9952
- try:
9953
- log.debug('write event caused by %r', dispatcher)
9954
- dispatcher.on_writable()
9955
- if not dispatcher.writable():
9956
- self._poller.unregister_writable(fd)
9957
- except ExitNow:
9958
- raise
9959
- except Exception as exc: # noqa
9960
- log.exception('Error in dispatcher: %r', dispatcher)
9961
- dispatcher.on_error(exc)
10775
+ if self._capture_log is not None:
10776
+ if self._capture_mode:
10777
+ self._child_log = self._capture_log
9962
10778
  else:
9963
- log.debug('unexpected write event from fd %r', fd)
9964
- try:
9965
- self._poller.unregister_writable(fd)
9966
- except Exception: # noqa
9967
- pass
9968
-
10779
+ for handler in self._capture_log.handlers:
10780
+ handler.flush()
10781
+ data = self._capture_log.getvalue() # type: ignore
10782
+ channel = self._channel
10783
+ procname = self._process.config.name
10784
+ event = self._event_type(self._process, self._process.pid, data)
10785
+ self._event_callbacks.notify(event)
9969
10786
 
9970
- ########################################
9971
- # ../spawning.py
10787
+ log.debug('%r %s emitted a comm event', procname, channel)
10788
+ for handler in self._capture_log.handlers:
10789
+ handler.remove() # type: ignore
10790
+ handler.reopen() # type: ignore
10791
+ self._child_log = self._normal_log
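The child-process side of this capture protocol is just the tokens on the wire; a hypothetical child that wants to emit a ProcessCommunicationEvent might write (the JSON payload is illustrative only):

    import sys

    # Everything between BEGIN and END is buffered into the capture log and
    # surfaced as an event instead of ordinary stdout output.
    sys.stdout.write('<!--XSUPERVISOR:BEGIN-->')
    sys.stdout.write('{"progress": 42}')
    sys.stdout.write('<!--XSUPERVISOR:END-->')
    sys.stdout.flush()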
9972
10792
 
10793
+ def writable(self) -> bool:
10794
+ return False
9973
10795
 
9974
- ##
10796
+ def readable(self) -> bool:
10797
+ if self._closed:
10798
+ return False
10799
+ return True
9975
10800
 
10801
+ def on_readable(self) -> None:
10802
+ data = read_fd(self._fd)
10803
+ self._output_buffer += data
10804
+ self.record_output()
10805
+ if not data:
10806
+ # if we get no data back from the pipe, it means that the child process has ended. See
10807
+ # mail.python.org/pipermail/python-dev/2004-August/046850.html
10808
+ self.close()
9976
10809
 
9977
- @dc.dataclass(frozen=True)
9978
- class SpawnedProcess:
9979
- pid: Pid
9980
- pipes: ProcessPipes
9981
- dispatchers: Dispatchers
9982
10810
 
10811
+ class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatcher):
10812
+ def __init__(
10813
+ self,
10814
+ process: Process,
10815
+ channel: ProcessOutputChannel,
10816
+ fd: Fd,
10817
+ *,
10818
+ event_callbacks: EventCallbacks,
10819
+ server_config: ServerConfig,
10820
+ ) -> None:
10821
+ super().__init__(
10822
+ process,
10823
+ channel,
10824
+ fd,
10825
+ event_callbacks=event_callbacks,
10826
+ server_config=server_config,
10827
+ )
9983
10828
 
9984
- class ProcessSpawnError(RuntimeError):
9985
- pass
10829
+ self._input_buffer = b''
9986
10830
 
10831
+ def write(self, chars: ta.Union[bytes, str]) -> None:
10832
+ self._input_buffer += as_bytes(chars)
9987
10833
 
9988
- class ProcessSpawning:
9989
- @property
9990
- @abc.abstractmethod
9991
- def process(self) -> Process:
9992
- raise NotImplementedError
10834
+ def writable(self) -> bool:
10835
+ if self._input_buffer and not self._closed:
10836
+ return True
10837
+ return False
9993
10838
 
9994
- #
10839
+ def flush(self) -> None:
10840
+ # other code depends on this raising EPIPE if the pipe is closed
10841
+ sent = os.write(self._fd, as_bytes(self._input_buffer))
10842
+ self._input_buffer = self._input_buffer[sent:]
9995
10843
 
9996
- @abc.abstractmethod
9997
- def spawn(self) -> SpawnedProcess: # Raises[ProcessSpawnError]
9998
- raise NotImplementedError
10844
+ def on_writable(self) -> None:
10845
+ if self._input_buffer:
10846
+ try:
10847
+ self.flush()
10848
+ except OSError as why:
10849
+ if why.args[0] == errno.EPIPE:
10850
+ self._input_buffer = b''
10851
+ self.close()
10852
+ else:
10853
+ raise
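The EPIPE contract that flush() relies on is plain POSIX pipe behavior; a stdlib-only illustration (CPython ignores SIGPIPE at startup, so the failed write surfaces as an OSError):

    import errno
    import os

    r, w = os.pipe()
    os.close(r)  # reader disappears, as when the child process exits
    try:
        os.write(w, b'data')
    except OSError as e:
        assert e.errno == errno.EPIPE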
9999
10854
 
10000
10855
 
10001
10856
  ########################################
@@ -10120,6 +10975,100 @@ class SupervisorHttpHandler(HttpHandler_):
10120
10975
  )
10121
10976
 
10122
10977
 
10978
+ ########################################
10979
+ # ../io.py
10980
+
10981
+
10982
+ log = get_module_logger(globals()) # noqa
10983
+
10984
+
10985
+ ##
10986
+
10987
+
10988
+ HasDispatchersList = ta.NewType('HasDispatchersList', ta.Sequence[HasDispatchers])
10989
+
10990
+
10991
+ class IoManager(HasDispatchers):
10992
+ def __init__(
10993
+ self,
10994
+ *,
10995
+ poller: FdioPoller,
10996
+ has_dispatchers_list: HasDispatchersList,
10997
+ ) -> None:
10998
+ super().__init__()
10999
+
11000
+ self._poller = poller
11001
+ self._has_dispatchers_list = has_dispatchers_list
11002
+
11003
+ def get_dispatchers(self) -> Dispatchers:
11004
+ return Dispatchers(
11005
+ d
11006
+ for hd in self._has_dispatchers_list
11007
+ for d in hd.get_dispatchers()
11008
+ )
11009
+
11010
+ def poll(self) -> None:
11011
+ dispatchers = self.get_dispatchers()
11012
+
11013
+ self._poller.update(
11014
+ {fd for fd, d in dispatchers.items() if d.readable()},
11015
+ {fd for fd, d in dispatchers.items() if d.writable()},
11016
+ )
11017
+
11018
+ timeout = 1 # this cannot be greater than the smallest TickEvent (5)
11019
+
11020
+ polled = self._poller.poll(timeout)
11021
+
11022
+ if polled.msg is not None:
11023
+ log.error(polled.msg)
11024
+ if polled.exc is not None:
11025
+ log.error('Poll exception: %r', polled.exc)
11026
+
11027
+ for r in polled.r:
11028
+ fd = Fd(r)
11029
+ if fd in dispatchers:
11030
+ dispatcher = dispatchers[fd]
11031
+ try:
11032
+ log.debug('read event caused by %r', dispatcher)
11033
+ dispatcher.on_readable()
11034
+ if not dispatcher.readable():
11035
+ self._poller.unregister_readable(fd)
11036
+ except ExitNow:
11037
+ raise
11038
+ except Exception as exc: # noqa
11039
+ log.exception('Error in dispatcher: %r', dispatcher)
11040
+ dispatcher.on_error(exc)
11041
+ else:
11042
+ # if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
11043
+ # time, which may cause 100% cpu usage
11044
+ log.debug('unexpected read event from fd %r', fd)
11045
+ try:
11046
+ self._poller.unregister_readable(fd)
11047
+ except Exception: # noqa
11048
+ pass
11049
+
11050
+ for w in polled.w:
11051
+ fd = Fd(w)
11052
+ if fd in dispatchers:
11053
+ dispatcher = dispatchers[fd]
11054
+ try:
11055
+ log.debug('write event caused by %r', dispatcher)
11056
+ dispatcher.on_writable()
11057
+ if not dispatcher.writable():
11058
+ self._poller.unregister_writable(fd)
11059
+ except ExitNow:
11060
+ raise
11061
+ except Exception as exc: # noqa
11062
+ log.exception('Error in dispatcher: %r', dispatcher)
11063
+ dispatcher.on_error(exc)
11064
+ else:
11065
+ log.debug('unexpected write event from fd %r', fd)
11066
+ try:
11067
+ self._poller.unregister_writable(fd)
11068
+ except Exception: # noqa
11069
+ pass
11070
+
11071
+
10123
11072
  ########################################
10124
11073
  # ../processimpl.py
10125
11074
 
@@ -10603,6 +11552,256 @@ class ProcessImpl(Process):
10603
11552
  pass
10604
11553
 
10605
11554
 
11555
+ ########################################
11556
+ # ../setupimpl.py
11557
+
11558
+
11559
+ log = get_module_logger(globals()) # noqa
11560
+
11561
+
11562
+ ##
11563
+
11564
+
11565
+ class SupervisorSetupImpl(SupervisorSetup):
11566
+ def __init__(
11567
+ self,
11568
+ *,
11569
+ config: ServerConfig,
11570
+ user: ta.Optional[SupervisorUser] = None,
11571
+ epoch: ServerEpoch = ServerEpoch(0),
11572
+ daemonize_listeners: DaemonizeListeners = DaemonizeListeners([]),
11573
+ ) -> None:
11574
+ super().__init__()
11575
+
11576
+ self._config = config
11577
+ self._user = user
11578
+ self._epoch = epoch
11579
+ self._daemonize_listeners = daemonize_listeners
11580
+
11581
+ #
11582
+
11583
+ @property
11584
+ def first(self) -> bool:
11585
+ return not self._epoch
11586
+
11587
+ #
11588
+
11589
+ @cached_nullary
11590
+ def setup(self) -> None:
11591
+ if not self.first:
11592
+ # prevent crash on libdispatch-based systems, at least for the first request
11593
+ self._cleanup_fds()
11594
+
11595
+ self._set_uid_or_exit()
11596
+
11597
+ if self.first:
11598
+ self._set_rlimits_or_exit()
11599
+
11600
+ # this sets up the options.logger object; delay logger instantiation until after setuid
11601
+ if not self._config.nocleanup:
11602
+ # clean up old automatic logs
11603
+ self._clear_auto_child_logdir()
11604
+
11605
+ if not self._config.nodaemon and self.first:
11606
+ self._daemonize()
11607
+
11608
+ # writing pid file needs to come *after* daemonizing or pid will be wrong
11609
+ self._write_pidfile()
11610
+
11611
+ @cached_nullary
11612
+ def cleanup(self) -> None:
11613
+ self._cleanup_pidfile()
11614
+
11615
+ #
11616
+
11617
+ def _cleanup_fds(self) -> None:
11618
+ # try to close any leaked file descriptors (for reload)
11619
+ start = 5
11620
+ os.closerange(start, self._config.min_fds)
11621
+
11622
+ #
11623
+
11624
+ def _set_uid_or_exit(self) -> None:
11625
+ """
11626
+ Set the uid of the supervisord process. Called during supervisord startup only. No return value. Exits the
11627
+ process via usage() if privileges could not be dropped.
11628
+ """
11629
+
11630
+ if self._user is None:
11631
+ if os.getuid() == 0:
11632
+ warnings.warn(
11633
+ 'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
11634
+ 'config file. If you intend to run as root, you can set user=root in the config file to avoid '
11635
+ 'this message.',
11636
+ )
11637
+ else:
11638
+ msg = drop_privileges(self._user.uid)
11639
+ if msg is None:
11640
+ log.info('Set uid to user %s succeeded', self._user.uid)
11641
+ else: # failed to drop privileges
11642
+ raise RuntimeError(msg)
11643
+
11644
+ #
11645
+
11646
+ def _set_rlimits_or_exit(self) -> None:
11647
+ """
11648
+ Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits the
11649
+ process via usage() if any rlimits could not be set.
11650
+ """
11651
+
11652
+ limits = []
11653
+
11654
+ if hasattr(resource, 'RLIMIT_NOFILE'):
11655
+ limits.append({
11656
+ 'msg': (
11657
+ 'The minimum number of file descriptors required to run this process is %(min_limit)s as per the '
11658
+ '"min_fds" command-line argument or config file setting. The current environment will only allow '
11659
+ 'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
11660
+ 'your environment (see README.rst) or lower the min_fds setting in the config file to allow the '
11661
+ 'process to start.'
11662
+ ),
11663
+ 'min': self._config.min_fds,
11664
+ 'resource': resource.RLIMIT_NOFILE,
11665
+ 'name': 'RLIMIT_NOFILE',
11666
+ })
11667
+
11668
+ if hasattr(resource, 'RLIMIT_NPROC'):
11669
+ limits.append({
11670
+ 'msg': (
11671
+ 'The minimum number of available processes required to run this program is %(min_limit)s as per '
11672
+ 'the "minprocs" command-line argument or config file setting. The current environment will only '
11673
+ 'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
11674
+ 'environment (see README.rst) or lower the minprocs setting in the config file to allow the '
11675
+ 'program to start.'
11676
+ ),
11677
+ 'min': self._config.min_procs,
11678
+ 'resource': resource.RLIMIT_NPROC,
11679
+ 'name': 'RLIMIT_NPROC',
11680
+ })
11681
+
11682
+ for limit in limits:
11683
+ min_limit = limit['min']
11684
+ res = limit['resource']
11685
+ msg = limit['msg']
11686
+ name = limit['name']
11687
+
11688
+ soft, hard = resource.getrlimit(res) # type: ignore
11689
+
11690
+ # -1 means unlimited
11691
+ if soft < min_limit and soft != -1: # type: ignore
11692
+ if hard < min_limit and hard != -1: # type: ignore
11693
+ # setrlimit should increase the hard limit if we are root, if not then setrlimit raises and we print
11694
+ # usage
11695
+ hard = min_limit # type: ignore
11696
+
11697
+ try:
11698
+ resource.setrlimit(res, (min_limit, hard)) # type: ignore
11699
+ log.info('Increased %s limit to %s', name, min_limit)
11700
+ except (OSError, ValueError):
11701
+ raise RuntimeError(msg % dict( # type: ignore # noqa
11702
+ min_limit=min_limit,
11703
+ res=res,
11704
+ name=name,
11705
+ soft=soft,
11706
+ hard=hard,
11707
+ ))
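The getrlimit/setrlimit dance above in miniature (stdlib-only; the 1024 floor is an arbitrary example standing in for min_fds):

    import resource

    MIN_FDS = 1024  # example floor
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if soft != resource.RLIM_INFINITY and soft < MIN_FDS:
        # Raising the hard limit as well only succeeds for privileged processes;
        # otherwise setrlimit raises, mirroring the error path above.
        new_hard = hard if hard == resource.RLIM_INFINITY or hard >= MIN_FDS else MIN_FDS
        resource.setrlimit(resource.RLIMIT_NOFILE, (MIN_FDS, new_hard))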
11708
+
11709
+ #
11710
+
11711
+ _unlink_pidfile = False
11712
+
11713
+ def _write_pidfile(self) -> None:
11714
+ pid = os.getpid()
11715
+ try:
11716
+ with open(self._config.pidfile, 'w') as f:
11717
+ f.write(f'{pid}\n')
11718
+ except OSError:
11719
+ log.critical('could not write pidfile %s', self._config.pidfile)
11720
+ else:
11721
+ self._unlink_pidfile = True
11722
+ log.info('supervisord started with pid %s', pid)
11723
+
11724
+ def _cleanup_pidfile(self) -> None:
11725
+ if self._unlink_pidfile:
11726
+ try_unlink(self._config.pidfile)
11727
+
11728
+ #
11729
+
11730
+ def _clear_auto_child_logdir(self) -> None:
11731
+ # must be called after realize()
11732
+ child_logdir = self._config.child_logdir
11733
+ if child_logdir == '/dev/null':
11734
+ return
11735
+
11736
+ fnre = re.compile(rf'.+?---{self._config.identifier}-\S+\.log\.?\d{{0,4}}')
11737
+ try:
11738
+ filenames = os.listdir(child_logdir)
11739
+ except OSError:
11740
+ log.warning('Could not clear child_log dir')
11741
+ return
11742
+
11743
+ for filename in filenames:
11744
+ if fnre.match(filename):
11745
+ pathname = os.path.join(child_logdir, filename)
11746
+ try:
11747
+ os.remove(pathname)
11748
+ except OSError:
11749
+ log.warning('Failed to clean up %r', pathname)
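Illustratively, with a hypothetical identifier of 'supervisor', the pattern above matches auto child-log names and their numbered rotations:

    import re

    fnre = re.compile(r'.+?---supervisor-\S+\.log\.?\d{0,4}')
    assert fnre.match('cat-stdout---supervisor-ab3x.log')
    assert fnre.match('cat-stdout---supervisor-ab3x.log.9')
    assert not fnre.match('unrelated.log')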
11750
+
11751
+ #
11752
+
11753
+ def _daemonize(self) -> None:
11754
+ for dl in self._daemonize_listeners:
11755
+ dl.before_daemonize()
11756
+
11757
+ self._do_daemonize()
11758
+
11759
+ for dl in self._daemonize_listeners:
11760
+ dl.after_daemonize()
11761
+
11762
+ def _do_daemonize(self) -> None:
11763
+ # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
11764
+ # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
11765
+ # our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
11766
+ # terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
11767
+ # use os.setsid. It means "set session id", which has the effect of disassociating a process from its current
11768
+ # session and process group and setting itself up as a new session leader.
11769
+ #
11770
+ # Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
11771
+ # of ourselves that is guaranteed to not be a session group leader.
11772
+ #
11773
+ # We also change directories, set stderr and stdout to null, and change our umask.
11774
+ #
11775
+ # This explanation was (gratefully) garnered from
11776
+ # http://www.cems.uwe.ac.uk/~irjohnso/coursenotes/lrc/system/daemons/d3.htm
11777
+
11778
+ pid = os.fork()
11779
+ if pid != 0:
11780
+ # Parent
11781
+ log.debug('supervisord forked; parent exiting')
11782
+ real_exit(Rc(0))
11783
+
11784
+ # Child
11785
+ log.info('daemonizing the supervisord process')
11786
+ if self._config.directory:
11787
+ try:
11788
+ os.chdir(self._config.directory)
11789
+ except OSError as err:
11790
+ log.critical("can't chdir into %r: %s", self._config.directory, err)
11791
+ else:
11792
+ log.info('set current directory: %r', self._config.directory)
11793
+
11794
+ os.dup2(os.open('/dev/null', os.O_RDONLY), 0)
11795
+ os.dup2(os.open('/dev/null', os.O_WRONLY), 1)
11796
+ os.dup2(os.open('/dev/null', os.O_WRONLY), 2)
11797
+
11798
+ # XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
11799
+ # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
11800
+ # again after the setsid() call, for obscure SVR4 reasons.
11801
+ os.setsid()
11802
+ os.umask(self._config.umask)
11803
+
11804
+
10606
11805
  ########################################
10607
11806
  # ../signals.py
10608
11807