ominfra 0.0.0.dev427__py3-none-any.whl → 0.0.0.dev429__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.
@@ -5,7 +5,7 @@
  # @omlish-generated
  # @omlish-amalg-output ../supervisor/main.py
  # @omlish-git-diff-omit
- # ruff: noqa: N802 UP006 UP007 UP012 UP036 UP043 UP045
+ # ruff: noqa: N802 UP006 UP007 UP012 UP036 UP043 UP045 UP046
  #
  # Supervisor is Copyright (c) 2006-2015 Agendaless Consulting and Contributors.
  # (http://www.agendaless.com), All Rights Reserved
@@ -131,6 +131,9 @@ A0 = ta.TypeVar('A0')
  A1 = ta.TypeVar('A1')
  A2 = ta.TypeVar('A2')

+ # ../../omlish/logs/levels.py
+ LogLevel = int  # ta.TypeAlias
+
  # ../../omlish/sockets/addresses.py
  SocketAddress = ta.Any

@@ -161,9 +164,17 @@ InjectorProviderFn = ta.Callable[['Injector'], ta.Any]
  InjectorProviderFnMap = ta.Mapping['InjectorKey', 'InjectorProviderFn']
  InjectorBindingOrBindings = ta.Union['InjectorBinding', 'InjectorBindings']

+ # ../../omlish/logs/contexts.py
+ LoggingExcInfoTuple = ta.Tuple[ta.Type[BaseException], BaseException, ta.Optional[types.TracebackType]]  # ta.TypeAlias
+ LoggingExcInfo = ta.Union[BaseException, LoggingExcInfoTuple]  # ta.TypeAlias
+ LoggingExcInfoArg = ta.Union[LoggingExcInfo, bool, None]  # ta.TypeAlias
+
  # ../../omlish/http/coro/server/server.py
  CoroHttpServerFactory = ta.Callable[[SocketAddress], 'CoroHttpServer']

+ # ../../omlish/logs/base.py
+ LoggingMsgFn = ta.Callable[[], ta.Union[str, tuple]]  # ta.TypeAlias
+

  ########################################
  # ../errors.py
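Of the aliases added above, LoggingMsgFn is the one that changes call shapes: a zero-argument callable returning either a format string or a (fmt, *args) tuple, invoked only when the record will actually be emitted. A minimal sketch of the intent (the unpacking lives in AnyLogger._prepare_msg_args, added further down in this diff):

    import typing as ta

    LoggingMsgFn = ta.Callable[[], ta.Union[str, tuple]]  # as aliased above

    def expensive() -> str:
        return 'rendered only on demand'

    msg_fn: LoggingMsgFn = lambda: ('deferred %s', expensive())
    # A logger only calls msg_fn() - and thus expensive() - if the level is enabled.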
@@ -3059,14 +3070,189 @@ def typing_annotations_attr() -> str:


  ########################################
- # ../../../omlish/logs/modules.py
+ # ../../../omlish/logs/infos.py
+
+
+ ##
+
+
+ def logging_context_info(cls):
+     return cls
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingSourceFileInfo(ta.NamedTuple):
+     file_name: str
+     module: str
+
+     @classmethod
+     def build(cls, file_path: ta.Optional[str]) -> ta.Optional['LoggingSourceFileInfo']:
+         if file_path is None:
+             return None
+
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L331-L336  # noqa
+         try:
+             file_name = os.path.basename(file_path)
+             module = os.path.splitext(file_name)[0]
+         except (TypeError, ValueError, AttributeError):
+             return None
+
+         return cls(
+             file_name,
+             module,
+         )
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingThreadInfo(ta.NamedTuple):
+     ident: int
+     native_id: ta.Optional[int]
+     name: str
+
+     @classmethod
+     def build(cls) -> 'LoggingThreadInfo':
+         return cls(
+             threading.get_ident(),
+             threading.get_native_id() if hasattr(threading, 'get_native_id') else None,
+             threading.current_thread().name,
+         )
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingProcessInfo(ta.NamedTuple):
+     pid: int
+
+     @classmethod
+     def build(cls) -> 'LoggingProcessInfo':
+         return cls(
+             os.getpid(),
+         )
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingMultiprocessingInfo(ta.NamedTuple):
+     process_name: str
+
+     @classmethod
+     def build(cls) -> ta.Optional['LoggingMultiprocessingInfo']:
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L355-L364  # noqa
+         if (mp := sys.modules.get('multiprocessing')) is None:
+             return None
+
+         return cls(
+             mp.current_process().name,
+         )
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingAsyncioTaskInfo(ta.NamedTuple):
+     name: str
+
+     @classmethod
+     def build(cls) -> ta.Optional['LoggingAsyncioTaskInfo']:
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L372-L377  # noqa
+         if (asyncio := sys.modules.get('asyncio')) is None:
+             return None
+
+         try:
+             task = asyncio.current_task()
+         except Exception:  # noqa
+             return None
+
+         if task is None:
+             return None
+
+         return cls(
+             task.get_name(),  # Always non-None
+         )
+
+
+ ########################################
+ # ../../../omlish/logs/levels.py


  ##


- def get_module_logger(mod_globals: ta.Mapping[str, ta.Any]) -> logging.Logger:
-     return logging.getLogger(mod_globals.get('__name__'))
+ @ta.final
+ class NamedLogLevel(int):
+     # logging.getLevelNamesMapping (or, as that is unavailable <3.11, logging._nameToLevel) includes the deprecated
+     # aliases.
+     _NAMES_BY_INT: ta.ClassVar[ta.Mapping[LogLevel, str]] = dict(sorted(logging._levelToName.items(), key=lambda t: -t[0]))  # noqa
+
+     _INTS_BY_NAME: ta.ClassVar[ta.Mapping[str, LogLevel]] = {v: k for k, v in _NAMES_BY_INT.items()}
+
+     _NAME_INT_PAIRS: ta.ClassVar[ta.Sequence[ta.Tuple[str, LogLevel]]] = list(_INTS_BY_NAME.items())
+
+     #
+
+     @property
+     def exact_name(self) -> ta.Optional[str]:
+         return self._NAMES_BY_INT.get(self)
+
+     _effective_name: ta.Optional[str]
+
+     @property
+     def effective_name(self) -> ta.Optional[str]:
+         try:
+             return self._effective_name
+         except AttributeError:
+             pass
+
+         if (n := self.exact_name) is None:
+             for n, i in self._NAME_INT_PAIRS:  # noqa
+                 if self >= i:
+                     break
+             else:
+                 n = None
+
+         self._effective_name = n
+         return n
+
+     #
+
+     def __repr__(self) -> str:
+         return f'{self.__class__.__name__}({int(self)})'
+
+     def __str__(self) -> str:
+         return self.exact_name or f'{self.effective_name or "INVALID"}:{int(self)}'
+
+     #
+
+     CRITICAL: ta.ClassVar['NamedLogLevel']
+     ERROR: ta.ClassVar['NamedLogLevel']
+     WARNING: ta.ClassVar['NamedLogLevel']
+     INFO: ta.ClassVar['NamedLogLevel']
+     DEBUG: ta.ClassVar['NamedLogLevel']
+     NOTSET: ta.ClassVar['NamedLogLevel']
+
+
+ NamedLogLevel.CRITICAL = NamedLogLevel(logging.CRITICAL)
+ NamedLogLevel.ERROR = NamedLogLevel(logging.ERROR)
+ NamedLogLevel.WARNING = NamedLogLevel(logging.WARNING)
+ NamedLogLevel.INFO = NamedLogLevel(logging.INFO)
+ NamedLogLevel.DEBUG = NamedLogLevel(logging.DEBUG)
+ NamedLogLevel.NOTSET = NamedLogLevel(logging.NOTSET)


  ########################################
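The semantics of NamedLogLevel, per the implementation above (values are the stdlib logging constants):

    lvl = NamedLogLevel(logging.WARNING)
    lvl.exact_name            # 'WARNING'
    str(lvl)                  # 'WARNING'

    odd = NamedLogLevel(35)   # between WARNING (30) and ERROR (40)
    odd.exact_name            # None - not a registered level
    odd.effective_name        # 'WARNING' - highest named level <= 35
    str(odd)                  # 'WARNING:35'
    repr(odd)                 # 'NamedLogLevel(35)'
    odd == 35                 # True - it is still a plain int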
@@ -3189,6 +3375,17 @@ class ProxyLoggingHandler(ProxyLoggingFilterer, logging.Handler):
          self._underlying.handleError(record)


+ ########################################
+ # ../../../omlish/logs/warnings.py
+
+
+ ##
+
+
+ class LoggingSetupWarning(Warning):
+     pass
+
+
  ########################################
  # ../../../omlish/sockets/addresses.py
  """
@@ -3650,87 +3847,6 @@ def get_open_fds(limit: int) -> ta.FrozenSet[Fd]:
      return frozenset(fd for i in range(limit) if is_fd_open(fd := Fd(i)))


- ########################################
- # ../utils/os.py
-
-
- ##
-
-
- def real_exit(code: Rc) -> None:
-     os._exit(code)  # noqa
-
-
- ##
-
-
- def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
-     """
-     Decode the status returned by wait() or waitpid().
-
-     Return a tuple (exitstatus, message) where exitstatus is the exit status, or -1 if the process was killed by a
-     signal; and message is a message telling what happened. It is the caller's responsibility to display the message.
-     """
-
-     if os.WIFEXITED(sts):
-         es = os.WEXITSTATUS(sts) & 0xffff
-         msg = f'exit status {es}'
-         return Rc(es), msg
-
-     elif os.WIFSIGNALED(sts):
-         sig = os.WTERMSIG(sts)
-         msg = f'terminated by {sig_name(sig)}'
-         if hasattr(os, 'WCOREDUMP'):
-             iscore = os.WCOREDUMP(sts)
-         else:
-             iscore = bool(sts & 0x80)
-         if iscore:
-             msg += ' (core dumped)'
-         return Rc(-1), msg
-
-     else:
-         msg = 'unknown termination cause 0x%04x' % sts  # noqa
-         return Rc(-1), msg
-
-
- ##
-
-
- class WaitedPid(ta.NamedTuple):
-     pid: Pid
-     sts: Rc
-
-
- def waitpid(
-     *,
-     log: ta.Optional[logging.Logger] = None,
- ) -> ta.Optional[WaitedPid]:
-     # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
-     # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
-     # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
-     # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
-     # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
-     # lying around.
-     try:
-         pid, sts = os.waitpid(-1, os.WNOHANG)
-
-     except OSError as exc:
-         code = exc.args[0]
-
-         if code not in (errno.ECHILD, errno.EINTR):
-             if log is not None:
-                 log.critical('waitpid error %r; a process may not be cleaned up properly', code)
-
-         if code == errno.EINTR:
-             if log is not None:
-                 log.debug('EINTR during reap')
-
-         return None
-
-     else:
-         return WaitedPid(pid, sts)  # type: ignore
-
-
  ########################################
  # ../utils/users.py

@@ -6055,6 +6171,106 @@ def check_lite_runtime_version() -> None:
          raise OSError(f'Requires python {LITE_REQUIRED_PYTHON_VERSION}, got {sys.version_info} from {sys.executable}')  # noqa


+ ########################################
+ # ../../../omlish/logs/callers.py
+
+
+ ##
+
+
+ @logging_context_info
+ @ta.final
+ class LoggingCaller(ta.NamedTuple):
+     file_path: str
+     line_no: int
+     name: str
+     stack_info: ta.Optional[str]
+
+     @classmethod
+     def is_internal_frame(cls, frame: types.FrameType) -> bool:
+         file_path = os.path.normcase(frame.f_code.co_filename)
+
+         # Yes, really.
+         # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L204
+         # https://github.com/python/cpython/commit/5ca6d7469be53960843df39bb900e9c3359f127f
+         if 'importlib' in file_path and '_bootstrap' in file_path:
+             return True
+
+         return False
+
+     @classmethod
+     def find_frame(cls, ofs: int = 0) -> ta.Optional[types.FrameType]:
+         f: ta.Optional[types.FrameType] = sys._getframe(2 + ofs)  # noqa
+
+         while f is not None:
+             # NOTE: We don't check __file__ like stdlib since we may be running amalgamated - we rely on careful, manual
+             # stack_offset management.
+             if hasattr(f, 'f_code'):
+                 return f
+
+             f = f.f_back
+
+         return None
+
+     @classmethod
+     def find(
+         cls,
+         ofs: int = 0,
+         *,
+         stack_info: bool = False,
+     ) -> ta.Optional['LoggingCaller']:
+         if (f := cls.find_frame(ofs + 1)) is None:
+             return None
+
+         # https://github.com/python/cpython/blob/08e9794517063c8cd92c48714071b1d3c60b71bd/Lib/logging/__init__.py#L1616-L1623  # noqa
+         sinfo = None
+         if stack_info:
+             sio = io.StringIO()
+             traceback.print_stack(f, file=sio)
+             sinfo = sio.getvalue()
+             sio.close()
+             if sinfo[-1] == '\n':
+                 sinfo = sinfo[:-1]
+
+         return cls(
+             f.f_code.co_filename,
+             f.f_lineno or 0,
+             f.f_code.co_name,
+             sinfo,
+         )
+
+
+ ########################################
+ # ../../../omlish/logs/protocols.py
+
+
+ ##
+
+
+ class LoggerLike(ta.Protocol):
+     """Satisfied by both our Logger and stdlib logging.Logger."""
+
+     def isEnabledFor(self, level: LogLevel) -> bool: ...  # noqa
+
+     def getEffectiveLevel(self) -> LogLevel: ...  # noqa
+
+     #
+
+     def log(self, level: LogLevel, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ...  # noqa
+
+     def debug(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ...  # noqa
+
+     def info(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ...  # noqa
+
+     def warning(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ...  # noqa
+
+     def error(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ...  # noqa
+
+     def exception(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ...  # noqa
+
+     def critical(self, msg: str, /, *args: ta.Any, **kwargs: ta.Any) -> None: ...  # noqa
+
+
  ########################################
  # ../../../omlish/logs/std/json.py
  """
@@ -6113,20 +6329,105 @@ class JsonLoggingFormatter(logging.Formatter):


  ########################################
- # ../../../omlish/os/journald.py
+ # ../../../omlish/logs/times.py


  ##


- class sd_iovec(ct.Structure):  # noqa
-     pass
+ @logging_context_info
+ @ta.final
+ class LoggingTimeFields(ta.NamedTuple):
+     """Maps directly to stdlib `logging.LogRecord` fields, and must be kept in sync with it."""

+     created: float
+     msecs: float
+     relative_created: float

- sd_iovec._fields_ = [
-     ('iov_base', ct.c_void_p),  # Pointer to data.
-     ('iov_len', ct.c_size_t),  # Length of data.
- ]
+     @classmethod
+     def get_std_start_time_ns(cls) -> int:
+         x: ta.Any = logging._startTime  # type: ignore[attr-defined]  # noqa
+
+         # Before 3.13.0b1 this will be `time.time()`, a float of seconds. After that, it will be `time.time_ns()`, an
+         # int.
+         #
+         # See:
+         # - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+         #
+         if isinstance(x, float):
+             return int(x * 1e9)
+         else:
+             return x
+
+     @classmethod
+     def build(
+         cls,
+         time_ns: int,
+         *,
+         start_time_ns: ta.Optional[int] = None,
+     ) -> 'LoggingTimeFields':
+         # https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+         created = time_ns / 1e9  # ns to float seconds
+
+         # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
+         # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
+         # Convert to float by adding 0.0 for historical reasons. See gh-89047
+         msecs = (time_ns % 1_000_000_000) // 1_000_000 + 0.0
+
+         # https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
+         if msecs == 999.0 and int(created) != time_ns // 1_000_000_000:
+             # ns -> sec conversion can round up, e.g:
+             # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
+             msecs = 0.0
+
+         if start_time_ns is None:
+             start_time_ns = cls.get_std_start_time_ns()
+         relative_created = (time_ns - start_time_ns) / 1e6
+
+         return cls(
+             created,
+             msecs,
+             relative_created,
+         )
+
+
+ ##
+
+
+ class UnexpectedLoggingStartTimeWarning(LoggingSetupWarning):
+     pass
+
+
+ def _check_logging_start_time() -> None:
+     if (x := LoggingTimeFields.get_std_start_time_ns()) < (t := time.time()):
+         import warnings  # noqa
+
+         warnings.warn(
+             f'Unexpected logging start time detected: '
+             f'get_std_start_time_ns={x}, '
+             f'time.time()={t}',
+             UnexpectedLoggingStartTimeWarning,
+         )
+
+
+ _check_logging_start_time()
+
+
+ ########################################
+ # ../../../omlish/os/journald.py
+
+
+ ##
+
+
+ class sd_iovec(ct.Structure):  # noqa
+     pass
+
+
+ sd_iovec._fields_ = [
+     ('iov_base', ct.c_void_p),  # Pointer to data.
+     ('iov_len', ct.c_size_t),  # Length of data.
+ ]


  ##
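A worked example of the millisecond handling in LoggingTimeFields.build above, using the rounding edge case called out in its own comments (a sketch, not package code):

    time_ns = 1_677_903_920_999_999_900
    created = time_ns / 1e9                                # 1677903921.0 - float rounding bumps the second
    msecs = (time_ns % 1_000_000_000) // 1_000_000 + 0.0   # 999.0
    # int(created) == 1677903921 but time_ns // 1_000_000_000 == 1677903920,
    # so build() resets msecs to 0.0 rather than reporting ...921.0 s plus 999 ms.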
@@ -6688,6 +6989,87 @@ class SupervisorSetup(Abstract):
          raise NotImplementedError


+ ########################################
+ # ../utils/os.py
+
+
+ ##
+
+
+ def real_exit(code: Rc) -> None:
+     os._exit(code)  # noqa
+
+
+ ##
+
+
+ def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
+     """
+     Decode the status returned by wait() or waitpid().
+
+     Return a tuple (exitstatus, message) where exitstatus is the exit status, or -1 if the process was killed by a
+     signal; and message is a message telling what happened. It is the caller's responsibility to display the message.
+     """
+
+     if os.WIFEXITED(sts):
+         es = os.WEXITSTATUS(sts) & 0xffff
+         msg = f'exit status {es}'
+         return Rc(es), msg
+
+     elif os.WIFSIGNALED(sts):
+         sig = os.WTERMSIG(sts)
+         msg = f'terminated by {sig_name(sig)}'
+         if hasattr(os, 'WCOREDUMP'):
+             iscore = os.WCOREDUMP(sts)
+         else:
+             iscore = bool(sts & 0x80)
+         if iscore:
+             msg += ' (core dumped)'
+         return Rc(-1), msg
+
+     else:
+         msg = 'unknown termination cause 0x%04x' % sts  # noqa
+         return Rc(-1), msg
+
+
+ ##
+
+
+ class WaitedPid(ta.NamedTuple):
+     pid: Pid
+     sts: Rc
+
+
+ def waitpid(
+     *,
+     log: ta.Optional[LoggerLike] = None,
+ ) -> ta.Optional[WaitedPid]:
+     # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
+     # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
+     # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
+     # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
+     # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
+     # lying around.
+     try:
+         pid, sts = os.waitpid(-1, os.WNOHANG)
+
+     except OSError as exc:
+         code = exc.args[0]
+
+         if code not in (errno.ECHILD, errno.EINTR):
+             if log is not None:
+                 log.critical('waitpid error %r; a process may not be cleaned up properly', code)
+
+         if code == errno.EINTR:
+             if log is not None:
+                 log.debug('EINTR during reap')
+
+         return None
+
+     else:
+         return WaitedPid(pid, sts)  # type: ignore
+
+
  ########################################
  # ../../../omlish/http/handlers.py

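Usage sketch for the relocated helpers (behavior is unchanged; the only delta versus the removed copy is that waitpid's log parameter is now typed as the LoggerLike protocol, so a stdlib logger still works):

    import logging

    wp = waitpid(log=logging.getLogger(__name__))  # non-blocking single reap
    if wp is not None and wp.pid:                  # pid 0 means nothing to reap yet
        rc, msg = decode_wait_status(wp.sts)       # e.g. (Rc(0), 'exit status 0')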
@@ -6747,7 +7129,7 @@ class HttpHandler_(Abstract):  # noqa
  @dc.dataclass(frozen=True)
  class LoggingHttpHandler(HttpHandler_):
      handler: HttpHandler
-     log: logging.Logger
+     log: LoggerLike
      level: int = logging.DEBUG

      def __call__(self, req: HttpHandlerRequest) -> HttpHandlerResponse:
@@ -6760,7 +7142,7 @@ class LoggingHttpHandler(HttpHandler_):
  @dc.dataclass(frozen=True)
  class ExceptionLoggingHttpHandler(HttpHandler_):
      handler: HttpHandler
-     log: logging.Logger
+     log: LoggerLike
      message: ta.Union[str, ta.Callable[[HttpHandlerRequest, BaseException], str]] = 'Error in http handler'

      def __call__(self, req: HttpHandlerRequest) -> HttpHandlerResponse:
@@ -8062,130 +8444,387 @@ inj = InjectionApi()


  ########################################
- # ../../../omlish/logs/standard.py
- """
- TODO:
-  - !! move to std !!
-  - structured
-  - prefixed
-  - debug
-  - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
- """
+ # ../../../omlish/logs/contexts.py


  ##


- STANDARD_LOG_FORMAT_PARTS = [
-     ('asctime', '%(asctime)-15s'),
-     ('process', 'pid=%(process)s'),
-     ('thread', 'tid=%(thread)x'),
-     ('levelname', '%(levelname)s'),
-     ('name', '%(name)s'),
-     ('separator', '::'),
-     ('message', '%(message)s'),
- ]
+ class LoggingContext(Abstract):
+     @property
+     @abc.abstractmethod
+     def level(self) -> NamedLogLevel:
+         raise NotImplementedError

+     #

- class StandardLoggingFormatter(logging.Formatter):
-     @staticmethod
-     def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
-         return ' '.join(v for k, v in parts)
+     @property
+     @abc.abstractmethod
+     def time_ns(self) -> int:
+         raise NotImplementedError

-     converter = datetime.datetime.fromtimestamp  # type: ignore
+     @property
+     @abc.abstractmethod
+     def times(self) -> LoggingTimeFields:
+         raise NotImplementedError

-     def formatTime(self, record, datefmt=None):
-         ct = self.converter(record.created)
-         if datefmt:
-             return ct.strftime(datefmt)  # noqa
-         else:
-             t = ct.strftime('%Y-%m-%d %H:%M:%S')
-             return '%s.%03d' % (t, record.msecs)  # noqa
+     #

+     @property
+     @abc.abstractmethod
+     def exc_info(self) -> ta.Optional[LoggingExcInfo]:
+         raise NotImplementedError

- ##
+     @property
+     @abc.abstractmethod
+     def exc_info_tuple(self) -> ta.Optional[LoggingExcInfoTuple]:
+         raise NotImplementedError

+     #

- class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
-     def __init_subclass__(cls, **kwargs):
-         raise TypeError('This class serves only as a marker and should not be subclassed.')
+     @abc.abstractmethod
+     def caller(self) -> ta.Optional[LoggingCaller]:
+         raise NotImplementedError

+     @abc.abstractmethod
+     def source_file(self) -> ta.Optional[LoggingSourceFileInfo]:
+         raise NotImplementedError

- ##
+     #

+     @abc.abstractmethod
+     def thread(self) -> ta.Optional[LoggingThreadInfo]:
+         raise NotImplementedError

- @contextlib.contextmanager
- def _locking_logging_module_lock() -> ta.Iterator[None]:
-     if hasattr(logging, '_acquireLock'):
-         logging._acquireLock()  # noqa
-         try:
-             yield
-         finally:
-             logging._releaseLock()  # type: ignore  # noqa
+     @abc.abstractmethod
+     def process(self) -> ta.Optional[LoggingProcessInfo]:
+         raise NotImplementedError

-     elif hasattr(logging, '_lock'):
-         # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
-         with logging._lock:  # noqa
-             yield
+     @abc.abstractmethod
+     def multiprocessing(self) -> ta.Optional[LoggingMultiprocessingInfo]:
+         raise NotImplementedError

-     else:
-         raise Exception("Can't find lock in logging module")
+     @abc.abstractmethod
+     def asyncio_task(self) -> ta.Optional[LoggingAsyncioTaskInfo]:
+         raise NotImplementedError


- def configure_standard_logging(
-     level: ta.Union[int, str] = logging.INFO,
-     *,
-     json: bool = False,
-     target: ta.Optional[logging.Logger] = None,
-     force: bool = False,
-     handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
- ) -> ta.Optional[StandardConfiguredLoggingHandler]:
-     with _locking_logging_module_lock():
-         if target is None:
-             target = logging.root
+ ##

-         #

-         if not force:
-             if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
-                 return None
+ class CaptureLoggingContext(LoggingContext, Abstract):
+     class AlreadyCapturedError(Exception):
+         pass

-         #
+     class NotCapturedError(Exception):
+         pass

-         if handler_factory is not None:
-             handler = handler_factory()
-         else:
-             handler = logging.StreamHandler()
+     @abc.abstractmethod
+     def capture(self) -> None:
+         """Must be cooperatively called only from the expected locations."""

-         #
+         raise NotImplementedError

-         formatter: logging.Formatter
-         if json:
-             formatter = JsonLoggingFormatter()
-         else:
-             formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS))
-         handler.setFormatter(formatter)

-         #
+ @ta.final
+ class CaptureLoggingContextImpl(CaptureLoggingContext):
+     @ta.final
+     class NOT_SET:  # noqa
+         def __new__(cls, *args, **kwargs):  # noqa
+             raise TypeError

-         handler.addFilter(TidLoggingFilter())
+     #
+
+     def __init__(
+         self,
+         level: LogLevel,
+         *,
+         time_ns: ta.Optional[int] = None,
+
+         exc_info: LoggingExcInfoArg = False,
+
+         caller: ta.Union[LoggingCaller, ta.Type[NOT_SET], None] = NOT_SET,
+         stack_offset: int = 0,
+         stack_info: bool = False,
+     ) -> None:
+         self._level: NamedLogLevel = level if level.__class__ is NamedLogLevel else NamedLogLevel(level)  # type: ignore[assignment]  # noqa

          #

-         target.addHandler(handler)
+         if time_ns is None:
+             time_ns = time.time_ns()
+         self._time_ns: int = time_ns

          #

-         if level is not None:
-             target.setLevel(level)
+         if exc_info is True:
+             sys_exc_info = sys.exc_info()
+             if sys_exc_info[0] is not None:
+                 exc_info = sys_exc_info
+             else:
+                 exc_info = None
+         elif exc_info is False:
+             exc_info = None
+
+         if exc_info is not None:
+             self._exc_info: ta.Optional[LoggingExcInfo] = exc_info
+             if isinstance(exc_info, BaseException):
+                 self._exc_info_tuple: ta.Optional[LoggingExcInfoTuple] = (type(exc_info), exc_info, exc_info.__traceback__)  # noqa
+             else:
+                 self._exc_info_tuple = exc_info

          #

-         return StandardConfiguredLoggingHandler(handler)
+         if caller is not CaptureLoggingContextImpl.NOT_SET:
+             self._caller = caller  # type: ignore[assignment]
+         else:
+             self._stack_offset = stack_offset
+             self._stack_info = stack_info

+     ##

- ########################################
- # ../types.py
+     @property
+     def level(self) -> NamedLogLevel:
+         return self._level
+
+     #
+
+     @property
+     def time_ns(self) -> int:
+         return self._time_ns
+
+     _times: LoggingTimeFields
+
+     @property
+     def times(self) -> LoggingTimeFields:
+         try:
+             return self._times
+         except AttributeError:
+             pass
+
+         times = self._times = LoggingTimeFields.build(self.time_ns)
+         return times
+
+     #
+
+     _exc_info: ta.Optional[LoggingExcInfo] = None
+     _exc_info_tuple: ta.Optional[LoggingExcInfoTuple] = None
+
+     @property
+     def exc_info(self) -> ta.Optional[LoggingExcInfo]:
+         return self._exc_info
+
+     @property
+     def exc_info_tuple(self) -> ta.Optional[LoggingExcInfoTuple]:
+         return self._exc_info_tuple
+
+     ##
+
+     _stack_offset: int
+     _stack_info: bool
+
+     def inc_stack_offset(self, ofs: int = 1) -> 'CaptureLoggingContext':
+         if hasattr(self, '_stack_offset'):
+             self._stack_offset += ofs
+         return self
+
+     _has_captured: bool = False
+
+     _caller: ta.Optional[LoggingCaller]
+     _source_file: ta.Optional[LoggingSourceFileInfo]
+
+     _thread: ta.Optional[LoggingThreadInfo]
+     _process: ta.Optional[LoggingProcessInfo]
+     _multiprocessing: ta.Optional[LoggingMultiprocessingInfo]
+     _asyncio_task: ta.Optional[LoggingAsyncioTaskInfo]
+
+     def capture(self) -> None:
+         if self._has_captured:
+             raise CaptureLoggingContextImpl.AlreadyCapturedError
+         self._has_captured = True
+
+         if not hasattr(self, '_caller'):
+             self._caller = LoggingCaller.find(
+                 self._stack_offset + 1,
+                 stack_info=self._stack_info,
+             )
+
+         if (caller := self._caller) is not None:
+             self._source_file = LoggingSourceFileInfo.build(caller.file_path)
+         else:
+             self._source_file = None
+
+         self._thread = LoggingThreadInfo.build()
+         self._process = LoggingProcessInfo.build()
+         self._multiprocessing = LoggingMultiprocessingInfo.build()
+         self._asyncio_task = LoggingAsyncioTaskInfo.build()
+
+     #
+
+     def caller(self) -> ta.Optional[LoggingCaller]:
+         try:
+             return self._caller
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def source_file(self) -> ta.Optional[LoggingSourceFileInfo]:
+         try:
+             return self._source_file
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     #
+
+     def thread(self) -> ta.Optional[LoggingThreadInfo]:
+         try:
+             return self._thread
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def process(self) -> ta.Optional[LoggingProcessInfo]:
+         try:
+             return self._process
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def multiprocessing(self) -> ta.Optional[LoggingMultiprocessingInfo]:
+         try:
+             return self._multiprocessing
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+     def asyncio_task(self) -> ta.Optional[LoggingAsyncioTaskInfo]:
+         try:
+             return self._asyncio_task
+         except AttributeError:
+             raise CaptureLoggingContext.NotCapturedError from None
+
+
+ ########################################
+ # ../../../omlish/logs/standard.py
+ """
+ TODO:
+  - !! move to std !!
+  - structured
+  - prefixed
+  - debug
+  - optional noisy? noisy will never be lite - some kinda configure_standard callback mechanism?
+ """
+
+
+ ##
+
+
+ STANDARD_LOG_FORMAT_PARTS = [
+     ('asctime', '%(asctime)-15s'),
+     ('process', 'pid=%(process)s'),
+     ('thread', 'tid=%(thread)x'),
+     ('levelname', '%(levelname)s'),
+     ('name', '%(name)s'),
+     ('separator', '::'),
+     ('message', '%(message)s'),
+ ]
+
+
+ class StandardLoggingFormatter(logging.Formatter):
+     @staticmethod
+     def build_log_format(parts: ta.Iterable[ta.Tuple[str, str]]) -> str:
+         return ' '.join(v for k, v in parts)
+
+     converter = datetime.datetime.fromtimestamp  # type: ignore
+
+     def formatTime(self, record, datefmt=None):
+         ct = self.converter(record.created)
+         if datefmt:
+             return ct.strftime(datefmt)  # noqa
+         else:
+             t = ct.strftime('%Y-%m-%d %H:%M:%S')
+             return '%s.%03d' % (t, record.msecs)  # noqa
+
+
+ ##
+
+
+ class StandardConfiguredLoggingHandler(ProxyLoggingHandler):
+     def __init_subclass__(cls, **kwargs):
+         raise TypeError('This class serves only as a marker and should not be subclassed.')
+
+
+ ##
+
+
+ @contextlib.contextmanager
+ def _locking_logging_module_lock() -> ta.Iterator[None]:
+     if hasattr(logging, '_acquireLock'):
+         logging._acquireLock()  # noqa
+         try:
+             yield
+         finally:
+             logging._releaseLock()  # type: ignore  # noqa
+
+     elif hasattr(logging, '_lock'):
+         # https://github.com/python/cpython/commit/74723e11109a320e628898817ab449b3dad9ee96
+         with logging._lock:  # noqa
+             yield
+
+     else:
+         raise Exception("Can't find lock in logging module")
+
+
+ def configure_standard_logging(
+     level: ta.Union[int, str] = logging.INFO,
+     *,
+     json: bool = False,
+     target: ta.Optional[logging.Logger] = None,
+     force: bool = False,
+     handler_factory: ta.Optional[ta.Callable[[], logging.Handler]] = None,
+ ) -> ta.Optional[StandardConfiguredLoggingHandler]:
+     with _locking_logging_module_lock():
+         if target is None:
+             target = logging.root
+
+         #
+
+         if not force:
+             if any(isinstance(h, StandardConfiguredLoggingHandler) for h in list(target.handlers)):
+                 return None
+
+         #
+
+         if handler_factory is not None:
+             handler = handler_factory()
+         else:
+             handler = logging.StreamHandler()
+
+         #
+
+         formatter: logging.Formatter
+         if json:
+             formatter = JsonLoggingFormatter()
+         else:
+             formatter = StandardLoggingFormatter(StandardLoggingFormatter.build_log_format(STANDARD_LOG_FORMAT_PARTS))
+         handler.setFormatter(formatter)
+
+         #
+
+         handler.addFilter(TidLoggingFilter())
+
+         #
+
+         target.addHandler(handler)
+
+         #
+
+         if level is not None:
+             target.setLevel(level)
+
+         #
+
+         return StandardConfiguredLoggingHandler(handler)
+
+
+ ########################################
+ # ../types.py


  class ExitNow(Exception):  # noqa
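A sketch of how a logger drives the capture context added above (the actual call sites are the AnyLogger methods in base.py, further down in this diff):

    ctx = CaptureLoggingContextImpl(logging.INFO, stack_offset=1)
    ctx.level        # NamedLogLevel(20); plain ints are coerced in __init__
    ctx.capture()    # snapshots caller/thread/process/task info; a second call raises AlreadyCapturedError
    ctx.caller()     # LoggingCaller or None - raises NotCapturedError if read before capture()
    ctx.times        # LoggingTimeFields, lazily built from ctx.time_ns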
@@ -8932,718 +9571,654 @@ class CoroHttpServer:


  ########################################
- # ../dispatchers.py
+ # ../../../omlish/logs/base.py


  ##


- class Dispatchers(KeyedCollection[Fd, FdioHandler]):
-     def _key(self, v: FdioHandler) -> Fd:
-         return Fd(v.fd())
-
-     #
+ class AnyLogger(Abstract, ta.Generic[T]):
+     @ta.final
+     def is_enabled_for(self, level: LogLevel) -> bool:
+         return level >= self.get_effective_level()

-     def drain(self) -> None:
-         for d in self:
-             # note that we *must* call readable() for every dispatcher, as it may have side effects for a given
-             # dispatcher (eg. call handle_listener_state_change for event listener processes)
-             if d.readable():
-                 d.on_readable()
-             if d.writable():
-                 d.on_writable()
+     @abc.abstractmethod
+     def get_effective_level(self) -> LogLevel:
+         raise NotImplementedError

      #

-     def remove_logs(self) -> None:
-         for d in self:
-             if isinstance(d, ProcessOutputDispatcher):
-                 d.remove_logs()
+     @ta.final
+     def isEnabledFor(self, level: LogLevel) -> bool:  # noqa
+         return self.is_enabled_for(level)

-     def reopen_logs(self) -> None:
-         for d in self:
-             if isinstance(d, ProcessOutputDispatcher):
-                 d.reopen_logs()
+     @ta.final
+     def getEffectiveLevel(self) -> LogLevel:  # noqa
+         return self.get_effective_level()

+     ##

- ########################################
- # ../dispatchersimpl.py
+     @ta.overload
+     def log(self, level: LogLevel, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
+         ...

+     @ta.overload
+     def log(self, level: LogLevel, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
+         ...

- log = get_module_logger(globals())  # noqa
+     @ta.overload
+     def log(self, level: LogLevel, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
+         ...

+     @ta.final
+     def log(self, level: LogLevel, *args, **kwargs):
+         return self._log(CaptureLoggingContextImpl(level, stack_offset=1), *args, **kwargs)

- ##
+     #

+     @ta.overload
+     def debug(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
+         ...

- class BaseProcessDispatcherImpl(ProcessDispatcher, Abstract):
-     def __init__(
-         self,
-         process: Process,
-         channel: ProcessOutputChannel,
-         fd: Fd,
-         *,
-         event_callbacks: EventCallbacks,
-         server_config: ServerConfig,
-     ) -> None:
-         super().__init__()
+     @ta.overload
+     def debug(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
+         ...

-         self._process = process  # process which "owns" this dispatcher
-         self._channel = channel  # 'stderr' or 'stdout'
-         self._fd = fd
-         self._event_callbacks = event_callbacks
-         self._server_config = server_config
+     @ta.overload
+     def debug(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
+         ...

-         self._closed = False  # True if close() has been called
+     @ta.final
+     def debug(self, *args, **kwargs):
+         return self._log(CaptureLoggingContextImpl(NamedLogLevel.DEBUG, stack_offset=1), *args, **kwargs)

      #

-     def __repr__(self) -> str:
-         return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
+     @ta.overload
+     def info(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
+         ...

-     #
+     @ta.overload
+     def info(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
+         ...

-     @property
-     def process(self) -> Process:
-         return self._process
+     @ta.overload
+     def info(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
+         ...

-     @property
-     def channel(self) -> ProcessOutputChannel:
-         return self._channel
+     @ta.final
+     def info(self, *args, **kwargs):
+         return self._log(CaptureLoggingContextImpl(NamedLogLevel.INFO, stack_offset=1), *args, **kwargs)

-     def fd(self) -> Fd:
-         return self._fd
+     #

-     @property
-     def closed(self) -> bool:
-         return self._closed
+     @ta.overload
+     def warning(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
+         ...

-     #
+     @ta.overload
+     def warning(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
+         ...

-     def close(self) -> None:
-         if not self._closed:
-             log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
-             self._closed = True
+     @ta.overload
+     def warning(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
+         ...

-     def on_error(self, exc: ta.Optional[BaseException] = None) -> None:
-         nil, t, v, tbinfo = compact_traceback()
+     @ta.final
+     def warning(self, *args, **kwargs):
+         return self._log(CaptureLoggingContextImpl(NamedLogLevel.WARNING, stack_offset=1), *args, **kwargs)

-         log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
-         self.close()
+     #

+     @ta.overload
+     def error(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
+         ...

- class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispatcher):
-     """
-     Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
+     @ta.overload
+     def error(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
+         ...

-     - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
-       ProcessCommunicationEvent by calling notify_event(event).
-     - route the output to the appropriate log handlers as specified in the config.
-     """
+     @ta.overload
+     def error(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
+         ...

-     def __init__(
-         self,
-         process: Process,
-         event_type: ta.Type[ProcessCommunicationEvent],
-         fd: Fd,
-         *,
-         event_callbacks: EventCallbacks,
-         server_config: ServerConfig,
-     ) -> None:
-         super().__init__(
-             process,
-             event_type.channel,
-             fd,
-             event_callbacks=event_callbacks,
-             server_config=server_config,
-         )
+     @ta.final
+     def error(self, *args, **kwargs):
+         return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, stack_offset=1), *args, **kwargs)

-         self._event_type = event_type
+     #

-         self._lc: ProcessConfig.Log = getattr(process.config, self._channel)
+     @ta.overload
+     def exception(self, msg: str, *args: ta.Any, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
+         ...

-         self._init_normal_log()
-         self._init_capture_log()
+     @ta.overload
+     def exception(self, msg: ta.Tuple[ta.Any, ...], *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
+         ...

-         self._child_log = self._normal_log
+     @ta.overload
+     def exception(self, msg_fn: LoggingMsgFn, *, exc_info: LoggingExcInfoArg = True, **kwargs: ta.Any) -> T:
+         ...

-         self._capture_mode = False  # are we capturing process event data
-         self._output_buffer = b''  # data waiting to be logged
+     @ta.final
+     def exception(self, *args, exc_info: LoggingExcInfoArg = True, **kwargs):
+         return self._log(CaptureLoggingContextImpl(NamedLogLevel.ERROR, exc_info=exc_info, stack_offset=1), *args, **kwargs)  # noqa

-         # all code below is purely for minor speedups
+     #

-         begin_token = self._event_type.BEGIN_TOKEN
-         end_token = self._event_type.END_TOKEN
-         self._begin_token_data = (begin_token, len(begin_token))
-         self._end_token_data = (end_token, len(end_token))
+     @ta.overload
+     def critical(self, msg: str, *args: ta.Any, **kwargs: ta.Any) -> T:
+         ...

-         self._main_log_level = logging.DEBUG
+     @ta.overload
+     def critical(self, msg: ta.Tuple[ta.Any, ...], **kwargs: ta.Any) -> T:
+         ...

-         self._log_to_main_log = self._server_config.loglevel <= self._main_log_level
+     @ta.overload
+     def critical(self, msg_fn: LoggingMsgFn, **kwargs: ta.Any) -> T:
+         ...

-         self._stdout_events_enabled = self._process.config.stdout.events_enabled
-         self._stderr_events_enabled = self._process.config.stderr.events_enabled
+     @ta.final
+     def critical(self, *args, **kwargs):
+         return self._log(CaptureLoggingContextImpl(NamedLogLevel.CRITICAL, stack_offset=1), *args, **kwargs)

-     _child_log: ta.Optional[logging.Logger] = None  # the current logger (normal_log or capture_log)
-     _normal_log: ta.Optional[logging.Logger] = None  # the "normal" (non-capture) logger
-     _capture_log: ta.Optional[logging.Logger] = None  # the logger used while we're in capture_mode
+     ##

-     def _init_normal_log(self) -> None:
-         """
-         Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
-         enabled.
-         """
+     @classmethod
+     def _prepare_msg_args(cls, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> ta.Tuple[str, tuple]:
+         if callable(msg):
+             if args:
+                 raise TypeError(f'Must not provide both a message function and args: {msg=} {args=}')
+             x = msg()
+             if isinstance(x, str):
+                 return x, ()
+             elif isinstance(x, tuple):
+                 if x:
+                     return x[0], x[1:]
+                 else:
+                     return '', ()
+             else:
+                 raise TypeError(x)

-         config = self._process.config  # noqa
-         channel = self._channel  # noqa
+         elif isinstance(msg, tuple):
+             if args:
+                 raise TypeError(f'Must not provide both a tuple message and args: {msg=} {args=}')
+             if msg:
+                 return msg[0], msg[1:]
+             else:
+                 return '', ()

-         logfile = self._lc.file
-         max_bytes = self._lc.max_bytes  # noqa
-         backups = self._lc.backups  # noqa
-         to_syslog = self._lc.syslog
+         elif isinstance(msg, str):
+             return msg, args

-         if logfile or to_syslog:
-             self._normal_log = logging.getLogger(__name__)
+         else:
+             raise TypeError(msg)

-         # if logfile:
-         #     loggers.handle_file(
-         #         self.normal_log,
-         #         filename=logfile,
-         #         fmt='%(message)s',
-         #         rotating=bool(max_bytes),  # optimization
-         #         max_bytes=max_bytes,
-         #         backups=backups,
-         #     )
+     @abc.abstractmethod
+     def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> T:  # noqa
+         raise NotImplementedError

-         # if to_syslog:
-         #     loggers.handle_syslog(
-         #         self.normal_log,
-         #         fmt=config.name + ' %(message)s',
-         #     )

-     def _init_capture_log(self) -> None:
-         """
-         Configure the capture log for this process. This log is used to temporarily capture output when special output
-         is detected. Sets self.capture_log if capturing is enabled.
-         """
+ class Logger(AnyLogger[None], Abstract):
+     @abc.abstractmethod
+     def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None:  # noqa
+         raise NotImplementedError

-         capture_max_bytes = self._lc.capture_max_bytes
-         if capture_max_bytes:
-             self._capture_log = logging.getLogger(__name__)
-             # loggers.handle_boundIO(
-             #     self._capture_log,
-             #     fmt='%(message)s',
-             #     max_bytes=capture_max_bytes,
-             # )

-     def remove_logs(self) -> None:
-         for l in (self._normal_log, self._capture_log):
-             if l is not None:
-                 for handler in l.handlers:
-                     handler.remove()  # type: ignore
-                     handler.reopen()  # type: ignore
+ class AsyncLogger(AnyLogger[ta.Awaitable[None]], Abstract):
+     @abc.abstractmethod
+     def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> ta.Awaitable[None]:  # noqa
+         raise NotImplementedError

-     def reopen_logs(self) -> None:
-         for l in (self._normal_log, self._capture_log):
-             if l is not None:
-                 for handler in l.handlers:
-                     handler.reopen()  # type: ignore

-     def _log(self, data: ta.Union[str, bytes, None]) -> None:
-         if not data:
-             return
+ ##

-         if self._server_config.strip_ansi:
-             data = strip_escapes(as_bytes(data))

-         if self._child_log:
-             self._child_log.info(data)
+ class AnyNopLogger(AnyLogger[T], Abstract):
+     @ta.final
+     def get_effective_level(self) -> LogLevel:
+         return 999

-         if self._log_to_main_log:
-             if not isinstance(data, bytes):
-                 text = data
-             else:
-                 try:
-                     text = data.decode('utf-8')
-                 except UnicodeDecodeError:
-                     text = f'Undecodable: {data!r}'
-             log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa

-         if self._channel == 'stdout':
-             if self._stdout_events_enabled:
-                 self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
+ @ta.final
+ class NopLogger(AnyNopLogger[None], Logger):
+     def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None:  # noqa
+         pass

-         elif self._stderr_events_enabled:
-             self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))

-     def record_output(self) -> None:
-         if self._capture_log is None:
-             # shortcut trying to find capture data
-             data = self._output_buffer
-             self._output_buffer = b''
-             self._log(data)
-             return
+ @ta.final
+ class AsyncNopLogger(AnyNopLogger[ta.Awaitable[None]], AsyncLogger):
+     async def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any, **kwargs: ta.Any) -> None:  # noqa
+         pass

-         if self._capture_mode:
-             token, token_len = self._end_token_data
-         else:
-             token, token_len = self._begin_token_data

-         if len(self._output_buffer) <= token_len:
-             return  # not enough data
+ ########################################
+ # ../../../omlish/logs/std/records.py

-         data = self._output_buffer
-         self._output_buffer = b''

-         try:
-             before, after = data.split(token, 1)
-         except ValueError:
-             after = None
-             index = find_prefix_at_end(data, token)
-             if index:
-                 self._output_buffer = self._output_buffer + data[-index:]
-                 data = data[:-index]
-             self._log(data)
-         else:
-             self._log(before)
-             self.toggle_capture_mode()
-             self._output_buffer = after  # type: ignore
+ ##

-         if after:
-             self.record_output()

-     def toggle_capture_mode(self) -> None:
-         self._capture_mode = not self._capture_mode
+ # Ref:
+ # - https://docs.python.org/3/library/logging.html#logrecord-attributes
+ #
+ # LogRecord:
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L276 (3.8)
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L286 (~3.14)  # noqa
+ #
+ # LogRecord.__init__ args:
+ # - name: str
+ # - level: int
+ # - pathname: str - Confusingly referred to as `fn` before the LogRecord ctor. May be empty or "(unknown file)".
+ # - lineno: int - May be 0.
+ # - msg: str
+ # - args: tuple | dict | 1-tuple[dict]
+ # - exc_info: LoggingExcInfoTuple | None
+ # - func: str | None = None -> funcName
+ # - sinfo: str | None = None -> stack_info
+ #
+ KNOWN_STD_LOGGING_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
+     # Name of the logger used to log the call. Unmodified by ctor.
+     name=str,

-         if self._capture_log is not None:
-             if self._capture_mode:
-                 self._child_log = self._capture_log
-             else:
-                 for handler in self._capture_log.handlers:
-                     handler.flush()
-                 data = self._capture_log.getvalue()  # type: ignore
-                 channel = self._channel
-                 procname = self._process.config.name
-                 event = self._event_type(self._process, self._process.pid, data)
-                 self._event_callbacks.notify(event)
+     # The format string passed in the original logging call. Merged with args to produce message, or an arbitrary object
+     # (see Using arbitrary objects as messages). Unmodified by ctor.
+     msg=str,

-                 log.debug('%r %s emitted a comm event', procname, channel)
-                 for handler in self._capture_log.handlers:
-                     handler.remove()  # type: ignore
-                     handler.reopen()  # type: ignore
-             self._child_log = self._normal_log
+     # The tuple of arguments merged into msg to produce message, or a dict whose values are used for the merge (when
+     # there is only one argument, and it is a dictionary). Ctor will transform a 1-tuple containing a Mapping into just
+     # the mapping, but is otherwise unmodified.
+     args=ta.Union[tuple, dict],

-     def writable(self) -> bool:
-         return False
+     #

-     def readable(self) -> bool:
-         if self._closed:
-             return False
-         return True
+     # Text logging level for the message ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'). Set to
+     # `getLevelName(level)`.
+     levelname=str,

-     def on_readable(self) -> None:
-         data = read_fd(self._fd)
-         self._output_buffer += data
-         self.record_output()
-         if not data:
-             # if we get no data back from the pipe, it means that the child process has ended. See
-             # mail.python.org/pipermail/python-dev/2004-August/046850.html
-             self.close()
+     # Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL). Unmodified by ctor.
+     levelno=int,

+     #

- class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatcher):
-     def __init__(
-         self,
-         process: Process,
-         channel: ProcessOutputChannel,
-         fd: Fd,
-         *,
-         event_callbacks: EventCallbacks,
-         server_config: ServerConfig,
-     ) -> None:
-         super().__init__(
-             process,
-             channel,
-             fd,
-             event_callbacks=event_callbacks,
-             server_config=server_config,
-         )
+     # Full pathname of the source file where the logging call was issued (if available). Unmodified by ctor. May default
+     # to "(unknown file)" by Logger.findCaller / Logger._log.
+     pathname=str,

-         self._input_buffer = b''
+     # Filename portion of pathname. Set to `os.path.basename(pathname)` if successful, otherwise defaults to pathname.
+     filename=str,

-     def write(self, chars: ta.Union[bytes, str]) -> None:
-         self._input_buffer += as_bytes(chars)
+     # Module (name portion of filename). Set to `os.path.splitext(filename)[0]`, otherwise defaults to
+     # "Unknown module".
+     module=str,

-     def writable(self) -> bool:
-         if self._input_buffer and not self._closed:
-             return True
-         return False
+     #

-     def flush(self) -> None:
-         # other code depends on this raising EPIPE if the pipe is closed
-         sent = os.write(self._fd, as_bytes(self._input_buffer))
-         self._input_buffer = self._input_buffer[sent:]
+     # Exception tuple (à la sys.exc_info) or, if no exception has occurred, None. Unmodified by ctor.
+     exc_info=ta.Optional[LoggingExcInfoTuple],

-     def on_writable(self) -> None:
-         if self._input_buffer:
-             try:
-                 self.flush()
-             except OSError as why:
-                 if why.args[0] == errno.EPIPE:
-                     self._input_buffer = b''
-                     self.close()
-                 else:
-                     raise
+     # Used to cache the traceback text. Simply set to None by ctor, later set by Formatter.format.
+     exc_text=ta.Optional[str],

+     #

- ########################################
- # ../groupsimpl.py
+     # Stack frame information (where available) from the bottom of the stack in the current thread, up to and including
+     # the stack frame of the logging call which resulted in the creation of this record. Set by ctor to `sinfo` arg,
+     # unmodified. Mostly set, if requested, by `Logger.findCaller`, to `traceback.print_stack(f)`, but prepended with
+     # the literal "Stack (most recent call last):\n", and stripped of exactly one trailing `\n` if present.
+     stack_info=ta.Optional[str],

+     # Source line number where the logging call was issued (if available). Unmodified by ctor. May default to 0 by
+     # Logger.findCaller / Logger._log.
+     lineno=int,

- ##
+     # Name of function containing the logging call. Set by ctor to `func` arg, unmodified. May default to
+     # "(unknown function)" by Logger.findCaller / Logger._log.
+     funcName=str,

+     #

- class ProcessFactory(Func2[ProcessConfig, ProcessGroup, Process]):
-     pass
+     # Time when the LogRecord was created. Set to `time.time_ns() / 1e9` for >=3.13.0b1, otherwise simply `time.time()`.
+     #
+     # See:
+     # - https://github.com/python/cpython/commit/1316692e8c7c1e1f3b6639e51804f9db5ed892ea
+     # - https://github.com/python/cpython/commit/1500a23f33f5a6d052ff1ef6383d9839928b8ff1
+     #
+     created=float,

+     # Millisecond portion of the time when the LogRecord was created.
+     msecs=float,

- class ProcessGroupImpl(ProcessGroup):
-     def __init__(
-         self,
-         config: ProcessGroupConfig,
-         *,
-         process_factory: ProcessFactory,
-     ):
-         super().__init__()
+     # Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded.
+     relativeCreated=float,

-         self._config = config
-         self._process_factory = process_factory
+     #

-         by_name: ta.Dict[str, Process] = {}
-         for pconfig in self._config.processes or []:
-             p = check.isinstance(self._process_factory(pconfig, self), Process)
-             if p.name in by_name:
-                 raise KeyError(f'name {p.name} of process {p} already registered by {by_name[p.name]}')
-             by_name[pconfig.name] = p
-         self._by_name = by_name
+     # Thread ID if available, and `logging.logThreads` is truthy.
+     thread=ta.Optional[int],

-     @property
-     def _by_key(self) -> ta.Mapping[str, Process]:
-         return self._by_name
+     # Thread name if available, and `logging.logThreads` is truthy.
+     threadName=ta.Optional[str],

      #

-     def __repr__(self) -> str:
-         return f'<{self.__class__.__name__} instance at {id(self)} named {self._config.name}>'
+     # Process name if available. Set to None if `logging.logMultiprocessing` is not truthy. Otherwise, set to
+     # 'MainProcess', then `sys.modules.get('multiprocessing').current_process().name` if that works, otherwise remains
+     # as 'MainProcess'.
+     #
+     # As noted by stdlib:
+     #
+     #   Errors may occur if multiprocessing has not finished loading yet - e.g. if a custom import hook causes
+     #   third-party code to run when multiprocessing calls import. See issue 8200 for an example
+     #
+     processName=ta.Optional[str],
+
+     # Process ID if available - that is, if `hasattr(os, 'getpid')` - and `logging.logProcesses` is truthy, otherwise
+     # None.
+     process=ta.Optional[int],

      #

-     @property
-     def name(self) -> str:
-         return self._config.name
+     # Absent <3.12, otherwise asyncio.Task name if available, and `logging.logAsyncioTasks` is truthy. Set to
+     # `sys.modules.get('asyncio').current_task().get_name()`, otherwise None.
+     taskName=ta.Optional[str],
+ )

-     @property
-     def config(self) -> ProcessGroupConfig:
-         return self._config
+ KNOWN_STD_LOGGING_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(KNOWN_STD_LOGGING_RECORD_ATTRS)

-     @property
-     def by_name(self) -> ta.Mapping[str, Process]:
-         return self._by_name

-     #
+ # Formatter:
+ # - https://github.com/python/cpython/blob/39b2f82717a69dde7212bc39b673b0f55c99e6a3/Lib/logging/__init__.py#L514 (3.8)
+ # - https://github.com/python/cpython/blob/f070f54c5f4a42c7c61d1d5d3b8f3b7203b4a0fb/Lib/logging/__init__.py#L554 (~3.14)  # noqa
9933
+ #
9934
+ KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS: ta.Dict[str, ta.Any] = dict(
9935
+ # The logged message, computed as msg % args. Set to `record.getMessage()`.
9936
+ message=str,
9937
+
9938
+ # Human-readable time when the LogRecord was created. By default this is of the form '2003-07-08 16:49:45,896' (the
9939
+ # numbers after the comma are the millisecond portion of the time). Set to `self.formatTime(record, self.datefmt)` if
9940
+ # `self.usesTime()`, otherwise unset.
9941
+ asctime=str,
9942
+
9943
+ # Used to cache the traceback text. If unset (falsey) on the record and `exc_info` is truthy, set to
9944
+ # `self.formatException(record.exc_info)` - otherwise unmodified.
9945
+ exc_text=ta.Optional[str],
9946
+ )
9354
9947
 
9355
- def get_unstopped_processes(self) -> ta.List[Process]:
9356
- return [x for x in self if not x.state.stopped]
9948
+ KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTR_SET: ta.FrozenSet[str] = frozenset(KNOWN_STD_LOGGING_FORMATTER_RECORD_ATTRS)
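
For orientation, a minimal standalone sketch (not part of the amalgamated module) of how these formatter-populated names surface in a `%`-style format string:

import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.error('boom')  # Formatter.format sets record.message (and asctime, since the fmt string uses it)
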
9357
9949
 
9358
- def stop_all(self) -> None:
9359
- processes = list(self._by_name.values())
9360
- processes.sort()
9361
- processes.reverse() # stop in desc priority order
9362
9950
 
9363
- for proc in processes:
9364
- state = proc.state
9365
- if state == ProcessState.RUNNING:
9366
- # RUNNING -> STOPPING
9367
- proc.stop()
9951
+ ##
9368
9952
 
9369
- elif state == ProcessState.STARTING:
9370
- # STARTING -> STOPPING
9371
- proc.stop()
9372
9953
 
9373
- elif state == ProcessState.BACKOFF:
9374
- # BACKOFF -> FATAL
9375
- proc.give_up()
9954
+ class UnknownStdLoggingRecordAttrsWarning(LoggingSetupWarning):
9955
+ pass
9376
9956
 
9377
- def before_remove(self) -> None:
9378
- pass
9379
9957
 
9958
+ def _check_std_logging_record_attrs() -> None:
9959
+ rec_dct = dict(logging.makeLogRecord({}).__dict__)
9380
9960
 
9381
- ########################################
9382
- # ../process.py
9961
+ if (unk_rec_fields := frozenset(rec_dct) - KNOWN_STD_LOGGING_RECORD_ATTR_SET):
9962
+ import warnings # noqa
9963
+
9964
+ warnings.warn(
9965
+ f'Unknown log record attrs detected: {sorted(unk_rec_fields)!r}',
9966
+ UnknownStdLoggingRecordAttrsWarning,
9967
+ )
9968
+
9969
+
9970
+ _check_std_logging_record_attrs()
9383
9971
 
9384
9972
 
9385
9973
  ##
9386
9974
 
9387
9975
 
9388
- class ProcessStateError(RuntimeError):
9389
- pass
9976
+ class LoggingContextLogRecord(logging.LogRecord):
9977
+ _SHOULD_ADD_TASK_NAME: ta.ClassVar[bool] = sys.version_info >= (3, 12)
9390
9978
 
9979
+ _UNKNOWN_PATH_NAME: ta.ClassVar[str] = '(unknown file)'
9980
+ _UNKNOWN_FUNC_NAME: ta.ClassVar[str] = '(unknown function)'
9981
+ _UNKNOWN_MODULE: ta.ClassVar[str] = 'Unknown module'
9391
9982
 
9392
- ##
9983
+ _STACK_INFO_PREFIX: ta.ClassVar[str] = 'Stack (most recent call last):\n'
9393
9984
 
9985
+ def __init__( # noqa
9986
+ self,
9987
+ # name,
9988
+ # level,
9989
+ # pathname,
9990
+ # lineno,
9991
+ # msg,
9992
+ # args,
9993
+ # exc_info,
9994
+ # func=None,
9995
+ # sinfo=None,
9996
+ # **kwargs,
9997
+ *,
9998
+ name: str,
9999
+ msg: str,
10000
+ args: ta.Union[tuple, dict],
9394
10001
 
9395
- class PidHistory(ta.Dict[Pid, Process]):
9396
- pass
10002
+ _logging_context: LoggingContext,
10003
+ ) -> None:
10004
+ ctx = _logging_context
9397
10005
 
10006
+ self.name: str = name
9398
10007
 
9399
- ########################################
9400
- # ../setupimpl.py
10008
+ self.msg: str = msg
9401
10009
 
10010
+ # https://github.com/python/cpython/blob/e709361fc87d0d9ab9c58033a0a7f2fef0ad43d2/Lib/logging/__init__.py#L307
10011
+ if args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) and args[0]:
10012
+ args = args[0] # type: ignore[assignment]
10013
+ self.args: ta.Union[tuple, dict] = args
9402
10014
 
9403
- log = get_module_logger(globals()) # noqa
10015
+ self.levelname: str = logging.getLevelName(ctx.level)
10016
+ self.levelno: int = ctx.level
9404
10017
 
10018
+ if (caller := ctx.caller()) is not None:
10019
+ self.pathname: str = caller.file_path
10020
+ else:
10021
+ self.pathname = self._UNKNOWN_PATH_NAME
9405
10022
 
9406
- ##
10023
+ if (src_file := ctx.source_file()) is not None:
10024
+ self.filename: str = src_file.file_name
10025
+ self.module: str = src_file.module
10026
+ else:
10027
+ self.filename = self.pathname
10028
+ self.module = self._UNKNOWN_MODULE
10029
+
10030
+ self.exc_info: ta.Optional[LoggingExcInfoTuple] = ctx.exc_info_tuple
10031
+ self.exc_text: ta.Optional[str] = None
10032
+
10033
+ # If ctx.build_caller() was never called, we simply don't have a stack trace.
10034
+ if caller is not None:
10035
+ if (sinfo := caller.stack_info) is not None:
10036
+ self.stack_info: ta.Optional[str] = ''.join([
10037
+ self._STACK_INFO_PREFIX,
10038
+ sinfo[:-1] if sinfo.endswith('\n') else sinfo,
10039
+ ])
10040
+ else:
10041
+ self.stack_info = None
9407
10042
 
10043
+ self.lineno: int = caller.line_no
10044
+ self.funcName: str = caller.name
9408
10045
 
9409
- class SupervisorSetupImpl(SupervisorSetup):
9410
- def __init__(
9411
- self,
9412
- *,
9413
- config: ServerConfig,
9414
- user: ta.Optional[SupervisorUser] = None,
9415
- epoch: ServerEpoch = ServerEpoch(0),
9416
- daemonize_listeners: DaemonizeListeners = DaemonizeListeners([]),
9417
- ) -> None:
9418
- super().__init__()
10046
+ else:
10047
+ self.stack_info = None
9419
10048
 
9420
- self._config = config
9421
- self._user = user
9422
- self._epoch = epoch
9423
- self._daemonize_listeners = daemonize_listeners
10049
+ self.lineno = 0
10050
+ self.funcName = self._UNKNOWN_FUNC_NAME
9424
10051
 
9425
- #
10052
+ times = ctx.times
10053
+ self.created: float = times.created
10054
+ self.msecs: float = times.msecs
10055
+ self.relativeCreated: float = times.relative_created
9426
10056
 
9427
- @property
9428
- def first(self) -> bool:
9429
- return not self._epoch
10057
+ if logging.logThreads:
10058
+ thread = check.not_none(ctx.thread())
10059
+ self.thread: ta.Optional[int] = thread.ident
10060
+ self.threadName: ta.Optional[str] = thread.name
10061
+ else:
10062
+ self.thread = None
10063
+ self.threadName = None
9430
10064
 
9431
- #
10065
+ if logging.logProcesses:
10066
+ process = check.not_none(ctx.process())
10067
+ self.process: ta.Optional[int] = process.pid
10068
+ else:
10069
+ self.process = None
9432
10070
 
9433
- @cached_nullary
9434
- def setup(self) -> None:
9435
- if not self.first:
9436
- # prevent crash on libdispatch-based systems, at least for the first request
9437
- self._cleanup_fds()
10071
+ if logging.logMultiprocessing:
10072
+ if (mp := ctx.multiprocessing()) is not None:
10073
+ self.processName: ta.Optional[str] = mp.process_name
10074
+ else:
10075
+ self.processName = None
10076
+ else:
10077
+ self.processName = None
9438
10078
 
9439
- self._set_uid_or_exit()
10079
+ # Absent <3.12
10080
+ if getattr(logging, 'logAsyncioTasks', None):
10081
+ if (at := ctx.asyncio_task()) is not None:
10082
+ self.taskName: ta.Optional[str] = at.name
10083
+ else:
10084
+ self.taskName = None
10085
+ else:
10086
+ self.taskName = None
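
The stdlib gate flags this ctor mirrors (`logging.logThreads` et al.) behave the same way on plain `logging.LogRecord`s; a quick standalone sketch:

import logging

logging.logThreads = False  # module-level flag consulted by LogRecord.__init__ (and by this ctor)
rec = logging.LogRecord('demo', logging.INFO, __file__, 1, 'msg', (), None)
assert rec.thread is None and rec.threadName is None
logging.logThreads = True
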
9440
10087
 
9441
- if self.first:
9442
- self._set_rlimits_or_exit()
9443
10088
 
9444
- # this sets the options.logger object delay logger instantiation until after setuid
9445
- if not self._config.nocleanup:
9446
- # clean up old automatic logs
9447
- self._clear_auto_child_logdir()
10089
+ ########################################
10090
+ # ../dispatchers.py
10091
+
10092
+
10093
+ ##
10094
+
10095
+
10096
+ class Dispatchers(KeyedCollection[Fd, FdioHandler]):
10097
+ def _key(self, v: FdioHandler) -> Fd:
10098
+ return Fd(v.fd())
10099
+
10100
+ #
10101
+
10102
+ def drain(self) -> None:
10103
+ for d in self:
10104
+ # note that we *must* call readable() for every dispatcher, as it may have side effects for a given
10105
+ # dispatcher (e.g. call handle_listener_state_change for event listener processes)
10106
+ if d.readable():
10107
+ d.on_readable()
10108
+ if d.writable():
10109
+ d.on_writable()
10110
+
10111
+ #
10112
+
10113
+ def remove_logs(self) -> None:
10114
+ for d in self:
10115
+ if isinstance(d, ProcessOutputDispatcher):
10116
+ d.remove_logs()
9448
10117
 
9449
- if not self._config.nodaemon and self.first:
9450
- self._daemonize()
10118
+ def reopen_logs(self) -> None:
10119
+ for d in self:
10120
+ if isinstance(d, ProcessOutputDispatcher):
10121
+ d.reopen_logs()
9451
10122
 
9452
- # writing pid file needs to come *after* daemonizing or pid will be wrong
9453
- self._write_pidfile()
9454
10123
 
9455
- @cached_nullary
9456
- def cleanup(self) -> None:
9457
- self._cleanup_pidfile()
10124
+ ########################################
10125
+ # ../groupsimpl.py
9458
10126
 
9459
- #
9460
10127
 
9461
- def _cleanup_fds(self) -> None:
9462
- # try to close any leaked file descriptors (for reload)
9463
- start = 5
9464
- os.closerange(start, self._config.min_fds)
10128
+ ##
9465
10129
 
9466
- #
9467
10130
 
9468
- def _set_uid_or_exit(self) -> None:
9469
- """
9470
- Set the uid of the supervisord process. Called during supervisord startup only. No return value. Exits the
9471
- process via usage() if privileges could not be dropped.
9472
- """
10131
+ class ProcessFactory(Func2[ProcessConfig, ProcessGroup, Process]):
10132
+ pass
9473
10133
 
9474
- if self._user is None:
9475
- if os.getuid() == 0:
9476
- warnings.warn(
9477
- 'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
9478
- 'config file. If you intend to run as root, you can set user=root in the config file to avoid '
9479
- 'this message.',
9480
- )
9481
- else:
9482
- msg = drop_privileges(self._user.uid)
9483
- if msg is None:
9484
- log.info('Set uid to user %s succeeded', self._user.uid)
9485
- else: # failed to drop privileges
9486
- raise RuntimeError(msg)
9487
10134
 
9488
- #
10135
+ class ProcessGroupImpl(ProcessGroup):
10136
+ def __init__(
10137
+ self,
10138
+ config: ProcessGroupConfig,
10139
+ *,
10140
+ process_factory: ProcessFactory,
10141
+ ):
10142
+ super().__init__()
9489
10143
 
9490
- def _set_rlimits_or_exit(self) -> None:
9491
- """
9492
- Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits the
9493
- process via usage() if any rlimits could not be set.
9494
- """
10144
+ self._config = config
10145
+ self._process_factory = process_factory
9495
10146
 
9496
- limits = []
10147
+ by_name: ta.Dict[str, Process] = {}
10148
+ for pconfig in self._config.processes or []:
10149
+ p = check.isinstance(self._process_factory(pconfig, self), Process)
10150
+ if p.name in by_name:
10151
+ raise KeyError(f'name {p.name} of process {p} already registered by {by_name[p.name]}')
10152
+ by_name[pconfig.name] = p
10153
+ self._by_name = by_name
9497
10154
 
9498
- if hasattr(resource, 'RLIMIT_NOFILE'):
9499
- limits.append({
9500
- 'msg': (
9501
- 'The minimum number of file descriptors required to run this process is %(min_limit)s as per the '
9502
- '"min_fds" command-line argument or config file setting. The current environment will only allow '
9503
- 'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
9504
- 'your environment (see README.rst) or lower the min_fds setting in the config file to allow the '
9505
- 'process to start.'
9506
- ),
9507
- 'min': self._config.min_fds,
9508
- 'resource': resource.RLIMIT_NOFILE,
9509
- 'name': 'RLIMIT_NOFILE',
9510
- })
10155
+ @property
10156
+ def _by_key(self) -> ta.Mapping[str, Process]:
10157
+ return self._by_name
9511
10158
 
9512
- if hasattr(resource, 'RLIMIT_NPROC'):
9513
- limits.append({
9514
- 'msg': (
9515
- 'The minimum number of available processes required to run this program is %(min_limit)s as per '
9516
- 'the "minprocs" command-line argument or config file setting. The current environment will only '
9517
- 'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
9518
- 'environment (see README.rst) or lower the minprocs setting in the config file to allow the '
9519
- 'program to start.'
9520
- ),
9521
- 'min': self._config.min_procs,
9522
- 'resource': resource.RLIMIT_NPROC,
9523
- 'name': 'RLIMIT_NPROC',
9524
- })
10159
+ #
9525
10160
 
9526
- for limit in limits:
9527
- min_limit = limit['min']
9528
- res = limit['resource']
9529
- msg = limit['msg']
9530
- name = limit['name']
10161
+ def __repr__(self) -> str:
10162
+ return f'<{self.__class__.__name__} instance at {id(self)} named {self._config.name}>'
9531
10163
 
9532
- soft, hard = resource.getrlimit(res) # type: ignore
10164
+ #
9533
10165
 
9534
- # -1 means unlimited
9535
- if soft < min_limit and soft != -1: # type: ignore
9536
- if hard < min_limit and hard != -1: # type: ignore
9537
- # setrlimit should increase the hard limit if we are root, if not then setrlimit raises and we print
9538
- # usage
9539
- hard = min_limit # type: ignore
10166
+ @property
10167
+ def name(self) -> str:
10168
+ return self._config.name
9540
10169
 
9541
- try:
9542
- resource.setrlimit(res, (min_limit, hard)) # type: ignore
9543
- log.info('Increased %s limit to %s', name, min_limit)
9544
- except (OSError, ValueError):
9545
- raise RuntimeError(msg % dict( # type: ignore # noqa
9546
- min_limit=min_limit,
9547
- res=res,
9548
- name=name,
9549
- soft=soft,
9550
- hard=hard,
9551
- ))
10170
+ @property
10171
+ def config(self) -> ProcessGroupConfig:
10172
+ return self._config
10173
+
10174
+ @property
10175
+ def by_name(self) -> ta.Mapping[str, Process]:
10176
+ return self._by_name
9552
10177
 
9553
10178
  #
9554
10179
 
9555
- _unlink_pidfile = False
10180
+ def get_unstopped_processes(self) -> ta.List[Process]:
10181
+ return [x for x in self if not x.state.stopped]
9556
10182
 
9557
- def _write_pidfile(self) -> None:
9558
- pid = os.getpid()
9559
- try:
9560
- with open(self._config.pidfile, 'w') as f:
9561
- f.write(f'{pid}\n')
9562
- except OSError:
9563
- log.critical('could not write pidfile %s', self._config.pidfile)
9564
- else:
9565
- self._unlink_pidfile = True
9566
- log.info('supervisord started with pid %s', pid)
10183
+ def stop_all(self) -> None:
10184
+ processes = list(self._by_name.values())
10185
+ processes.sort()
10186
+ processes.reverse() # stop in desc priority order
9567
10187
 
9568
- def _cleanup_pidfile(self) -> None:
9569
- if self._unlink_pidfile:
9570
- try_unlink(self._config.pidfile)
10188
+ for proc in processes:
10189
+ state = proc.state
10190
+ if state == ProcessState.RUNNING:
10191
+ # RUNNING -> STOPPING
10192
+ proc.stop()
9571
10193
 
9572
- #
10194
+ elif state == ProcessState.STARTING:
10195
+ # STARTING -> STOPPING
10196
+ proc.stop()
9573
10197
 
9574
- def _clear_auto_child_logdir(self) -> None:
9575
- # must be called after realize()
9576
- child_logdir = self._config.child_logdir
9577
- if child_logdir == '/dev/null':
9578
- return
10198
+ elif state == ProcessState.BACKOFF:
10199
+ # BACKOFF -> FATAL
10200
+ proc.give_up()
9579
10201
 
9580
- fnre = re.compile(rf'.+?---{self._config.identifier}-\S+\.log\.?\d{{0,4}}')
9581
- try:
9582
- filenames = os.listdir(child_logdir)
9583
- except OSError:
9584
- log.warning('Could not clear child_log dir')
9585
- return
10202
+ def before_remove(self) -> None:
10203
+ pass
9586
10204
 
9587
- for filename in filenames:
9588
- if fnre.match(filename):
9589
- pathname = os.path.join(child_logdir, filename)
9590
- try:
9591
- os.remove(pathname)
9592
- except OSError:
9593
- log.warning('Failed to clean up %r', pathname)
9594
10205
 
9595
- #
10206
+ ########################################
10207
+ # ../process.py
9596
10208
 
9597
- def _daemonize(self) -> None:
9598
- for dl in self._daemonize_listeners:
9599
- dl.before_daemonize()
9600
10209
 
9601
- self._do_daemonize()
10210
+ ##
9602
10211
 
9603
- for dl in self._daemonize_listeners:
9604
- dl.after_daemonize()
9605
10212
 
9606
- def _do_daemonize(self) -> None:
9607
- # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
9608
- # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
9609
- # our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
9610
- # terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
9611
- # use os.setsid. It means "set session id", which has the effect of disassociating a process from its current
9612
- # session and process group and setting itself up as a new session leader.
9613
- #
9614
- # Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
9615
- # of ourselves that is guaranteed to not be a session group leader.
9616
- #
9617
- # We also change directories, set stderr and stdout to null, and change our umask.
9618
- #
9619
- # This explanation was (gratefully) garnered from
9620
- # http://www.cems.uwe.ac.uk/~irjohnso/coursenotes/lrc/system/daemons/d3.htm
10213
+ class ProcessStateError(RuntimeError):
10214
+ pass
9621
10215
 
9622
- pid = os.fork()
9623
- if pid != 0:
9624
- # Parent
9625
- log.debug('supervisord forked; parent exiting')
9626
- real_exit(Rc(0))
9627
10216
 
9628
- # Child
9629
- log.info('daemonizing the supervisord process')
9630
- if self._config.directory:
9631
- try:
9632
- os.chdir(self._config.directory)
9633
- except OSError as err:
9634
- log.critical("can't chdir into %r: %s", self._config.directory, err)
9635
- else:
9636
- log.info('set current directory: %r', self._config.directory)
10217
+ ##
9637
10218
 
9638
- os.dup2(0, os.open('/dev/null', os.O_RDONLY))
9639
- os.dup2(1, os.open('/dev/null', os.O_WRONLY))
9640
- os.dup2(2, os.open('/dev/null', os.O_WRONLY))
9641
10219
 
9642
- # XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
9643
- # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
9644
- # again after the setsid() call, for obscure SVR4 reasons.
9645
- os.setsid()
9646
- os.umask(self._config.umask)
10220
+ class PidHistory(ta.Dict[Pid, Process]):
10221
+ pass
9647
10222
 
9648
10223
 
9649
10224
  ########################################
@@ -9778,10 +10353,49 @@ class CoroHttpServerConnectionFdioHandler(SocketFdioHandler):
9778
10353
  if not wb.write(send):
9779
10354
  break
9780
10355
 
9781
- if wb.rem < 1:
9782
- self._write_buf = None
9783
- self._cur_io = None
9784
- self._next_io()
10356
+ if wb.rem < 1:
10357
+ self._write_buf = None
10358
+ self._cur_io = None
10359
+ self._next_io()
10360
+
10361
+
10362
+ ########################################
10363
+ # ../../../omlish/logs/std/adapters.py
10364
+
10365
+
10366
+ ##
10367
+
10368
+
10369
+ class StdLogger(Logger):
10370
+ def __init__(self, std: logging.Logger) -> None:
10371
+ super().__init__()
10372
+
10373
+ self._std = std
10374
+
10375
+ @property
10376
+ def std(self) -> logging.Logger:
10377
+ return self._std
10378
+
10379
+ def get_effective_level(self) -> LogLevel:
10380
+ return self._std.getEffectiveLevel()
10381
+
10382
+ def _log(self, ctx: CaptureLoggingContext, msg: ta.Union[str, tuple, LoggingMsgFn], *args: ta.Any) -> None:
10383
+ if not self.is_enabled_for(ctx.level):
10384
+ return
10385
+
10386
+ ctx.capture()
10387
+
10388
+ ms, args = self._prepare_msg_args(msg, *args)
10389
+
10390
+ rec = LoggingContextLogRecord(
10391
+ name=self._std.name,
10392
+ msg=ms,
10393
+ args=args,
10394
+
10395
+ _logging_context=ctx,
10396
+ )
10397
+
10398
+ self._std.handle(rec)
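
A minimal wiring sketch, assuming only the `Logger` methods already used throughout this module (`info`, `debug`, ...):

import logging

logging.basicConfig(level=logging.DEBUG)
log = StdLogger(logging.getLogger('demo'))
log.info('spawned pid %s', 1234)  # level check -> context capture -> LoggingContextLogRecord -> std handlers
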
9785
10399
 
9786
10400
 
9787
10401
  ########################################
@@ -9874,128 +10488,375 @@ class ProcessGroupManager(
9874
10488
 
9875
10489
 
9876
10490
  ########################################
9877
- # ../io.py
10491
+ # ../spawning.py
9878
10492
 
9879
10493
 
9880
- log = get_module_logger(globals()) # noqa
10494
+ ##
10495
+
10496
+
10497
+ @dc.dataclass(frozen=True)
10498
+ class SpawnedProcess:
10499
+ pid: Pid
10500
+ pipes: ProcessPipes
10501
+ dispatchers: Dispatchers
10502
+
10503
+
10504
+ class ProcessSpawnError(RuntimeError):
10505
+ pass
10506
+
10507
+
10508
+ class ProcessSpawning:
10509
+ @property
10510
+ @abc.abstractmethod
10511
+ def process(self) -> Process:
10512
+ raise NotImplementedError
10513
+
10514
+ #
10515
+
10516
+ @abc.abstractmethod
10517
+ def spawn(self) -> SpawnedProcess: # Raises[ProcessSpawnError]
10518
+ raise NotImplementedError
10519
+
10520
+
10521
+ ########################################
10522
+ # ../../../omlish/logs/modules.py
9881
10523
 
9882
10524
 
9883
10525
  ##
9884
10526
 
9885
10527
 
9886
- HasDispatchersList = ta.NewType('HasDispatchersList', ta.Sequence[HasDispatchers])
10528
+ def get_module_logger(mod_globals: ta.Mapping[str, ta.Any]) -> Logger:
10529
+ return StdLogger(logging.getLogger(mod_globals.get('__name__'))) # noqa
9887
10530
 
9888
10531
 
9889
- class IoManager(HasDispatchers):
10532
+ ########################################
10533
+ # ../dispatchersimpl.py
10534
+
10535
+
10536
+ log = get_module_logger(globals()) # noqa
10537
+
10538
+
10539
+ ##
10540
+
10541
+
10542
+ class BaseProcessDispatcherImpl(ProcessDispatcher, Abstract):
9890
10543
  def __init__(
9891
10544
  self,
10545
+ process: Process,
10546
+ channel: ProcessOutputChannel,
10547
+ fd: Fd,
9892
10548
  *,
9893
- poller: FdioPoller,
9894
- has_dispatchers_list: HasDispatchersList,
10549
+ event_callbacks: EventCallbacks,
10550
+ server_config: ServerConfig,
9895
10551
  ) -> None:
9896
10552
  super().__init__()
9897
10553
 
9898
- self._poller = poller
9899
- self._has_dispatchers_list = has_dispatchers_list
10554
+ self._process = process # process which "owns" this dispatcher
10555
+ self._channel = channel # 'stderr' or 'stdout'
10556
+ self._fd = fd
10557
+ self._event_callbacks = event_callbacks
10558
+ self._server_config = server_config
9900
10559
 
9901
- def get_dispatchers(self) -> Dispatchers:
9902
- return Dispatchers(
9903
- d
9904
- for hd in self._has_dispatchers_list
9905
- for d in hd.get_dispatchers()
10560
+ self._closed = False # True if close() has been called
10561
+
10562
+ #
10563
+
10564
+ def __repr__(self) -> str:
10565
+ return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
10566
+
10567
+ #
10568
+
10569
+ @property
10570
+ def process(self) -> Process:
10571
+ return self._process
10572
+
10573
+ @property
10574
+ def channel(self) -> ProcessOutputChannel:
10575
+ return self._channel
10576
+
10577
+ def fd(self) -> Fd:
10578
+ return self._fd
10579
+
10580
+ @property
10581
+ def closed(self) -> bool:
10582
+ return self._closed
10583
+
10584
+ #
10585
+
10586
+ def close(self) -> None:
10587
+ if not self._closed:
10588
+ log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
10589
+ self._closed = True
10590
+
10591
+ def on_error(self, exc: ta.Optional[BaseException] = None) -> None:
10592
+ nil, t, v, tbinfo = compact_traceback()
10593
+
10594
+ log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
10595
+ self.close()
10596
+
10597
+
10598
+ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispatcher):
10599
+ """
10600
+ Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
10601
+
10602
+ - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
10603
+ ProcessCommunicationEvent by notifying the event callbacks.
10604
+ - route the output to the appropriate log handlers as specified in the config.
10605
+ """
10606
+
10607
+ def __init__(
10608
+ self,
10609
+ process: Process,
10610
+ event_type: ta.Type[ProcessCommunicationEvent],
10611
+ fd: Fd,
10612
+ *,
10613
+ event_callbacks: EventCallbacks,
10614
+ server_config: ServerConfig,
10615
+ ) -> None:
10616
+ super().__init__(
10617
+ process,
10618
+ event_type.channel,
10619
+ fd,
10620
+ event_callbacks=event_callbacks,
10621
+ server_config=server_config,
9906
10622
  )
9907
10623
 
9908
- def poll(self) -> None:
9909
- dispatchers = self.get_dispatchers()
10624
+ self._event_type = event_type
10625
+
10626
+ self._lc: ProcessConfig.Log = getattr(process.config, self._channel)
10627
+
10628
+ self._init_normal_log()
10629
+ self._init_capture_log()
10630
+
10631
+ self._child_log = self._normal_log
10632
+
10633
+ self._capture_mode = False # are we capturing process event data
10634
+ self._output_buffer = b'' # data waiting to be logged
10635
+
10636
+ # all code below is purely for minor speedups
10637
+
10638
+ begin_token = self._event_type.BEGIN_TOKEN
10639
+ end_token = self._event_type.END_TOKEN
10640
+ self._begin_token_data = (begin_token, len(begin_token))
10641
+ self._end_token_data = (end_token, len(end_token))
10642
+
10643
+ self._main_log_level = logging.DEBUG
10644
+
10645
+ self._log_to_main_log = self._server_config.loglevel <= self._main_log_level
10646
+
10647
+ self._stdout_events_enabled = self._process.config.stdout.events_enabled
10648
+ self._stderr_events_enabled = self._process.config.stderr.events_enabled
10649
+
10650
+ _child_log: ta.Optional[logging.Logger] = None # the current logger (normal_log or capture_log)
10651
+ _normal_log: ta.Optional[logging.Logger] = None # the "normal" (non-capture) logger
10652
+ _capture_log: ta.Optional[logging.Logger] = None # the logger used while we're in capture_mode
10653
+
10654
+ def _init_normal_log(self) -> None:
10655
+ """
10656
+ Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
10657
+ enabled.
10658
+ """
10659
+
10660
+ config = self._process.config # noqa
10661
+ channel = self._channel # noqa
10662
+
10663
+ logfile = self._lc.file
10664
+ max_bytes = self._lc.max_bytes # noqa
10665
+ backups = self._lc.backups # noqa
10666
+ to_syslog = self._lc.syslog
10667
+
10668
+ if logfile or to_syslog:
10669
+ self._normal_log = logging.getLogger(__name__)
10670
+
10671
+ # if logfile:
10672
+ # loggers.handle_file(
10673
+ # self.normal_log,
10674
+ # filename=logfile,
10675
+ # fmt='%(message)s',
10676
+ # rotating=bool(max_bytes), # optimization
10677
+ # max_bytes=max_bytes,
10678
+ # backups=backups,
10679
+ # )
10680
+
10681
+ # if to_syslog:
10682
+ # loggers.handle_syslog(
10683
+ # self.normal_log,
10684
+ # fmt=config.name + ' %(message)s',
10685
+ # )
10686
+
10687
+ def _init_capture_log(self) -> None:
10688
+ """
10689
+ Configure the capture log for this process. This log is used to temporarily capture output when special output
10690
+ is detected. Sets self.capture_log if capturing is enabled.
10691
+ """
10692
+
10693
+ capture_max_bytes = self._lc.capture_max_bytes
10694
+ if capture_max_bytes:
10695
+ self._capture_log = logging.getLogger(__name__)
10696
+ # loggers.handle_boundIO(
10697
+ # self._capture_log,
10698
+ # fmt='%(message)s',
10699
+ # max_bytes=capture_max_bytes,
10700
+ # )
10701
+
10702
+ def remove_logs(self) -> None:
10703
+ for l in (self._normal_log, self._capture_log):
10704
+ if l is not None:
10705
+ for handler in l.handlers:
10706
+ handler.remove() # type: ignore
10707
+ handler.reopen() # type: ignore
10708
+
10709
+ def reopen_logs(self) -> None:
10710
+ for l in (self._normal_log, self._capture_log):
10711
+ if l is not None:
10712
+ for handler in l.handlers:
10713
+ handler.reopen() # type: ignore
10714
+
10715
+ def _log(self, data: ta.Union[str, bytes, None]) -> None:
10716
+ if not data:
10717
+ return
10718
+
10719
+ if self._server_config.strip_ansi:
10720
+ data = strip_escapes(as_bytes(data))
10721
+
10722
+ if self._child_log:
10723
+ self._child_log.info(data)
10724
+
10725
+ if self._log_to_main_log:
10726
+ if not isinstance(data, bytes):
10727
+ text = data
10728
+ else:
10729
+ try:
10730
+ text = data.decode('utf-8')
10731
+ except UnicodeDecodeError:
10732
+ text = f'Undecodable: {data!r}'
10733
+ log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text) # noqa
10734
+
10735
+ if self._channel == 'stdout':
10736
+ if self._stdout_events_enabled:
10737
+ self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
10738
+
10739
+ elif self._stderr_events_enabled:
10740
+ self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
10741
+
10742
+ def record_output(self) -> None:
10743
+ if self._capture_log is None:
10744
+ # shortcut trying to find capture data
10745
+ data = self._output_buffer
10746
+ self._output_buffer = b''
10747
+ self._log(data)
10748
+ return
10749
+
10750
+ if self._capture_mode:
10751
+ token, token_len = self._end_token_data
10752
+ else:
10753
+ token, token_len = self._begin_token_data
9910
10754
 
9911
- self._poller.update(
9912
- {fd for fd, d in dispatchers.items() if d.readable()},
9913
- {fd for fd, d in dispatchers.items() if d.writable()},
9914
- )
10755
+ if len(self._output_buffer) <= token_len:
10756
+ return # not enough data
9915
10757
 
9916
- timeout = 1 # this cannot be fewer than the smallest TickEvent (5)
10758
+ data = self._output_buffer
10759
+ self._output_buffer = b''
9917
10760
 
9918
- polled = self._poller.poll(timeout)
10761
+ try:
10762
+ before, after = data.split(token, 1)
10763
+ except ValueError:
10764
+ after = None
10765
+ index = find_prefix_at_end(data, token)
10766
+ if index:
10767
+ self._output_buffer = self._output_buffer + data[-index:]
10768
+ data = data[:-index]
10769
+ self._log(data)
10770
+ else:
10771
+ self._log(before)
10772
+ self.toggle_capture_mode()
10773
+ self._output_buffer = after # type: ignore
9919
10774
 
9920
- if polled.msg is not None:
9921
- log.error(polled.msg)
9922
- if polled.exc is not None:
9923
- log.error('Poll exception: %r', polled.exc)
10775
+ if after:
10776
+ self.record_output()
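
`find_prefix_at_end` is what keeps a token that straddles two reads from being lost; a sketch along the lines of supervisor's classic helper:

def find_prefix_at_end(haystack: bytes, needle: bytes) -> int:
    # length of the longest proper prefix of `needle` that is a suffix of `haystack`
    l = len(needle) - 1
    while l and not haystack.endswith(needle[:l]):
        l -= 1
    return l

assert find_prefix_at_end(b'some output <!--XSUP', b'<!--XSUPERVISOR:BEGIN-->') == 8  # bytes kept buffered
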
9924
10777
 
9925
- for r in polled.r:
9926
- fd = Fd(r)
9927
- if fd in dispatchers:
9928
- dispatcher = dispatchers[fd]
9929
- try:
9930
- log.debug('read event caused by %r', dispatcher)
9931
- dispatcher.on_readable()
9932
- if not dispatcher.readable():
9933
- self._poller.unregister_readable(fd)
9934
- except ExitNow:
9935
- raise
9936
- except Exception as exc: # noqa
9937
- log.exception('Error in dispatcher: %r', dispatcher)
9938
- dispatcher.on_error(exc)
9939
- else:
9940
- # if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
9941
- # time, which may cause 100% cpu usage
9942
- log.debug('unexpected read event from fd %r', fd)
9943
- try:
9944
- self._poller.unregister_readable(fd)
9945
- except Exception: # noqa
9946
- pass
10778
+ def toggle_capture_mode(self) -> None:
10779
+ self._capture_mode = not self._capture_mode
9947
10780
 
9948
- for w in polled.w:
9949
- fd = Fd(w)
9950
- if fd in dispatchers:
9951
- dispatcher = dispatchers[fd]
9952
- try:
9953
- log.debug('write event caused by %r', dispatcher)
9954
- dispatcher.on_writable()
9955
- if not dispatcher.writable():
9956
- self._poller.unregister_writable(fd)
9957
- except ExitNow:
9958
- raise
9959
- except Exception as exc: # noqa
9960
- log.exception('Error in dispatcher: %r', dispatcher)
9961
- dispatcher.on_error(exc)
10781
+ if self._capture_log is not None:
10782
+ if self._capture_mode:
10783
+ self._child_log = self._capture_log
9962
10784
  else:
9963
- log.debug('unexpected write event from fd %r', fd)
9964
- try:
9965
- self._poller.unregister_writable(fd)
9966
- except Exception: # noqa
9967
- pass
9968
-
10785
+ for handler in self._capture_log.handlers:
10786
+ handler.flush()
10787
+ data = self._capture_log.getvalue() # type: ignore
10788
+ channel = self._channel
10789
+ procname = self._process.config.name
10790
+ event = self._event_type(self._process, self._process.pid, data)
10791
+ self._event_callbacks.notify(event)
9969
10792
 
9970
- ########################################
9971
- # ../spawning.py
10793
+ log.debug('%r %s emitted a comm event', procname, channel)
10794
+ for handler in self._capture_log.handlers:
10795
+ handler.remove() # type: ignore
10796
+ handler.reopen() # type: ignore
10797
+ self._child_log = self._normal_log
9972
10798
 
10799
+ def writable(self) -> bool:
10800
+ return False
9973
10801
 
9974
- ##
10802
+ def readable(self) -> bool:
10803
+ if self._closed:
10804
+ return False
10805
+ return True
9975
10806
 
10807
+ def on_readable(self) -> None:
10808
+ data = read_fd(self._fd)
10809
+ self._output_buffer += data
10810
+ self.record_output()
10811
+ if not data:
10812
+ # if we get no data back from the pipe, it means that the child process has ended. See
10813
+ # mail.python.org/pipermail/python-dev/2004-August/046850.html
10814
+ self.close()
9976
10815
 
9977
- @dc.dataclass(frozen=True)
9978
- class SpawnedProcess:
9979
- pid: Pid
9980
- pipes: ProcessPipes
9981
- dispatchers: Dispatchers
9982
10816
 
10817
+ class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatcher):
10818
+ def __init__(
10819
+ self,
10820
+ process: Process,
10821
+ channel: ProcessOutputChannel,
10822
+ fd: Fd,
10823
+ *,
10824
+ event_callbacks: EventCallbacks,
10825
+ server_config: ServerConfig,
10826
+ ) -> None:
10827
+ super().__init__(
10828
+ process,
10829
+ channel,
10830
+ fd,
10831
+ event_callbacks=event_callbacks,
10832
+ server_config=server_config,
10833
+ )
9983
10834
 
9984
- class ProcessSpawnError(RuntimeError):
9985
- pass
10835
+ self._input_buffer = b''
9986
10836
 
10837
+ def write(self, chars: ta.Union[bytes, str]) -> None:
10838
+ self._input_buffer += as_bytes(chars)
9987
10839
 
9988
- class ProcessSpawning:
9989
- @property
9990
- @abc.abstractmethod
9991
- def process(self) -> Process:
9992
- raise NotImplementedError
10840
+ def writable(self) -> bool:
10841
+ if self._input_buffer and not self._closed:
10842
+ return True
10843
+ return False
9993
10844
 
9994
- #
10845
+ def flush(self) -> None:
10846
+ # other code depends on this raising EPIPE if the pipe is closed
10847
+ sent = os.write(self._fd, as_bytes(self._input_buffer))
10848
+ self._input_buffer = self._input_buffer[sent:]
9995
10849
 
9996
- @abc.abstractmethod
9997
- def spawn(self) -> SpawnedProcess: # Raises[ProcessSpawnError]
9998
- raise NotImplementedError
10850
+ def on_writable(self) -> None:
10851
+ if self._input_buffer:
10852
+ try:
10853
+ self.flush()
10854
+ except OSError as why:
10855
+ if why.args[0] == errno.EPIPE:
10856
+ self._input_buffer = b''
10857
+ self.close()
10858
+ else:
10859
+ raise
9999
10860
 
10000
10861
 
10001
10862
  ########################################
@@ -10120,6 +10981,100 @@ class SupervisorHttpHandler(HttpHandler_):
10120
10981
  )
10121
10982
 
10122
10983
 
10984
+ ########################################
10985
+ # ../io.py
10986
+
10987
+
10988
+ log = get_module_logger(globals()) # noqa
10989
+
10990
+
10991
+ ##
10992
+
10993
+
10994
+ HasDispatchersList = ta.NewType('HasDispatchersList', ta.Sequence[HasDispatchers])
10995
+
10996
+
10997
+ class IoManager(HasDispatchers):
10998
+ def __init__(
10999
+ self,
11000
+ *,
11001
+ poller: FdioPoller,
11002
+ has_dispatchers_list: HasDispatchersList,
11003
+ ) -> None:
11004
+ super().__init__()
11005
+
11006
+ self._poller = poller
11007
+ self._has_dispatchers_list = has_dispatchers_list
11008
+
11009
+ def get_dispatchers(self) -> Dispatchers:
11010
+ return Dispatchers(
11011
+ d
11012
+ for hd in self._has_dispatchers_list
11013
+ for d in hd.get_dispatchers()
11014
+ )
11015
+
11016
+ def poll(self) -> None:
11017
+ dispatchers = self.get_dispatchers()
11018
+
11019
+ self._poller.update(
11020
+ {fd for fd, d in dispatchers.items() if d.readable()},
11021
+ {fd for fd, d in dispatchers.items() if d.writable()},
11022
+ )
11023
+
11024
+ timeout = 1 # this cannot be fewer than the smallest TickEvent (5)
11025
+
11026
+ polled = self._poller.poll(timeout)
11027
+
11028
+ if polled.msg is not None:
11029
+ log.error(polled.msg)
11030
+ if polled.exc is not None:
11031
+ log.error('Poll exception: %r', polled.exc)
11032
+
11033
+ for r in polled.r:
11034
+ fd = Fd(r)
11035
+ if fd in dispatchers:
11036
+ dispatcher = dispatchers[fd]
11037
+ try:
11038
+ log.debug('read event caused by %r', dispatcher)
11039
+ dispatcher.on_readable()
11040
+ if not dispatcher.readable():
11041
+ self._poller.unregister_readable(fd)
11042
+ except ExitNow:
11043
+ raise
11044
+ except Exception as exc: # noqa
11045
+ log.exception('Error in dispatcher: %r', dispatcher)
11046
+ dispatcher.on_error(exc)
11047
+ else:
11048
+ # if the fd is not in the combined map, we should unregister it. Otherwise, it will be polled every
11049
+ # time, which may cause 100% cpu usage
11050
+ log.debug('unexpected read event from fd %r', fd)
11051
+ try:
11052
+ self._poller.unregister_readable(fd)
11053
+ except Exception: # noqa
11054
+ pass
11055
+
11056
+ for w in polled.w:
11057
+ fd = Fd(w)
11058
+ if fd in dispatchers:
11059
+ dispatcher = dispatchers[fd]
11060
+ try:
11061
+ log.debug('write event caused by %r', dispatcher)
11062
+ dispatcher.on_writable()
11063
+ if not dispatcher.writable():
11064
+ self._poller.unregister_writable(fd)
11065
+ except ExitNow:
11066
+ raise
11067
+ except Exception as exc: # noqa
11068
+ log.exception('Error in dispatcher: %r', dispatcher)
11069
+ dispatcher.on_error(exc)
11070
+ else:
11071
+ log.debug('unexpected write event from fd %r', fd)
11072
+ try:
11073
+ self._poller.unregister_writable(fd)
11074
+ except Exception: # noqa
11075
+ pass
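
The unregister-on-stale-fd guard above is what prevents that spin; the same concern, shown with the stdlib selectors module (a standalone sketch, not the FdioPoller API):

import selectors
import socket

sel = selectors.DefaultSelector()
a, b = socket.socketpair()
sel.register(b, selectors.EVENT_READ)
a.close()  # peer gone: b is now permanently readable (EOF)
for key, _ in sel.select(timeout=1):
    if not key.fileobj.recv(1):  # b'' means EOF
        sel.unregister(key.fileobj)  # without this, select() would wake immediately, forever
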
11076
+
11077
+
10123
11078
  ########################################
10124
11079
  # ../processimpl.py
10125
11080
 
@@ -10603,6 +11558,256 @@ class ProcessImpl(Process):
10603
11558
  pass
10604
11559
 
10605
11560
 
11561
+ ########################################
11562
+ # ../setupimpl.py
11563
+
11564
+
11565
+ log = get_module_logger(globals()) # noqa
11566
+
11567
+
11568
+ ##
11569
+
11570
+
11571
+ class SupervisorSetupImpl(SupervisorSetup):
11572
+ def __init__(
11573
+ self,
11574
+ *,
11575
+ config: ServerConfig,
11576
+ user: ta.Optional[SupervisorUser] = None,
11577
+ epoch: ServerEpoch = ServerEpoch(0),
11578
+ daemonize_listeners: DaemonizeListeners = DaemonizeListeners([]),
11579
+ ) -> None:
11580
+ super().__init__()
11581
+
11582
+ self._config = config
11583
+ self._user = user
11584
+ self._epoch = epoch
11585
+ self._daemonize_listeners = daemonize_listeners
11586
+
11587
+ #
11588
+
11589
+ @property
11590
+ def first(self) -> bool:
11591
+ return not self._epoch
11592
+
11593
+ #
11594
+
11595
+ @cached_nullary
11596
+ def setup(self) -> None:
11597
+ if not self.first:
11598
+ # prevent crash on libdispatch-based systems, at least for the first request
11599
+ self._cleanup_fds()
11600
+
11601
+ self._set_uid_or_exit()
11602
+
11603
+ if self.first:
11604
+ self._set_rlimits_or_exit()
11605
+
11606
+ # this sets the options.logger object delay logger instantiation until after setuid
11607
+ if not self._config.nocleanup:
11608
+ # clean up old automatic logs
11609
+ self._clear_auto_child_logdir()
11610
+
11611
+ if not self._config.nodaemon and self.first:
11612
+ self._daemonize()
11613
+
11614
+ # writing pid file needs to come *after* daemonizing or pid will be wrong
11615
+ self._write_pidfile()
11616
+
11617
+ @cached_nullary
11618
+ def cleanup(self) -> None:
11619
+ self._cleanup_pidfile()
11620
+
11621
+ #
11622
+
11623
+ def _cleanup_fds(self) -> None:
11624
+ # try to close any leaked file descriptors (for reload)
11625
+ start = 5
11626
+ os.closerange(start, self._config.min_fds)
11627
+
11628
+ #
11629
+
11630
+ def _set_uid_or_exit(self) -> None:
11631
+ """
11632
+ Set the uid of the supervisord process. Called during supervisord startup only. No return value. Exits the
11633
+ process via usage() if privileges could not be dropped.
11634
+ """
11635
+
11636
+ if self._user is None:
11637
+ if os.getuid() == 0:
11638
+ warnings.warn(
11639
+ 'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
11640
+ 'config file. If you intend to run as root, you can set user=root in the config file to avoid '
11641
+ 'this message.',
11642
+ )
11643
+ else:
11644
+ msg = drop_privileges(self._user.uid)
11645
+ if msg is None:
11646
+ log.info('Set uid to user %s succeeded', self._user.uid)
11647
+ else: # failed to drop privileges
11648
+ raise RuntimeError(msg)
11649
+
11650
+ #
11651
+
11652
+ def _set_rlimits_or_exit(self) -> None:
11653
+ """
11654
+ Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits the
11655
+ process via usage() if any rlimits could not be set.
11656
+ """
11657
+
11658
+ limits = []
11659
+
11660
+ if hasattr(resource, 'RLIMIT_NOFILE'):
11661
+ limits.append({
11662
+ 'msg': (
11663
+ 'The minimum number of file descriptors required to run this process is %(min_limit)s as per the '
11664
+ '"min_fds" command-line argument or config file setting. The current environment will only allow '
11665
+ 'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
11666
+ 'your environment (see README.rst) or lower the min_fds setting in the config file to allow the '
11667
+ 'process to start.'
11668
+ ),
11669
+ 'min': self._config.min_fds,
11670
+ 'resource': resource.RLIMIT_NOFILE,
11671
+ 'name': 'RLIMIT_NOFILE',
11672
+ })
11673
+
11674
+ if hasattr(resource, 'RLIMIT_NPROC'):
11675
+ limits.append({
11676
+ 'msg': (
11677
+ 'The minimum number of available processes required to run this program is %(min_limit)s as per '
11678
+ 'the "minprocs" command-line argument or config file setting. The current environment will only '
11679
+ 'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
11680
+ 'environment (see README.rst) or lower the minprocs setting in the config file to allow the '
11681
+ 'program to start.'
11682
+ ),
11683
+ 'min': self._config.min_procs,
11684
+ 'resource': resource.RLIMIT_NPROC,
11685
+ 'name': 'RLIMIT_NPROC',
11686
+ })
11687
+
11688
+ for limit in limits:
11689
+ min_limit = limit['min']
11690
+ res = limit['resource']
11691
+ msg = limit['msg']
11692
+ name = limit['name']
11693
+
11694
+ soft, hard = resource.getrlimit(res) # type: ignore
11695
+
11696
+ # -1 means unlimited
11697
+ if soft < min_limit and soft != -1: # type: ignore
11698
+ if hard < min_limit and hard != -1: # type: ignore
11699
+ # setrlimit should increase the hard limit if we are root; if not, setrlimit raises and we print
11700
+ # usage
11701
+ hard = min_limit # type: ignore
11702
+
11703
+ try:
11704
+ resource.setrlimit(res, (min_limit, hard)) # type: ignore
11705
+ log.info('Increased %s limit to %s', name, min_limit)
11706
+ except (OSError, ValueError):
11707
+ raise RuntimeError(msg % dict( # type: ignore # noqa
11708
+ min_limit=min_limit,
11709
+ res=res,
11710
+ name=name,
11711
+ soft=soft,
11712
+ hard=hard,
11713
+ ))
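
The soft/hard dance above in isolation (a sketch; Unix-only, mirroring the `-1`/unlimited check via `resource.RLIM_INFINITY`):

import resource

wanted = 4096
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < wanted and soft != resource.RLIM_INFINITY:
    if hard < wanted and hard != resource.RLIM_INFINITY:
        hard = wanted  # only root can actually raise the hard limit; otherwise setrlimit raises
    resource.setrlimit(resource.RLIMIT_NOFILE, (wanted, hard))
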
11714
+
11715
+ #
11716
+
11717
+ _unlink_pidfile = False
11718
+
11719
+ def _write_pidfile(self) -> None:
11720
+ pid = os.getpid()
11721
+ try:
11722
+ with open(self._config.pidfile, 'w') as f:
11723
+ f.write(f'{pid}\n')
11724
+ except OSError:
11725
+ log.critical('could not write pidfile %s', self._config.pidfile)
11726
+ else:
11727
+ self._unlink_pidfile = True
11728
+ log.info('supervisord started with pid %s', pid)
11729
+
11730
+ def _cleanup_pidfile(self) -> None:
11731
+ if self._unlink_pidfile:
11732
+ try_unlink(self._config.pidfile)
11733
+
11734
+ #
11735
+
11736
+ def _clear_auto_child_logdir(self) -> None:
11737
+ # must be called after realize()
11738
+ child_logdir = self._config.child_logdir
11739
+ if child_logdir == '/dev/null':
11740
+ return
11741
+
11742
+ fnre = re.compile(rf'.+?---{self._config.identifier}-\S+\.log\.?\d{{0,4}}')
11743
+ try:
11744
+ filenames = os.listdir(child_logdir)
11745
+ except OSError:
11746
+ log.warning('Could not clear child_log dir')
11747
+ return
11748
+
11749
+ for filename in filenames:
11750
+ if fnre.match(filename):
11751
+ pathname = os.path.join(child_logdir, filename)
11752
+ try:
11753
+ os.remove(pathname)
11754
+ except OSError:
11755
+ log.warning('Failed to clean up %r', pathname)
11756
+
11757
+ #
11758
+
11759
+ def _daemonize(self) -> None:
11760
+ for dl in self._daemonize_listeners:
11761
+ dl.before_daemonize()
11762
+
11763
+ self._do_daemonize()
11764
+
11765
+ for dl in self._daemonize_listeners:
11766
+ dl.after_daemonize()
11767
+
11768
+ def _do_daemonize(self) -> None:
11769
+ # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
11770
+ # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
11771
+ # our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
11772
+ # terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
11773
+ # use os.setsid. It means "set session id", which has the effect of disassociating a process from its current
11774
+ # session and process group and setting itself up as a new session leader.
11775
+ #
11776
+ # Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
11777
+ # of ourselves that is guaranteed to not be a session group leader.
11778
+ #
11779
+ # We also change directories, set stderr and stdout to null, and change our umask.
11780
+ #
11781
+ # This explanation was (gratefully) garnered from
11782
+ # http://www.cems.uwe.ac.uk/~irjohnso/coursenotes/lrc/system/daemons/d3.htm
11783
+
11784
+ pid = os.fork()
11785
+ if pid != 0:
11786
+ # Parent
11787
+ log.debug('supervisord forked; parent exiting')
11788
+ real_exit(Rc(0))
11789
+
11790
+ # Child
11791
+ log.info('daemonizing the supervisord process')
11792
+ if self._config.directory:
11793
+ try:
11794
+ os.chdir(self._config.directory)
11795
+ except OSError as err:
11796
+ log.critical("can't chdir into %r: %s", self._config.directory, err)
11797
+ else:
11798
+ log.info('set current directory: %r', self._config.directory)
11799
+
11800
+ os.dup2(os.open('/dev/null', os.O_RDONLY), 0)
11801
+ os.dup2(os.open('/dev/null', os.O_WRONLY), 1)
11802
+ os.dup2(os.open('/dev/null', os.O_WRONLY), 2)
11803
+
11804
+ # XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
11805
+ # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
11806
+ # again after the setsid() call, for obscure SVR4 reasons.
11807
+ os.setsid()
11808
+ os.umask(self._config.umask)
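
The Stevens double-fork variant the comment above mentions, as a self-contained sketch (the code above uses a single fork):

import os
import sys

def daemonize() -> None:
    if os.fork() > 0:
        sys.exit(0)  # original parent returns to the shell
    os.setsid()  # become session leader, detached from the controlling terminal
    if os.fork() > 0:
        sys.exit(0)  # session leader exits; the survivor can never reacquire a terminal
    os.chdir('/')
    os.umask(0)
    fd = os.open('/dev/null', os.O_RDWR)
    for std_fd in (0, 1, 2):
        os.dup2(fd, std_fd)  # note the argument order: dup2(src, dst) replaces dst
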
11809
+
11810
+
10606
11811
  ########################################
10607
11812
  # ../signals.py
10608
11813