prismalog 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prismalog/log.py ADDED
@@ -0,0 +1,927 @@
1
+ """
2
+ Logging functionality for the prismalog package.
3
+
4
+ This module provides a high-performance, feature-rich logging system designed
5
+ specifically for multiprocessing and multithreading environments. It extends
6
+ Python's standard logging with colored output, automatic log rotation, and
7
+ improved handling of critical errors.
8
+
9
+ Key components:
10
+ - ColoredFormatter: Adds color-coding to console output based on log levels
11
+ - MultiProcessingLog: Thread-safe and process-safe log handler with rotation support
12
+ - CriticalExitHandler: Optional handler that exits the program on critical errors
13
+ - ColoredLogger: Main logger class with enhanced functionality
14
+ - get_logger: Factory function to obtain properly configured loggers
15
+
16
+ Features:
17
+ - Up to 29K msgs/sec in multiprocessing mode
18
+ - Colored console output for improved readability
19
+ - Automatic log file rotation based on size
20
+ - Process-safe and thread-safe logging
21
+ - Special handling for critical errors
22
+ - Configurable verbosity levels for different modules
23
+ - Zero external dependencies
24
+
25
+ Example:
26
+ >>> from prismalog import get_logger
27
+ >>> logger = get_logger("my_module")
28
+ >>> logger.info("Application started")
29
+ >>> logger.debug("Detailed debugging information")
30
+ >>> logger.error("Something went wrong")
31
+ """
32
+
33
+ import logging
34
+ import os
35
+ import sys
36
+ import time
37
+ from datetime import datetime
38
+ from logging import LogRecord, StreamHandler
39
+ from logging.handlers import RotatingFileHandler
40
+ from multiprocessing import RLock
41
+ from types import FrameType
42
+ from typing import Any, Dict, List, Literal, Optional, Type, Union, cast
43
+
44
+ from .config import LoggingConfig
45
+
46
+
47
+ class ColoredFormatter(logging.Formatter):
48
+ """
49
+ Custom formatter that adds ANSI color codes to log level names in console output.
50
+
51
+ This enhances readability by color-coding log messages based on their severity:
52
+ - DEBUG: Blue
53
+ - INFO: Green
54
+ - WARNING: Yellow
55
+ - ERROR: Red
56
+ - CRITICAL: Bright Red
57
+
58
+ Colors are only applied when the formatter is initialized with colored=True
59
+ and when the output stream supports ANSI color codes.
60
+
61
+ Args:
62
+ fmt: Format string for log messages
63
+ datefmt: Format string for dates
64
+ style: Style of the format string ('%', '{', or '$')
65
+ colored: Whether to apply ANSI color codes to level names
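+
+ Example (illustrative sketch; the format string and logger name are placeholders):
+ >>> import logging, sys
+ >>> console = logging.StreamHandler(sys.stdout)
+ >>> console.setFormatter(ColoredFormatter("%(levelname)s - %(message)s", colored=True))
+ >>> logging.getLogger("demo").addHandler(console)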
66
+ """
67
+
68
+ # ANSI color codes
69
+ COLORS = {
70
+ "DEBUG": "\033[94m", # Blue
71
+ "INFO": "\033[92m", # Green
72
+ "WARNING": "\033[93m", # Yellow
73
+ "ERROR": "\033[91m", # Red
74
+ "CRITICAL": "\033[91m\033[1m", # Bright Red
75
+ }
76
+ RESET = "\033[0m" # Reset color
77
+
78
+ def __init__(
79
+ self,
80
+ fmt: Optional[str] = None,
81
+ datefmt: Optional[str] = None,
82
+ style: Literal["%", "{", "$"] = "%",
83
+ colored: bool = True,
84
+ ) -> None:
85
+ """
86
+ Initialize the ColoredFormatter.
87
+
88
+ Args:
89
+ fmt: Format string for log messages
90
+ datefmt: Format string for dates
91
+ style: Style of the format string ('%', '{', or '$')
92
+ colored: Whether to apply ANSI color codes to level names
93
+ """
94
+ super().__init__(fmt, datefmt, style)
95
+ self.colored = colored
96
+
97
+ def format(self, record: LogRecord) -> str:
98
+ """Format log record with optional color coding."""
99
+ # Save the original levelname
100
+ original_levelname = record.levelname
101
+
102
+ if self.colored and original_levelname in self.COLORS:
103
+ # Add color to the levelname
104
+ record.levelname = f"{self.COLORS[original_levelname]}{record.levelname}{self.RESET}"
105
+
106
+ # Use the original formatter to do the formatting
107
+ result = super().format(record)
108
+
109
+ # Restore the original levelname
110
+ record.levelname = original_levelname
111
+
112
+ return result
113
+
114
+
115
+ class MultiProcessingLog(logging.Handler):
116
+ """
117
+ Thread-safe and process-safe logging handler based on RotatingFileHandler.
118
+
119
+ This handler ensures consistent log file access across multiple processes
120
+ by using a Lock to coordinate file operations. It automatically handles log
121
+ file rotation and ensures all processes write to the current active log file.
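+
+ Example (illustrative sketch; the path and rotation values are placeholders):
+ >>> import logging
+ >>> handler = MultiProcessingLog("logs/app.log", maxBytes=5 * 1024 * 1024, backupCount=3)
+ >>> logging.getLogger("worker").addHandler(handler)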
122
+ """
123
+
124
+ # Class-level lock shared across all instances
125
+ file_lock = RLock()  # re-entrant: emit() may trigger doRollover() while already holding the lock
126
+ # Track the active log file across all processes
127
+ active_log_file = None
128
+
129
+ def __init__(self, filename: str, mode: str = "a", maxBytes: int = 0, backupCount: int = 0) -> None:
130
+ """
131
+ Initialize the handler with the specified file and rotation settings.
132
+
133
+ Args:
134
+ filename: Path to the log file
135
+ mode: File opening mode
136
+ maxBytes: Maximum size in bytes before rotation
137
+ backupCount: Number of backup files to keep
138
+ """
139
+ logging.Handler.__init__(self)
140
+ self.filename = filename
141
+ self.mode = mode
142
+ self.maxBytes = maxBytes # pylint: disable=invalid-name
143
+ self.backupCount = backupCount # pylint: disable=invalid-name
144
+ self._handler: Optional[RotatingFileHandler] = None # Add type annotation
145
+
146
+ # Update the class-level active log file
147
+ with self.__class__.file_lock:
148
+ self.__class__.active_log_file = filename
149
+
150
+ # Create the directory if it doesn't exist
151
+ os.makedirs(os.path.dirname(filename), exist_ok=True)
152
+
153
+ # Create the rotating file handler
154
+ self._create_handler()
155
+
156
+ def _create_handler(self) -> None:
157
+ """Create or recreate the underlying rotating file handler"""
158
+ # Close existing handler if it exists
159
+ if hasattr(self, "_handler") and self._handler is not None:
160
+ try:
161
+ self._handler.close()
162
+ except (IOError, OSError) as e:
163
+ # Most likely errors when closing a handler are I/O related
164
+ LoggingConfig.debug_print(f"Warning: I/O error closing log handler: {e}")
165
+ except ValueError as e:
166
+ # ValueError can happen if handler is already closed
167
+ LoggingConfig.debug_print(f"Warning: Handler already closed: {e}")
168
+ except Exception as e:
169
+ # Fallback for unexpected errors, with specific error type
170
+ LoggingConfig.debug_print(
171
+ f"Warning: Unexpected error ({type(e).__name__}) " f"closing log handler: {e}"
172
+ )
173
+
174
+ # Create new handler
175
+ self._handler = RotatingFileHandler(self.filename, self.mode, self.maxBytes, self.backupCount)
176
+
177
+ # Copy the formatter if one is set for the handler
178
+ if hasattr(self, "formatter") and self.formatter:
179
+ self._handler.setFormatter(self.formatter)
180
+
181
+ def emit(self, record: LogRecord) -> None:
182
+ """Process a log record and write it to the log file."""
183
+ # Use the lock to prevent concurrent writes
184
+ with self.__class__.file_lock:
185
+ # Always check if the filename matches the current active log file
186
+ if self.filename != self.__class__.active_log_file:
187
+ # Another process has created a new log file, switch to it
188
+ if self.__class__.active_log_file is not None:
189
+ self.filename = self.__class__.active_log_file
190
+ self._create_handler()
191
+
192
+ # Ensure handler exists
193
+ if self._handler is None:
194
+ self._create_handler()
195
+
196
+ # Now emit the record
197
+ try:
198
+ # Check if rotation needed before emitting
199
+ if self.maxBytes > 0 and os.path.exists(self.filename):
200
+ try:
201
+ # Check file size
202
+ size = os.path.getsize(self.filename)
203
+ if size >= self.maxBytes and self._handler is not None:
204
+ self.doRollover()
205
+ except OSError:
206
+ # If checking size fails, continue with emit
207
+ pass
208
+
209
+ if self._handler is not None:
210
+ self._handler.emit(record)
211
+ except Exception:
212
+ # If any error occurs, try to recreate the handler
213
+ self._create_handler()
214
+
215
+ try:
216
+ if self._handler is not None:
217
+ self._handler.emit(record)
218
+ else:
219
+ self.handleError(record)
220
+ except Exception:
221
+ self.handleError(record)
222
+
223
+ def close(self) -> None:
224
+ """Close the file handler."""
225
+ if self._handler is not None:
226
+ self._handler.close()
227
+ logging.Handler.close(self)
228
+
229
+ def setFormatter(self, fmt: Optional[logging.Formatter]) -> None:
230
+ """Set formatter for the handler and underlying rotating handler."""
231
+ logging.Handler.setFormatter(self, fmt)
232
+ if hasattr(self, "_handler") and self._handler is not None and fmt is not None:
233
+ self._handler.setFormatter(fmt)
234
+
235
+ def doRollover(self) -> None: # pylint: disable=invalid-name
236
+ """Force a rollover and create a new log file"""
237
+ with self.__class__.file_lock:
238
+ try:
239
+ # First, ensure handler exists
240
+ if self._handler is None:
241
+ self._create_handler()
242
+
243
+ # Let the RotatingFileHandler do its rollover if it exists
244
+ if self._handler is not None:
245
+ self._handler.doRollover()
246
+
247
+ # Log files with rotation typically use pattern: filename.1, filename.2, etc.
248
+ # Ensure all processes start using the new (empty) log file
249
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
250
+ unique_suffix = str(os.getpid() % 10000) # Use last 4 digits of PID for uniqueness
251
+ log_dir = os.path.dirname(self.filename)
252
+ new_filename = os.path.join(log_dir, f"app_{timestamp}_{unique_suffix}.log")
253
+
254
+ # Update the filename used by this instance
255
+ self.filename = new_filename
256
+
257
+ # Update the class-level active log file for all processes
258
+ self.__class__.active_log_file = new_filename
259
+
260
+ # Create a new handler with the new file
261
+ self._create_handler()
262
+
263
+ # Log the rotation event to the new file
264
+ if self._handler is not None:
265
+ record = logging.LogRecord(
266
+ name="LogRotation",
267
+ level=logging.INFO,
268
+ pathname="",
269
+ lineno=0,
270
+ msg="Log file rotated",
271
+ args=(),
272
+ exc_info=None,
273
+ )
274
+ # Emit directly using the handler to avoid recursion
275
+ self._handler.emit(record)
276
+
277
+ except Exception as e:
278
+ # If rotation fails, log the error but continue
279
+ LoggingConfig.debug_print(f"Error during log rotation: {e}")
280
+
281
+ def __repr__(self) -> str:
282
+ """
283
+ Return a string representation of the MultiProcessingLog instance.
284
+
285
+ Returns:
286
+ str: A string representation of the instance.
287
+ """
288
+ # Add explicit cast to ensure the return is a string
289
+ return cast(str, f"<MultiProcessingLog ({self.level_name})>")
290
+
291
+ @property
292
+ def level_name(self) -> str:
293
+ """
294
+ Get the name of the current log level.
295
+
296
+ This property retrieves the human-readable name of the log level
297
+ (e.g., "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") based on
298
+ the numeric log level value.
299
+
300
+ Returns:
301
+ str: The name of the current log level.
302
+ """
303
+ # Use cast to ensure the return type is str
304
+ return cast(str, logging.getLevelName(self.level))
305
+
306
+
307
+ class CriticalExitHandler(logging.Handler):
308
+ """
309
+ Handler that exits the program when a critical message is logged.
310
+
311
+ This handler only processes CRITICAL level log messages. When such a message
312
+ is received, it checks the exit_on_critical configuration setting and calls
313
+ sys.exit(1) if enabled.
314
+
315
+ The handler can be temporarily disabled for testing purposes.
316
+
317
+ Class methods:
318
+ disable_exit(): Temporarily disable program exit on critical logs
319
+ enable_exit(): Re-enable program exit on critical logs
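+
+ Example (illustrative; typically used from a test fixture):
+ >>> CriticalExitHandler.disable_exit()
+ >>> logger = get_logger("may_fail")
+ >>> logger.critical("logged without terminating the test process")
+ >>> CriticalExitHandler.enable_exit()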
320
+ """
321
+
322
+ # Class variable to disable exit functionality for tests
323
+ exit_disabled = False
324
+
325
+ def __init__(self) -> None:
326
+ """Initialize the CriticalExitHandler with CRITICAL log level."""
327
+ super().__init__(level=logging.CRITICAL)
328
+
329
+ @classmethod
330
+ def disable_exit(cls, disable: bool = True) -> None:
331
+ """
332
+ Control exit functionality for testing.
333
+
334
+ When exits are disabled, critical logs will not cause the program to
335
+ terminate, allowing tests to safely check critical error paths.
336
+
337
+ Args:
338
+ disable: If True (default), disable exits. If False, enable exits.
339
+ """
340
+ cls.exit_disabled = disable
341
+
342
+ @classmethod
343
+ def enable_exit(cls) -> None:
344
+ """Re-enable exit functionality after testing."""
345
+ cls.exit_disabled = False
346
+
347
+ def emit(self, record: LogRecord) -> None:
348
+ """
349
+ Process a log record and potentially exit the program.
350
+
351
+ Args:
352
+ record: The log record to process
353
+ """
354
+ # First check if explicitly disabled for tests
355
+ if self.__class__.exit_disabled:
356
+ return
357
+
358
+ # If set to True, critical log will lead to system exit
359
+ exit_on_critical = LoggingConfig.get("exit_on_critical", True)
360
+
361
+ # Exit if configured to do so (and not disabled)
362
+ if exit_on_critical:
363
+ sys.exit(1)
364
+
365
+
366
+ class ColoredLogger:
367
+ """Logger with colored output support."""
368
+
369
+ # Class-level attributes for shared resources
370
+ _initialized_loggers: Dict[str, "ColoredLogger"] = {}
371
+ _log_file_path: Optional[str] = None
372
+ _file_handler: Optional[MultiProcessingLog] = None
373
+ _root_logger: Optional[logging.Logger] = None
374
+ _loggers: Dict[str, "ColoredLogger"] = {}
375
+
376
+ def __init__(self, name: str, verbose: Optional[str] = None) -> None:
377
+ """Initialize colored logger."""
378
+ self.name = name
379
+ self.verbose = verbose
380
+ self._propagate = False # Default to False like standard logger
381
+ self._configured_level = LoggingConfig.get_level(name, verbose)
382
+ self.logger = self._setup_logger()
383
+
384
+ # Only add CriticalExitHandler if configured to exit on critical
385
+ exit_on_critical = LoggingConfig.get("exit_on_critical", True)
386
+ if exit_on_critical:
387
+ # Add the handler that will exit on critical
388
+ critical_handler = CriticalExitHandler()
389
+ self.logger.addHandler(critical_handler)
390
+
391
+ @property
392
+ def propagate(self) -> bool:
393
+ """Control whether messages are propagated to parent loggers."""
394
+ return self._propagate
395
+
396
+ @propagate.setter
397
+ def propagate(self, value: bool) -> None:
398
+ """
399
+ Set propagation value and update internal logger.
400
+
401
+ Args:
402
+ value: Boolean indicating whether to propagate messages to parent loggers
403
+ """
404
+ self._propagate = bool(value)
405
+ if hasattr(self, "logger"):
406
+ self.logger.propagate = self._propagate
407
+
408
+ def _setup_logger(self) -> logging.Logger:
409
+ """Set up the internal logger."""
410
+ logger = logging.getLogger(self.name)
411
+ logger.propagate = self._propagate
412
+ logger.setLevel(logging.DEBUG)
413
+
414
+ # Always clean up any existing handlers to avoid duplicates
415
+ if logger.handlers:
416
+ for handler in logger.handlers[:]:
417
+ logger.removeHandler(handler)
418
+
419
+ # Add handlers to the logger
420
+ self._add_handlers_to_logger(logger)
421
+
422
+ # Store in initialized loggers dictionary
423
+ self.__class__._initialized_loggers[self.name] = self
424
+
425
+ return logger
426
+
427
+ def _add_handlers_to_logger(self, logger: logging.Logger) -> None:
428
+ """Add necessary handlers to the logger."""
429
+
430
+ # Get format string from config
431
+ log_format = LoggingConfig.get("log_format", "%(asctime)s - %(name)s - [%(levelname)s] - %(message)s")
432
+
433
+ # Console Handler
434
+ ch = logging.StreamHandler(sys.stdout)
435
+ ch.setFormatter(ColoredFormatter(fmt=log_format, colored=LoggingConfig.get("colored_console", True)))
436
+ ch.setLevel(self._configured_level)
437
+ logger.addHandler(ch)
438
+
439
+ # File Handler
440
+ if not self.__class__._file_handler:
441
+ self.__class__._file_handler = self.__class__.setup_file_handler()
442
+
443
+ if self.__class__._file_handler:
444
+ # Set the same format for file handler
445
+ self.__class__._file_handler.setFormatter(
446
+ ColoredFormatter(fmt=log_format, colored=LoggingConfig.get("colored_file", False))
447
+ )
448
+ logger.addHandler(self.__class__._file_handler)
449
+
450
+ @classmethod
451
+ def setup_file_handler(cls, log_file_path: Optional[str] = None) -> Optional[MultiProcessingLog]:
452
+ """
453
+ Set up the file handler using LoggingConfig.
454
+
455
+ Args:
456
+ log_file_path: Optional explicit path for the log file
457
+
458
+ Returns:
459
+ The configured MultiProcessingLog handler or None if setup fails
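+
+ Example (illustrative sketch; the path is a placeholder):
+ >>> import logging
+ >>> handler = ColoredLogger.setup_file_handler("logs/custom.log")
+ >>> logging.getLogger("file_only").addHandler(handler)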
460
+ """
461
+ # If a file handler already exists and no specific path is requested, return existing
462
+ if cls._file_handler and not log_file_path:
463
+ return cls._file_handler
464
+
465
+ # --- Determine Log File Path ---
466
+ if log_file_path is None:
467
+ # Get log directory from config, default to "logs"
468
+ log_dir = LoggingConfig.get("log_dir", "logs")
469
+ os.makedirs(log_dir, exist_ok=True)
470
+
471
+ # Generate filename (keeping existing logic)
472
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
473
+ unique_suffix = str(os.getpid() % 1000)
474
+ log_file_path = os.path.join(log_dir, f"app_{timestamp}_{unique_suffix}.log")
475
+
476
+ cls._log_file_path = log_file_path # Store the determined path
477
+
478
+ # --- Rotation Settings from Config ---
479
+ disable_rotation = LoggingConfig.get("disable_rotation", False)
480
+ # Also check env var for compatibility if needed, though config should be primary
481
+ # disable_rotation = disable_rotation or os.environ.get("LOG_DISABLE_ROTATION") == "1"
482
+
483
+ handler: MultiProcessingLog # Type hint
484
+
485
+ if disable_rotation:
486
+ LoggingConfig.debug_print("Log rotation is disabled via config")
487
+ handler = MultiProcessingLog(log_file_path, "a", 0, 0) # No rotation
488
+ else:
489
+ # Get rotation size from config, default 10MB
490
+ rotation_size_mb = LoggingConfig.get("rotation_size_mb", 10)
491
+ # Ensure minimum size (e.g., 1KB)
492
+ rotation_size_bytes = max(1024, int(rotation_size_mb * 1024 * 1024))
493
+
494
+ # Get backup count from config, default 5
495
+ backup_count = LoggingConfig.get("backup_count", 5)
496
+ # Ensure minimum count (e.g., 1)
497
+ backup_count = max(1, backup_count)
498
+
499
+ LoggingConfig.debug_print(
500
+ "Setting up log rotation via config: "
501
+ f"maxSize={rotation_size_bytes} bytes ({rotation_size_mb}MB), "
502
+ f"backups={backup_count}"
503
+ )
504
+ handler = MultiProcessingLog(log_file_path, "a", rotation_size_bytes, backup_count)
505
+
506
+ # --- Formatter from Config ---
507
+ default_format = "%(asctime)s - %(filename)s - %(name)s - [%(levelname)s] - %(message)s"
508
+ log_format = LoggingConfig.get("log_format", default_format)
509
+
510
+ # Get color setting for file handler from config, default to False
511
+ use_file_color = LoggingConfig.get("colored_file", False)
512
+
513
+ # Use the config setting for 'colored'
514
+ handler.setFormatter(ColoredFormatter(log_format, colored=use_file_color))
515
+
516
+ # --- Level ---
517
+ # File handler always logs at DEBUG level as per original design
518
+ handler.setLevel(logging.DEBUG)
519
+
520
+ return handler
521
+
522
+ @classmethod
523
+ def reset(cls, new_file: bool = False) -> Type["ColoredLogger"]:
524
+ """
525
+ Reset all active loggers and optionally create a new log file.
526
+
527
+ This method is useful when logging needs to be reconfigured or when
528
+ testing different logging configurations. It closes all existing handlers,
529
+ clears the internal registry, and optionally creates a new log file.
530
+
531
+ Args:
532
+ new_file: If True, generate a new unique log file. If False, reuse the existing one.
533
+
534
+ Returns:
535
+ The ColoredLogger class for method chaining
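+
+ Example (illustrative; the returned class is discarded for brevity):
+ >>> _ = ColoredLogger.reset(new_file=True)
+ >>> logger = get_logger("after_reset")  # re-created against the new log file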
536
+ """
537
+ # Store logger names before clearing
538
+ logger_names = list(cls._initialized_loggers.keys())
539
+
540
+ # Remove all initialized loggers
541
+ for name in logger_names:
542
+ logger = logging.getLogger(name)
543
+ for handler in logger.handlers[:]:
544
+ logger.removeHandler(handler)
545
+ try:
546
+ handler.close()
547
+ except Exception:
548
+ pass
549
+
550
+ # Clear the initialized loggers dictionary
551
+ cls._initialized_loggers.clear()
552
+
553
+ # Close and clear the shared file handler
554
+ if cls._file_handler:
555
+ try:
556
+ cls._file_handler.close()
557
+ except Exception:
558
+ pass
559
+ cls._file_handler = None
560
+
561
+ # For test_logger_reset test, ensure a different path is generated
562
+ if new_file:
563
+ # ensure CLI args have been processed
564
+ log_dir = LoggingConfig.get("log_dir", "logs")
565
+
566
+ # Make absolute path if needed
567
+ if not os.path.isabs(log_dir):
568
+ log_dir = os.path.abspath(log_dir)
569
+
570
+ os.makedirs(log_dir, exist_ok=True)
571
+
572
+ # Use time.time() to ensure uniqueness, even for fast successive calls
573
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
574
+ unique_suffix = str(int(time.time() * 1000) % 10000) # Use milliseconds as unique ID
575
+ cls._log_file_path = os.path.join(log_dir, f"app_{timestamp}_{unique_suffix}.log")
576
+
577
+ # Create a new file handler
578
+ if cls._file_handler is None:
579
+ cls._file_handler = cls.setup_file_handler(cls._log_file_path)
580
+
581
+ # Reinitialize loggers that were previously registered
582
+ for name in logger_names:
583
+ get_logger(name)
584
+
585
+ return cls
586
+
587
+ @classmethod
588
+ def update_logger_level(cls, name: str, level: Union[int, str]) -> None:
589
+ """
590
+ Update the log level of an existing logger.
591
+
592
+ Args:
593
+ name: Name of the logger to update
594
+ level: New log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
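+
+ Example (illustrative; assumes the logger was created earlier via get_logger):
+ >>> _ = get_logger("worker")
+ >>> ColoredLogger.update_logger_level("worker", "WARNING")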
595
+ """
596
+ if name in cls._initialized_loggers:
597
+ logger_instance = cls._initialized_loggers[name]
598
+
599
+ if isinstance(level, int):
600
+ # If it's already an integer level, use it directly
601
+ level_value = level
602
+ else:
603
+ # If it's a string, use map_level to convert it
604
+ level_value = LoggingConfig.map_level(level)
605
+
606
+ # Update the level in both the wrapper and the underlying logger
607
+ logger_instance.level = level_value
608
+ logger_instance.logger.setLevel(level_value)
609
+
610
+ @property
611
+ def handlers(self) -> List[logging.Handler]:
612
+ """
613
+ Return the handlers from the underlying logger.
614
+
615
+ Returns:
616
+ List of handlers attached to the logger
617
+ """
618
+ return self.logger.handlers
619
+
620
+ @property
621
+ def level(self) -> int:
622
+ """
623
+ Return the effective level for the logger (for testing).
624
+
625
+ Returns:
626
+ The configured log level as an integer
627
+ """
628
+ # This abstracts the implementation details from the tests
629
+ # For tests, report the configured level, not the actual logger level
630
+ return self._configured_level
631
+
632
+ @level.setter
633
+ def level(self, value: int) -> None:
634
+ """
635
+ Set the configured level and update console handlers.
636
+
637
+ Args:
638
+ value: The new log level to set
639
+ """
640
+ self._configured_level = value
641
+
642
+ # Update console handlers only
643
+ if hasattr(self, "logger") and self.logger:
644
+ for handler in self.logger.handlers:
645
+ is_stream_handler = isinstance(handler, logging.StreamHandler)
646
+ is_not_multiprocessing_log = not isinstance(handler, MultiProcessingLog)
647
+
648
+ if is_stream_handler and is_not_multiprocessing_log:
649
+ handler.setLevel(value)
650
+
651
+ # Logger methods - delegate to the underlying logger
652
+ def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
653
+ """Logs a debug message."""
654
+ self.logger.debug(msg, *args, **kwargs)
655
+
656
+ def info(self, msg: str, *args: Any, **kwargs: Any) -> None:
657
+ """Logs an info message."""
658
+ self.logger.info(msg, *args, **kwargs)
659
+
660
+ def warning(self, msg: str, *args: Any, **kwargs: Any) -> None:
661
+ """
662
+ Logs a warning message.
663
+
664
+ Args:
665
+ msg: The message to log
666
+ *args: Variable length argument list
667
+ **kwargs: Arbitrary keyword arguments
668
+ """
669
+ self.logger.warning(msg, *args, **kwargs)
670
+
671
+ def error(self, msg: str, *args: Any, **kwargs: Any) -> None:
672
+ """
673
+ Logs an error message.
674
+
675
+ Args:
676
+ msg: The message to log
677
+ *args: Variable length argument list
678
+ **kwargs: Arbitrary keyword arguments
679
+ """
680
+ self.logger.error(msg, *args, **kwargs)
681
+
682
+ def critical(self, msg: str, *args: Any, **kwargs: Any) -> None:
683
+ """
684
+ Logs a critical message.
685
+
686
+ Note: If exit_on_critical=True in config, this will terminate the program.
687
+
688
+ Args:
689
+ msg: The message to log
690
+ *args: Variable length argument list
691
+ **kwargs: Arbitrary keyword arguments
692
+ """
693
+ self.logger.critical(msg, *args, **kwargs)
694
+
695
+ def exception(self, msg: str, *args: Any, **kwargs: Any) -> None:
696
+ """Logs an exception message."""
697
+ self.logger.exception(msg, *args, **kwargs)
698
+
699
+
700
+ # At module level
701
+ _EXTERNAL_LOGGERS_CONFIGURED = False
702
+
703
+
704
+ def configure_external_loggers(external_loggers: Dict[str, str]) -> None:
705
+ """Configure external library loggers with specified levels."""
706
+ external_loggers = external_loggers or LoggingConfig.get("external_loggers", {})
707
+
708
+ for logger_name, level in external_loggers.items():
709
+ # Get the logger for this package
710
+ logger = logging.getLogger(logger_name)
711
+
712
+ # Convert level string to logging constant
713
+ level_value = LoggingConfig.map_level(level)
714
+
715
+ # Set the level
716
+ logger.setLevel(level_value)
717
+
718
+ # Disable propagation to avoid duplicate messages
719
+ logger.propagate = False
720
+
721
+ LoggingConfig.debug_print(f"Set external logger '{logger_name}' to level {level}")
722
+
723
+
724
+ def register_exception_hook(exit_on_critical: bool = True) -> None:
725
+ """Register a custom exception hook to log unhandled exceptions."""
726
+
727
+ def default_exception_handler(exc_type: Type[BaseException], exc_value: BaseException, exc_traceback: Any) -> None:
728
+ """Default exception handler that logs unhandled exceptions."""
729
+ logger = get_logger("UnhandledException")
730
+ logger.error("Unhandled exception occurred", exc_info=(exc_type, exc_value, exc_traceback))
731
+ if exit_on_critical:
732
+ sys.exit(1)
733
+
734
+ sys.excepthook = default_exception_handler
735
+
736
+
737
+ def create_logger(
738
+ name: str,
739
+ log_dir: Optional[str] = None,
740
+ level: Optional[Union[int, str]] = None,
741
+ format_string: Optional[str] = None,
742
+ ) -> logging.Logger:
743
+ """Create a new logger with console and optional file output."""
744
+ logger = logging.getLogger(name)
745
+ logger.setLevel(level or logging.INFO)
746
+
747
+ # Console handler
748
+ console_handler = StreamHandler(sys.stdout)
749
+ console_handler.setFormatter(ColoredFormatter(fmt=format_string or "%(message)s"))
750
+ logger.addHandler(console_handler)
751
+
752
+ # File handler
753
+ if log_dir:
754
+ os.makedirs(log_dir, exist_ok=True)
755
+ file_path = os.path.join(log_dir, f"{name}.log")
756
+ file_handler = RotatingFileHandler(file_path, maxBytes=10 * 1024 * 1024, backupCount=5)
757
+ file_handler.setFormatter(logging.Formatter(fmt=format_string or "%(message)s"))
758
+ logger.addHandler(file_handler)
759
+
760
+ return logger
761
+
762
+
763
+ def handle_critical_exception(message: str, exit_code: int = 1) -> None:
764
+ """Log a critical error and exit the application."""
765
+ logger = get_logger("CriticalException")
766
+ logger.critical(message)
767
+ sys.exit(exit_code)
768
+
769
+
770
+ def init_root_logger(
771
+ level: Optional[Union[int, str]] = None,
772
+ log_dir: Optional[str] = None,
773
+ format_string: Optional[str] = None,
774
+ colored_console: bool = True,
775
+ ) -> logging.Logger:
776
+ """Initialize and configure the root logger."""
777
+ root_logger = logging.getLogger()
778
+ root_logger.setLevel(level or logging.INFO)
779
+
780
+ # Console handler
781
+ console_handler = StreamHandler(sys.stdout)
782
+ console_handler.setFormatter(ColoredFormatter(fmt=format_string or "%(message)s", colored=colored_console))
783
+ root_logger.addHandler(console_handler)
784
+
785
+ # File handler
786
+ if log_dir:
787
+ os.makedirs(log_dir, exist_ok=True)
788
+ file_path = os.path.join(log_dir, "root.log")
789
+ file_handler = RotatingFileHandler(file_path, maxBytes=10 * 1024 * 1024, backupCount=5)
790
+ file_handler.setFormatter(logging.Formatter(fmt=format_string or "%(message)s"))
791
+ root_logger.addHandler(file_handler)
792
+
793
+ return root_logger
794
+
795
+
796
+ def enable_debug_logging(logger_names: Optional[List[str]] = None) -> None:
797
+ """
798
+ Enable DEBUG level logging for specified loggers.
799
+
800
+ Args:
801
+ logger_names: List of logger names to set to DEBUG level
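+
+ Example (illustrative; logger names are placeholders):
+ >>> enable_debug_logging(["my_module", "my_module.db"])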
802
+ """
803
+ if logger_names is None:
804
+ logger_names = [logging.getLogger().name]
805
+
806
+ for name in logger_names:
807
+ logger = logging.getLogger(name)
808
+ logger.setLevel(logging.DEBUG)
809
+
810
+
811
+ def get_caller_frame(depth: int = 1) -> FrameType:
812
+ """Get the caller's frame at the specified depth."""
813
+ return sys._getframe(depth)
814
+
815
+
816
+ def get_module_name() -> str:
817
+ """
818
+ Get the name of the calling module.
819
+
820
+ Returns:
821
+ The name of the calling module
822
+ """
823
+ module_name = get_caller_frame(2).f_globals["__name__"]  # depth 2: skip this function and get_caller_frame itself
824
+ return cast(str, module_name)
825
+
826
+
827
+ def get_class_logger() -> Union[ColoredLogger, logging.Logger]:
828
+ """
829
+ Get a logger named after the calling module (intended for use inside class definitions).
830
+
831
+ Returns:
832
+ A logger instance named after the caller's module
833
+ """
834
+ module_name = sys._getframe(1).f_globals["__name__"]
835
+ return get_logger(module_name)
836
+
837
+
838
+ def log_to_file(message: str, level: str = "INFO", file_path: Optional[str] = None) -> None:
839
+ """
840
+ Log a message directly to a file without using the logging system.
841
+
842
+ Args:
843
+ message: The message to log
844
+ level: Log level as a string
845
+ file_path: Path to the log file
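+
+ Example (illustrative; the file name is a placeholder):
+ >>> log_to_file("disk fallback message", level="ERROR", file_path="fallback.log")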
846
+ """
847
+ file_path = file_path or "default.log"
848
+ with open(file_path, mode="a", encoding="utf-8") as log_file:
849
+ log_file.write(f"{datetime.now()} - {level} - {message}\n")
850
+
851
+
852
+ def get_logger(name: str, verbose: Optional[str] = None) -> Union[ColoredLogger, logging.Logger]:
853
+ """
854
+ Get a configured logger instance.
855
+
856
+ Args:
857
+ name: Name for the logger, typically module name
858
+ verbose: Optional override for log level
859
+
860
+ Returns:
861
+ A logger instance configured according to settings
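+
+ Example (illustrative; the module name is a placeholder):
+ >>> log = get_logger("my_module", verbose="DEBUG")
+ >>> log.debug("visible because of the explicit DEBUG override")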
862
+ """
863
+ global _EXTERNAL_LOGGERS_CONFIGURED
864
+
865
+ # Configure external loggers only once
866
+ if not _EXTERNAL_LOGGERS_CONFIGURED:
867
+ configure_external_loggers(LoggingConfig.get("external_loggers", {}))
868
+ _EXTERNAL_LOGGERS_CONFIGURED = True
869
+
870
+ # Check if logger already exists
871
+ if name in ColoredLogger._initialized_loggers:
872
+ existing_logger = ColoredLogger._initialized_loggers[name]
873
+
874
+ # If explicit verbose parameter is provided, always apply it
875
+ if verbose is not None:
876
+ original_level = existing_logger.level
877
+ ColoredLogger.update_logger_level(name, verbose)
878
+ if original_level != existing_logger.level:
879
+ LoggingConfig.debug_print(
880
+ f"Warning: Logger '{name}' level changed from "
881
+ f"{logging.getLevelName(original_level)} to {logging.getLevelName(existing_logger.level)} "
882
+ f"due to explicit parameter"
883
+ )
884
+ return existing_logger
885
+
886
+ # Check if there's a specific config for this logger in external_loggers
887
+ external_loggers = LoggingConfig.get("external_loggers", {})
888
+ if name in external_loggers:
889
+ original_level = existing_logger.level
890
+ ColoredLogger.update_logger_level(name, external_loggers[name])
891
+ if original_level != existing_logger.level:
892
+ LoggingConfig.debug_print(
893
+ f"Warning: Logger '{name}' level changed from "
894
+ f"{logging.getLevelName(original_level)} to {logging.getLevelName(existing_logger.level)} "
895
+ f"due to external_loggers configuration"
896
+ )
897
+ return existing_logger
898
+
899
+ # Check if there's a module-specific level that should be applied
900
+ module_levels = LoggingConfig.get("module_levels", {})
901
+ if name in module_levels:
902
+ original_level = existing_logger.level
903
+ ColoredLogger.update_logger_level(name, module_levels[name])
904
+ if original_level != existing_logger.level:
905
+ LoggingConfig.debug_print(
906
+ f"Warning: Logger '{name}' level changed from "
907
+ f"{logging.getLevelName(original_level)} to {logging.getLevelName(existing_logger.level)} "
908
+ f"due to module_levels configuration"
909
+ )
910
+
911
+ return existing_logger
912
+
913
+ # Use explicit level, then check external_loggers config, then check module_levels, then use default
914
+ if verbose is None:
915
+ external_loggers = LoggingConfig.get("external_loggers", {})
916
+ module_levels = LoggingConfig.get("module_levels", {})
917
+
918
+ if name in external_loggers:
919
+ verbose = external_loggers[name]
920
+ elif name in module_levels:
921
+ verbose = module_levels[name]
922
+ else:
923
+ verbose = LoggingConfig.get("default_level", "INFO")
924
+
925
+ # Create new logger
926
+ logger = ColoredLogger(name, verbose)
927
+ return logger