aws-lambda-powertools 3.7.1a7__py3-none-any.whl → 3.8.1a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -136,9 +136,8 @@ class OpenAPIValidationMiddleware(BaseMiddlewareHandler):
         return self._handle_response(route=route, response=response)
 
     def _handle_response(self, *, route: Route, response: Response):
-        # Check if we have a return type defined
-        if route.dependant.return_param:
-            # Validate and serialize the response, including None
+        # Process the response body if it exists
+        if response.body and response.is_json():
             response.body = self._serialize_response(
                 field=route.dependant.return_param,
                 response_content=response.body,
@@ -0,0 +1,3 @@
+from aws_lambda_powertools.logging.buffer.config import LoggerBufferConfig
+
+__all__ = ["LoggerBufferConfig"]
@@ -0,0 +1,215 @@
+from __future__ import annotations
+
+from collections import deque
+from typing import Any
+
+
+class KeyBufferCache:
+    """
+    A cache implementation for a single key with size tracking and eviction support.
+
+    This class manages a buffer for a specific key, keeping track of the current size
+    and providing methods to add, remove, and manage cached items. It supports automatic
+    eviction tracking and size management.
+
+    Attributes
+    ----------
+    cache : deque
+        A double-ended queue storing the cached items.
+    current_size : int
+        The total size of all items currently in the cache.
+    has_evicted : bool
+        A flag indicating whether any items have been evicted from the cache.
+    """
+
+    def __init__(self):
+        """
+        Initialize a buffer cache for a specific key.
+        """
+        self.cache: deque = deque()
+        self.current_size: int = 0
+        self.has_evicted: bool = False
+
+    def add(self, item: Any) -> None:
+        """
+        Add an item to the cache.
+
+        Parameters
+        ----------
+        item : Any
+            The item to be stored in the cache.
+        """
+        item_size = len(str(item))
+        self.cache.append(item)
+        self.current_size += item_size
+
+    def remove_oldest(self) -> Any:
+        """
+        Remove and return the oldest item from the cache.
+
+        Returns
+        -------
+        Any
+            The removed item.
+        """
+        removed_item = self.cache.popleft()
+        self.current_size -= len(str(removed_item))
+        self.has_evicted = True
+        return removed_item
+
+    def get(self) -> list:
+        """
+        Retrieve items for this key.
+
+        Returns
+        -------
+        list
+            List of items in the cache.
+        """
+        return list(self.cache)
+
+    def clear(self) -> None:
+        """
+        Clear the cache for this key.
+        """
+        self.cache.clear()
+        self.current_size = 0
+        self.has_evicted = False
+
+
+class LoggerBufferCache:
+    """
+    A multi-key buffer cache with size-based eviction and management.
+
+    This class provides a flexible caching mechanism that manages multiple keys,
+    with each key having its own buffer cache. The total size of each key's cache
+    is limited, and older items are automatically evicted when the size limit is reached.
+
+    Key Features:
+    - Multiple key support
+    - Size-based eviction
+    - Tracking of evicted items
+    - Configurable maximum buffer size
+
+    Example
+    --------
+    >>> buffer_cache = LoggerBufferCache(max_size_bytes=1000)
+    >>> buffer_cache.add("logs", "First log message")
+    >>> buffer_cache.add("debug", "Debug information")
+    >>> buffer_cache.get("logs")
+    ['First log message']
+    >>> buffer_cache.get_current_size("logs")
+    17
+    """
+
+    def __init__(self, max_size_bytes: int):
+        """
+        Initialize the LoggerBufferCache.
+
+        Parameters
+        ----------
+        max_size_bytes : int
+            Maximum size of the cache in bytes for each key.
+        """
+        self.max_size_bytes: int = max_size_bytes
+        self.cache: dict[str, KeyBufferCache] = {}
+
+    def add(self, key: str, item: Any) -> None:
+        """
+        Add an item to the cache for a specific key.
+
+        Parameters
+        ----------
+        key : str
+            The key to store the item under.
+        item : Any
+            The item to be stored in the cache.
+
+        Raises
+        ------
+        BufferError
+            If the item alone is larger than the maximum buffer size.
+        """
+        # Check if item is larger than entire buffer
+        item_size = len(str(item))
+        if item_size > self.max_size_bytes:
+            raise BufferError("Cannot add item to the buffer")
+
+        # Create the key's cache if it doesn't exist
+        if key not in self.cache:
+            self.cache[key] = KeyBufferCache()
+
+        # Calculate the size after adding the new item
+        new_total_size = self.cache[key].current_size + item_size
+
+        # If adding the item would exceed max size, remove oldest items
+        while new_total_size > self.max_size_bytes and self.cache[key].cache:
+            self.cache[key].remove_oldest()
+            new_total_size = self.cache[key].current_size + item_size
+
+        self.cache[key].add(item)
+
+    def get(self, key: str) -> list:
+        """
+        Retrieve items for a specific key.
+
+        Parameters
+        ----------
+        key : str
+            The key to retrieve items for.
+
+        Returns
+        -------
+        list
+            List of items for the given key, or an empty list if the key doesn't exist.
+        """
+        return [] if key not in self.cache else self.cache[key].get()
+
+    def clear(self, key: str | None = None) -> None:
+        """
+        Clear the cache, either for a specific key or entirely.
+
+        Parameters
+        ----------
+        key : str | None, optional
+            The key to clear. If None, clears the entire cache.
+        """
+        if key:
+            if key in self.cache:
+                self.cache[key].clear()
+                del self.cache[key]
+        else:
+            self.cache.clear()
+
+    def has_items_evicted(self, key: str) -> bool:
+        """
+        Check if a specific key's cache has evicted items.
+
+        Parameters
+        ----------
+        key : str
+            The key to check for evicted items.
+
+        Returns
+        -------
+        bool
+            True if items have been evicted, False otherwise.
+        """
+        return False if key not in self.cache else self.cache[key].has_evicted
+
+    def get_current_size(self, key: str) -> int | None:
+        """
+        Get the current size of the buffer for a specific key.
+
+        Parameters
+        ----------
+        key : str
+            The key to get the current size for.
+
+        Returns
+        -------
+        int | None
+            The current size of the buffer for the key.
+            Returns None if the key does not exist.
+        """
+        return None if key not in self.cache else self.cache[key].current_size
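
A minimal sketch of the eviction behavior implemented above (module path and names taken from the new file; the per-key budget and the len(str(item)) size measure come straight from the code):

from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache

cache = LoggerBufferCache(max_size_bytes=10)  # per-key budget, measured as len(str(item))

cache.add("trace-1", "aaaa")  # current size: 4
cache.add("trace-1", "bbbb")  # current size: 8
cache.add("trace-1", "cccc")  # 12 > 10: "aaaa" is evicted first, then "cccc" is appended

print(cache.get("trace-1"))                # ['bbbb', 'cccc']
print(cache.has_items_evicted("trace-1"))  # True

try:
    cache.add("trace-1", "x" * 11)  # a single item larger than the whole budget
except BufferError:
    print("item rejected outright")
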
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from typing import Literal
+
+
+class LoggerBufferConfig:
+    """
+    Configuration for log buffering behavior.
+    """
+
+    # Define class-level constant for valid log levels
+    VALID_LOG_LEVELS: list[str] = ["DEBUG", "INFO", "WARNING"]
+    LOG_LEVEL_BUFFER_VALUES = Literal["DEBUG", "INFO", "WARNING"]
+
+    def __init__(
+        self,
+        max_bytes: int = 20480,
+        buffer_at_verbosity: LOG_LEVEL_BUFFER_VALUES = "DEBUG",
+        flush_on_error_log: bool = True,
+    ):
+        """
+        Initialize logger buffer configuration.
+
+        Parameters
+        ----------
+        max_bytes : int, optional
+            Maximum size of the buffer in bytes
+        buffer_at_verbosity : str, optional
+            Minimum log level to buffer
+        flush_on_error_log : bool, optional
+            Whether to flush the buffer when an error occurs
+        """
+        self._validate_inputs(max_bytes, buffer_at_verbosity, flush_on_error_log)
+
+        self._max_bytes = max_bytes
+        self._buffer_at_verbosity = buffer_at_verbosity.upper()
+        self._flush_on_error_log = flush_on_error_log
+
+    def _validate_inputs(
+        self,
+        max_bytes: int,
+        buffer_at_verbosity: str,
+        flush_on_error_log: bool,
+    ) -> None:
+        """
+        Validate configuration inputs.
+
+        Parameters
+        ----------
+        Same as __init__ method parameters
+        """
+        if not isinstance(max_bytes, int) or max_bytes <= 0:
+            raise ValueError("Max size must be a positive integer")
+
+        if not isinstance(buffer_at_verbosity, str):
+            raise ValueError("Log level must be a string")
+
+        # Validate log level
+        if buffer_at_verbosity.upper() not in self.VALID_LOG_LEVELS:
+            raise ValueError(f"Invalid log level. Must be one of {self.VALID_LOG_LEVELS}")
+
+        if not isinstance(flush_on_error_log, bool):
+            raise ValueError("flush_on_error must be a boolean")
+
+    @property
+    def max_bytes(self) -> int:
+        """Maximum buffer size in bytes."""
+        return self._max_bytes
+
+    @property
+    def buffer_at_verbosity(self) -> str:
+        """Minimum log level to buffer."""
+        return self._buffer_at_verbosity
+
+    @property
+    def flush_on_error_log(self) -> bool:
+        """Flag to flush buffer on error."""
+        return self._flush_on_error_log
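
A short sketch of the validation above: only DEBUG, INFO, and WARNING are accepted as buffer thresholds, and the level is normalized to uppercase:

from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

config = LoggerBufferConfig(max_bytes=10240, buffer_at_verbosity="debug")
print(config.buffer_at_verbosity)  # "DEBUG" (normalized to uppercase)
print(config.flush_on_error_log)   # True (the default)

try:
    LoggerBufferConfig(buffer_at_verbosity="ERROR")  # ERROR/CRITICAL are never buffered
except ValueError as exc:
    print(exc)  # Invalid log level. Must be one of ['DEBUG', 'INFO', 'WARNING']
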
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import sys
+import time
+from typing import TYPE_CHECKING, Any, Mapping
+
+if TYPE_CHECKING:
+    import logging
+
+
+def _create_buffer_record(
+    level: int,
+    msg: object,
+    args: object,
+    exc_info: logging._ExcInfoType = None,
+    stack_info: bool = False,
+    extra: Mapping[str, object] | None = None,
+) -> dict[str, Any]:
+    """
+    Create a structured log record to be saved in the buffer.
+
+    Parameters
+    ----------
+    level : int
+        Logging level (e.g., logging.DEBUG, logging.INFO) indicating log severity.
+    msg : object
+        The log message to be recorded.
+    args : object
+        Additional arguments associated with the log message.
+    exc_info : logging._ExcInfoType, optional
+        Exception information to be included in the log record.
+        If None, no exception details will be captured.
+    stack_info : bool, default False
+        Flag to include stack trace information in the log record.
+    extra : Mapping[str, object], optional
+        Additional context or metadata to be attached to the log record.
+
+    Returns
+    -------
+    dict[str, Any]
+
+    Notes
+    -----
+    - Captures caller frame information for precise log source tracking
+    - Automatically handles exception context
+    """
+    # Retrieve the caller's frame information to capture precise log context
+    # Uses sys._getframe(3) to reach the original caller's frame
+    caller_frame = sys._getframe(3)
+
+    # Get the current timestamp
+    timestamp = time.time()
+
+    # Dynamically replace exc_info with current system exception information
+    # This ensures the most recent exception is captured if available
+    if exc_info:
+        exc_info = sys.exc_info()
+
+    # Construct and return the log record dictionary
+    return {
+        "level": level,
+        "msg": msg,
+        "args": args,
+        "filename": caller_frame.f_code.co_filename,
+        "line": caller_frame.f_lineno,
+        "function": caller_frame.f_code.co_name,
+        "extra": extra,
+        "timestamp": timestamp,
+        "exc_info": exc_info,
+        "stack_info": stack_info,
+    }
+
+
+def _check_minimum_buffer_log_level(buffer_log_level, current_log_level):
+    """
+    Determine whether the current log level exceeds the buffer's minimum log level.
+
+    Compares log levels to decide whether a log message should bypass the buffer.
+
+    Parameters
+    ----------
+    buffer_log_level : str
+        Minimum log level configured for the buffer.
+        Must be one of: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'.
+    current_log_level : str
+        Log level of the current log message.
+        Must be one of: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'.
+
+    Returns
+    -------
+    bool
+        True if the current log level is strictly higher (less verbose) than the
+        buffer's minimum log level, indicating the message should bypass the buffer.
+        False if the current log level is at or below the threshold and should be buffered.
+
+    Notes
+    -----
+    - Log levels are compared based on their numeric severity
+    - Conversion to uppercase ensures case-insensitive comparisons
+
+    Examples
+    --------
+    >>> _check_minimum_buffer_log_level('DEBUG', 'INFO')
+    True
+    >>> _check_minimum_buffer_log_level('ERROR', 'WARNING')
+    False
+    """
+    # Predefined log level mapping with numeric severity values
+    # Lower values indicate more verbose logging levels
+    log_levels = {
+        "DEBUG": 10,
+        "INFO": 20,
+        "WARNING": 30,
+        "ERROR": 40,
+        "CRITICAL": 50,
+    }
+
+    # Normalize input log levels to uppercase for consistent comparison
+    # Retrieve corresponding numeric log level values
+    buffer_level_num = log_levels.get(buffer_log_level.upper())
+    current_level_num = log_levels.get(current_log_level.upper())
+
+    # Compare numeric levels
+    if buffer_level_num < current_level_num:
+        return True
+
+    return False
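
To make the bypass semantics concrete (this exercises the private helper above, so treat it as illustrative rather than public API): the function returns True only when the record's level is strictly above the buffer threshold, meaning the record skips the buffer.

from aws_lambda_powertools.logging.buffer.functions import _check_minimum_buffer_log_level

print(_check_minimum_buffer_log_level("DEBUG", "INFO"))    # True: 10 < 20, INFO bypasses a DEBUG-level buffer
print(_check_minimum_buffer_log_level("WARNING", "INFO"))  # False: 30 < 20 fails, INFO is buffered
print(_check_minimum_buffer_log_level("DEBUG", "DEBUG"))   # False: equal levels are buffered
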
@@ -16,12 +16,17 @@ import warnings
 from contextlib import contextmanager
 from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Mapping, TypeVar, cast, overload
 
+from aws_lambda_powertools.logging.buffer.cache import LoggerBufferCache
+from aws_lambda_powertools.logging.buffer.functions import _check_minimum_buffer_log_level, _create_buffer_record
 from aws_lambda_powertools.logging.constants import (
     LOGGER_ATTRIBUTE_HANDLER,
     LOGGER_ATTRIBUTE_POWERTOOLS_HANDLER,
     LOGGER_ATTRIBUTE_PRECONFIGURED,
 )
-from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError, OrphanedChildLoggerError
+from aws_lambda_powertools.logging.exceptions import (
+    InvalidLoggerSamplingRateError,
+    OrphanedChildLoggerError,
+)
 from aws_lambda_powertools.logging.filters import SuppressFilter
 from aws_lambda_powertools.logging.formatter import (
     RESERVED_FORMATTER_CUSTOM_KEYS,
@@ -32,14 +37,18 @@ from aws_lambda_powertools.logging.lambda_context import build_lambda_context_mo
 from aws_lambda_powertools.shared import constants
 from aws_lambda_powertools.shared.functions import (
     extract_event_from_common_models,
+    get_tracer_id,
     resolve_env_var_choice,
     resolve_truthy_env_var_choice,
 )
 from aws_lambda_powertools.utilities import jmespath_utils
+from aws_lambda_powertools.warnings import PowertoolsUserWarning
 
 if TYPE_CHECKING:
+    from aws_lambda_powertools.logging.buffer.config import LoggerBufferConfig
     from aws_lambda_powertools.shared.types import AnyCallableT
 
+
 logger = logging.getLogger(__name__)
 
 is_cold_start = True
@@ -100,6 +109,8 @@ class Logger:
         custom logging handler e.g. logging.FileHandler("file.log")
     log_uncaught_exceptions: bool, by default False
         logs uncaught exception using sys.excepthook
+    buffer_config: LoggerBufferConfig, optional
+        logger buffer configuration
 
     See: https://docs.python.org/3/library/sys.html#sys.excepthook
 
@@ -218,6 +229,7 @@ class Logger:
         utc: bool = False,
         use_rfc3339: bool = False,
         serialize_stacktrace: bool = True,
+        buffer_config: LoggerBufferConfig | None = None,
         **kwargs,
     ) -> None:
 
@@ -259,7 +271,17 @@ class Logger:
             "serialize_stacktrace": serialize_stacktrace,
         }
 
-        self._init_logger(formatter_options=formatter_options, log_level=level, **kwargs)
+        self._buffer_config = buffer_config
+        if self._buffer_config:
+            self._buffer_cache = LoggerBufferCache(max_size_bytes=self._buffer_config.max_bytes)
+
+        self._init_logger(
+            formatter_options=formatter_options,
+            log_level=level,
+            buffer_config=self._buffer_config,
+            buffer_cache=getattr(self, "_buffer_cache", None),
+            **kwargs,
+        )
 
         if self.log_uncaught_exceptions:
             logger.debug("Replacing exception hook")
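
Putting the pieces together, this is roughly how a caller opts in to buffering with the new constructor parameter (a sketch; the service name is illustrative):

from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

# DEBUG and INFO records are held back; WARNING and above are emitted immediately
logger = Logger(
    service="payment",
    buffer_config=LoggerBufferConfig(max_bytes=20480, buffer_at_verbosity="INFO"),
)
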
@@ -303,6 +325,8 @@ class Logger:
         self,
         formatter_options: dict | None = None,
         log_level: str | int | None = None,
+        buffer_config: LoggerBufferConfig | None = None,
+        buffer_cache: LoggerBufferCache | None = None,
         **kwargs,
     ) -> None:
         """Configures new logger"""
@@ -315,9 +339,19 @@ class Logger:
         is_logger_preconfigured = getattr(self._logger, LOGGER_ATTRIBUTE_PRECONFIGURED, False)
         if self.child:
             self.setLevel(log_level)
+            if getattr(self._logger.parent, "powertools_buffer_config", None):
+                # Initializes a new, empty LoggerBufferCache for child logger
+                # Preserves parent's buffer configuration while resetting cache contents
+                self._buffer_config = self._logger.parent.powertools_buffer_config  # type: ignore[union-attr]
+                self._buffer_cache = LoggerBufferCache(self._logger.parent.powertools_buffer_config.max_bytes)  # type: ignore[union-attr]
             return
 
         if is_logger_preconfigured:
+            # Reuse existing buffer configuration from a previously configured logger
+            # Ensures consistent buffer settings across logger instances within the same service
+            # Enables buffer propagation and maintains a unified logging configuration
+            self._buffer_config = self._logger.powertools_buffer_config  # type: ignore[attr-defined]
+            self._buffer_cache = self._logger.powertools_buffer_cache  # type: ignore[attr-defined]
             return
 
         self.setLevel(log_level)
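
In other words, a child logger picks up the parent's buffer configuration but starts with a fresh, empty cache; a rough sketch (assuming the parent was created first, typically in another module):

from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

parent = Logger(service="orders", buffer_config=LoggerBufferConfig())
child = Logger(service="orders", child=True)  # same LoggerBufferConfig, new empty LoggerBufferCache
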
@@ -342,6 +376,8 @@ class Logger:
         logger.debug(f"Marking logger {self.service} as preconfigured")
         self._logger.init = True  # type: ignore[attr-defined]
         self._logger.powertools_handler = self.logger_handler  # type: ignore[attr-defined]
+        self._logger.powertools_buffer_config = buffer_config  # type: ignore[attr-defined]
+        self._logger.powertools_buffer_cache = buffer_cache  # type: ignore[attr-defined]
 
     def refresh_sample_rate_calculation(self) -> None:
         """
@@ -386,6 +422,7 @@ class Logger:
         log_event: bool | None = None,
         correlation_id_path: str | None = None,
         clear_state: bool | None = False,
+        flush_buffer_on_uncaught_error: bool = False,
     ) -> AnyCallableT: ...
 
     @overload
@@ -395,6 +432,7 @@ class Logger:
         log_event: bool | None = None,
         correlation_id_path: str | None = None,
         clear_state: bool | None = False,
+        flush_buffer_on_uncaught_error: bool = False,
     ) -> Callable[[AnyCallableT], AnyCallableT]: ...
 
     def inject_lambda_context(
@@ -403,6 +441,7 @@ class Logger:
         log_event: bool | None = None,
         correlation_id_path: str | None = None,
         clear_state: bool | None = False,
+        flush_buffer_on_uncaught_error: bool = False,
     ) -> Any:
         """Decorator to capture Lambda contextual info and inject into logger
 
@@ -459,6 +498,7 @@ class Logger:
                 log_event=log_event,
                 correlation_id_path=correlation_id_path,
                 clear_state=clear_state,
+                flush_buffer_on_uncaught_error=flush_buffer_on_uncaught_error,
             )
 
         log_event = resolve_truthy_env_var_choice(
@@ -491,11 +531,25 @@ class Logger:
             if self.sampling_rate and not cold_start:
                 self.refresh_sample_rate_calculation()
 
-            return lambda_handler(event, context, *args, **kwargs)
+            try:
+                # Execute the Lambda handler with provided event and context
+                return lambda_handler(event, context, *args, **kwargs)
+            except:
+                # Flush the log buffer if configured to do so on uncaught errors
+                # Ensures logging state is cleaned up even if an exception is raised
+                if flush_buffer_on_uncaught_error:
+                    logger.debug("Uncaught error detected, flushing log buffer before exit")
+                    self.flush_buffer()
+                # Re-raise any exceptions that occur during handler execution
+                raise
+            finally:
+                # Clear the cache after invocation is complete
+                if self._buffer_config:
+                    self._buffer_cache.clear()
 
         return decorate
 
-    def info(
+    def debug(
         self,
         msg: object,
         *args: object,
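
The new flush_buffer_on_uncaught_error flag hooks into that try/except/finally; a sketch of the intended usage (the handler body is illustrative):

from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

logger = Logger(service="orders", buffer_config=LoggerBufferConfig())

@logger.inject_lambda_context(flush_buffer_on_uncaught_error=True)
def handler(event, context):
    logger.debug("buffered diagnostic detail")
    raise RuntimeError("the buffered debug line above is flushed before this propagates")
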
@@ -508,16 +562,37 @@ class Logger:
         extra = extra or {}
         extra = {**extra, **kwargs}
 
-        return self._logger.info(
-            msg,
-            *args,
+        # Logging workflow for logging.debug:
+        # 1. Buffer is completely disabled - log right away
+        # 2. DEBUG is the buffer's maximum level, so it can never bypass the buffer when enabled
+        # 3. Store in buffer for potential later processing
+
+        # MAINTAINABILITY_DECISION:
+        # Keeping this implementation to avoid complex code handling.
+        # Also for clarity over complexity
+
+        # Buffer is not active and we need to log immediately
+        if not self._buffer_config:
+            return self._logger.debug(
+                msg,
+                *args,
+                exc_info=exc_info,
+                stack_info=stack_info,
+                stacklevel=stacklevel,
+                extra=extra,
+            )
+
+        # Store record in the buffer
+        self._add_log_record_to_buffer(
+            level=logging.DEBUG,
+            msg=msg,
+            args=args,
             exc_info=exc_info,
             stack_info=stack_info,
-            stacklevel=stacklevel,
             extra=extra,
         )
 
-    def error(
+    def info(
         self,
         msg: object,
         *args: object,
@@ -530,20 +605,52 @@ class Logger:
         extra = extra or {}
         extra = {**extra, **kwargs}
 
-        return self._logger.error(
-            msg,
-            *args,
+        # Logging workflow for logging.info:
+        # 1. Buffer is completely disabled - log right away
+        # 2. Log severity exceeds buffer's minimum threshold - bypass buffering
+        # 3. If neither condition is met, store in buffer for potential later processing
+
+        # MAINTAINABILITY_DECISION:
+        # Keeping this implementation to avoid complex code handling.
+        # Also for clarity over complexity
+
+        # Buffer is not active and we need to log immediately
+        if not self._buffer_config:
+            return self._logger.info(
+                msg,
+                *args,
+                exc_info=exc_info,
+                stack_info=stack_info,
+                stacklevel=stacklevel,
+                extra=extra,
+            )
+
+        # Bypass the buffer when log severity exceeds the configured minimum
+        if _check_minimum_buffer_log_level(self._buffer_config.buffer_at_verbosity, "INFO"):
+            return self._logger.info(
+                msg,
+                *args,
+                exc_info=exc_info,
+                stack_info=stack_info,
+                stacklevel=stacklevel,
+                extra=extra,
+            )
+
+        # Store record in the buffer
+        self._add_log_record_to_buffer(
+            level=logging.INFO,
+            msg=msg,
+            args=args,
             exc_info=exc_info,
             stack_info=stack_info,
-            stacklevel=stacklevel,
             extra=extra,
         )
 
-    def exception(
+    def warning(
         self,
         msg: object,
         *args: object,
-        exc_info: logging._ExcInfoType = True,
+        exc_info: logging._ExcInfoType = None,
         stack_info: bool = False,
         stacklevel: int = 2,
         extra: Mapping[str, object] | None = None,
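
Each level method follows the same three-way decision. Assuming a threshold of INFO (a sketch; buffering also requires an active X-Ray trace id, see _add_log_record_to_buffer further down):

from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

logger = Logger(service="svc", buffer_config=LoggerBufferConfig(buffer_at_verbosity="INFO"))

logger.debug("held back")   # below the INFO threshold: buffered
logger.info("held back")    # equal to the threshold: still buffered
logger.warning("emitted")   # strictly above the threshold: bypasses the buffer
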
@@ -552,16 +659,48 @@ class Logger:
         extra = extra or {}
         extra = {**extra, **kwargs}
 
-        return self._logger.exception(
-            msg,
-            *args,
+        # Logging workflow for logging.warning:
+        # 1. Buffer is completely disabled - log right away
+        # 2. Log severity exceeds buffer's minimum threshold - bypass buffering
+        # 3. If neither condition is met, store in buffer for potential later processing
+
+        # MAINTAINABILITY_DECISION:
+        # Keeping this implementation to avoid complex code handling.
+        # Also for clarity over complexity
+
+        # Buffer is not active and we need to log immediately
+        if not self._buffer_config:
+            return self._logger.warning(
+                msg,
+                *args,
+                exc_info=exc_info,
+                stack_info=stack_info,
+                stacklevel=stacklevel,
+                extra=extra,
+            )
+
+        # Bypass the buffer when log severity exceeds the configured minimum
+        if _check_minimum_buffer_log_level(self._buffer_config.buffer_at_verbosity, "WARNING"):
+            return self._logger.warning(
+                msg,
+                *args,
+                exc_info=exc_info,
+                stack_info=stack_info,
+                stacklevel=stacklevel,
+                extra=extra,
+            )
+
+        # Store record in the buffer
+        self._add_log_record_to_buffer(
+            level=logging.WARNING,
+            msg=msg,
+            args=args,
             exc_info=exc_info,
             stack_info=stack_info,
-            stacklevel=stacklevel,
             extra=extra,
         )
 
-    def critical(
+    def error(
         self,
         msg: object,
         *args: object,
@@ -574,7 +713,15 @@ class Logger:
         extra = extra or {}
         extra = {**extra, **kwargs}
 
-        return self._logger.critical(
+        # Workflow: Error logging with automatic buffer flushing
+        # 1. Buffer configuration checked for immediate flush
+        # 2. If auto-flush enabled, trigger complete buffer processing
+        # 3. Error log is not "bufferable", so ensure the error log is immediately available
+
+        if self._buffer_config and self._buffer_config.flush_on_error_log:
+            self.flush_buffer()
+
+        return self._logger.error(
             msg,
             *args,
             exc_info=exc_info,
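
With the default flush_on_error_log=True, the first error-level record therefore replays everything buffered so far for the current trace; a sketch:

from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

logger = Logger(service="checkout", buffer_config=LoggerBufferConfig())

logger.debug("step 1")          # buffered (given an active trace id)
logger.debug("step 2")          # buffered
logger.error("payment failed")  # flushes both buffered records first, then logs the error
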
@@ -583,7 +730,7 @@ class Logger:
             extra=extra,
         )
 
-    def warning(
+    def critical(
         self,
         msg: object,
         *args: object,
@@ -596,7 +743,15 @@ class Logger:
         extra = extra or {}
         extra = {**extra, **kwargs}
 
-        return self._logger.warning(
+        # Workflow: Critical logging with automatic buffer flushing
+        # 1. Buffer configuration checked for immediate flush
+        # 2. If auto-flush enabled, trigger complete buffer processing
+        # 3. Critical log is not "bufferable", so ensure the critical log is immediately available
+
+        if self._buffer_config and self._buffer_config.flush_on_error_log:
+            self.flush_buffer()
+
+        return self._logger.critical(
             msg,
             *args,
             exc_info=exc_info,
@@ -605,11 +760,11 @@ class Logger:
             extra=extra,
         )
 
-    def debug(
+    def exception(
         self,
         msg: object,
         *args: object,
-        exc_info: logging._ExcInfoType = None,
+        exc_info: logging._ExcInfoType = True,
         stack_info: bool = False,
         stacklevel: int = 2,
         extra: Mapping[str, object] | None = None,
@@ -618,7 +773,14 @@ class Logger:
         extra = extra or {}
         extra = {**extra, **kwargs}
 
-        return self._logger.debug(
+        # Workflow: Exception logging with automatic buffer flushing
+        # 1. Buffer configuration checked for immediate flush
+        # 2. If auto-flush enabled, trigger complete buffer processing
+        # 3. Exception log is not "bufferable", so ensure the exception log is immediately available
+        if self._buffer_config and self._buffer_config.flush_on_error_log:
+            self.flush_buffer()
+
+        return self._logger.exception(
             msg,
             *args,
             exc_info=exc_info,
@@ -887,6 +1049,161 @@ class Logger:
         # Powertools log level is set, we use this
         return powertools_log_level.upper()
 
+    # FUNCTIONS for buffering logs
+
+    def _create_and_flush_log_record(self, log_line: dict) -> None:
+        """
+        Create and immediately flush a log record to the configured logger.
+
+        Parameters
+        ----------
+        log_line : dict[str, Any]
+            Dictionary containing log record details with keys:
+            - 'level': Logging level
+            - 'filename': Source filename
+            - 'line': Line number
+            - 'msg': Log message
+            - 'function': Source function name
+            - 'extra': Additional context
+            - 'timestamp': Original log creation time
+
+        Notes
+        -----
+        Bypasses standard logging flow by directly creating and handling a log record.
+        Preserves original timestamp and source information.
+        """
+        record = self._logger.makeRecord(
+            name=self.name,
+            level=log_line["level"],
+            fn=log_line["filename"],
+            lno=log_line["line"],
+            msg=log_line["msg"],
+            args=(),
+            exc_info=log_line["exc_info"],
+            func=log_line["function"],
+            extra=log_line["extra"],
+        )
+        record.created = log_line["timestamp"]
+        self._logger.handle(record)
+
+    def _add_log_record_to_buffer(
+        self,
+        level: int,
+        msg: object,
+        args: object,
+        exc_info: logging._ExcInfoType = None,
+        stack_info: bool = False,
+        extra: Mapping[str, object] | None = None,
+    ) -> None:
+        """
+        Add a log record to the buffer, keyed by the current tracer ID.
+
+        Parameters
+        ----------
+        level : int
+            Logging level of the record.
+        msg : object
+            Log message to be recorded.
+        args : object
+            Additional arguments for the log message.
+        exc_info : logging._ExcInfoType, optional
+            Exception information for the log record.
+        stack_info : bool, optional
+            Whether to include stack information.
+        extra : Mapping[str, object], optional
+            Additional contextual information for the log record.
+
+        Notes
+        -----
+        Buffering only happens when a tracer ID is available. If a record is too
+        large for the buffer, a PowertoolsUserWarning is emitted and the record
+        is flushed immediately to avoid data loss.
+        """
+        # Determine the current tracer ID; without one, nothing is buffered
+        tracer_id = get_tracer_id()
+
+        if tracer_id and self._buffer_config:
+            log_record: dict[str, Any] = _create_buffer_record(
+                level=level,
+                msg=msg,
+                args=args,
+                exc_info=exc_info,
+                stack_info=stack_info,
+                extra=extra,
+            )
+            try:
+                self._buffer_cache.add(tracer_id, log_record)
+            except BufferError:
+                warnings.warn(
+                    message="Cannot add item to the buffer. "
+                    f"Item size exceeds total cache size {self._buffer_config.max_bytes} bytes",
+                    category=PowertoolsUserWarning,
+                    stacklevel=2,
+                )
+
+                # flush this log to avoid data loss
+                self._create_and_flush_log_record(log_record)
+
+    def flush_buffer(self) -> None:
+        """
+        Flush all buffered log records associated with the current execution.
+
+        Notes
+        -----
+        - Retrieves log records for the current trace from the buffer
+        - Immediately processes and logs each record
+        - Warns if any records were evicted during this execution
+        - Clears the buffer after processing completes
+
+        Raises
+        ------
+        Any exceptions from underlying logging or buffer mechanisms
+        will be propagated to the caller
+        """
+        tracer_id = get_tracer_id()
+
+        # Flushing log without a tracer id? Return
+        if not tracer_id:
+            return
+
+        # is buffer empty? return
+        buffer = self._buffer_cache.get(tracer_id)
+        if not buffer:
+            return
+
+        # Process log records
+        for log_line in buffer:
+            self._create_and_flush_log_record(log_line)
+
+        # Has items evicted?
+        if self._buffer_cache.has_items_evicted(tracer_id):
+            warnings.warn(
+                message="Some logs are not displayed because they were evicted from the buffer. "
+                "Increase buffer size to store more logs in the buffer",
+                category=PowertoolsUserWarning,
+                stacklevel=2,
+            )
+
+        # Clear the entire cache
+        self._buffer_cache.clear()
+
+    def clear_buffer(self) -> None:
+        """
+        Clear the internal buffer cache.
+
+        This method removes all items from the buffer cache, effectively resetting it to an empty state.
+
+        Returns
+        -------
+        None
+        """
+        if self._buffer_config:
+            self._buffer_cache.clear()
+
 
 def set_package_logger(
     level: str | int = logging.DEBUG,
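
Outside the automatic paths, flush_buffer() and clear_buffer() can be called directly. Records are keyed by the X-Ray trace id, so nothing is buffered (or flushed) without one; the _X_AMZN_TRACE_ID value below is a fabricated example of the format the Lambda runtime sets:

import os

from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.buffer import LoggerBufferConfig

# Normally set by the Lambda runtime; set here only so the sketch runs locally
os.environ["_X_AMZN_TRACE_ID"] = "Root=1-5759e988-bd862e3fe1be46a994272793;Sampled=1"

logger = Logger(service="demo", buffer_config=LoggerBufferConfig())
logger.debug("diagnostic detail")  # buffered under the current trace id

logger.flush_buffer()  # replay every buffered record through the normal handler, then clear
logger.clear_buffer()  # or: discard buffered records without emitting them
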
@@ -283,3 +283,8 @@ def abs_lambda_path(relative_path: str = "") -> str:
 
 def sanitize_xray_segment_name(name: str) -> str:
     return re.sub(constants.INVALID_XRAY_NAME_CHARACTERS, "", name)
+
+
+def get_tracer_id() -> str | None:
+    xray_trace_id = os.getenv(constants.XRAY_TRACE_ID_ENV)
+    return xray_trace_id.split(";")[0].replace("Root=", "") if xray_trace_id else None
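
get_tracer_id simply strips the Root= segment from the X-Ray trace header; for example (trace id fabricated):

import os

from aws_lambda_powertools.shared.functions import get_tracer_id

os.environ["_X_AMZN_TRACE_ID"] = "Root=1-5759e988-bd862e3fe1be46a994272793;Parent=53995c3f42cd8ad8;Sampled=1"
print(get_tracer_id())  # 1-5759e988-bd862e3fe1be46a994272793
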
@@ -1,3 +1,3 @@
 """Exposes version constant to avoid circular dependencies."""
 
-VERSION = "3.7.1a7"
+VERSION = "3.8.1a0"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: aws_lambda_powertools
-Version: 3.7.1a7
+Version: 3.8.1a0
 Summary: Powertools for AWS Lambda (Python) is a developer toolkit to implement Serverless best practices and increase developer velocity.
 License: MIT
 Keywords: aws_lambda_powertools,aws,tracing,logging,lambda,powertools,feature_flags,idempotency,middleware
@@ -13,7 +13,7 @@ aws_lambda_powertools/event_handler/graphql_appsync/router.py,sha256=f6jFQ3ZbJz9
 aws_lambda_powertools/event_handler/lambda_function_url.py,sha256=aJtTUR9DD6UzFcpRfbUki7mm2BDozFmuECrTWXxLjng,2153
 aws_lambda_powertools/event_handler/middlewares/__init__.py,sha256=3R5XptoCT8owm4swcAEG0lsV_zbL4X-gU5nv8eJ0jQs,158
 aws_lambda_powertools/event_handler/middlewares/base.py,sha256=llr1_sGaAsyMY9Gn4zKUDxePQ5X7ClcqgjqPesg5FJw,3724
-aws_lambda_powertools/event_handler/middlewares/openapi_validation.py,sha256=DZqkFFtft6WbtzRBzM4nA3FFtEKw-pGIgPixSvOJLcs,15017
+aws_lambda_powertools/event_handler/middlewares/openapi_validation.py,sha256=zoHza0Vj_ij0wfobYX7SC46N4YYlMH9en3L3X9iK25g,14959
 aws_lambda_powertools/event_handler/middlewares/schema_validation.py,sha256=gEX0lgO8e7sxNQZwFbhK3ExQyb_b_Fw3l6k5SeZyHqk,5204
 aws_lambda_powertools/event_handler/openapi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aws_lambda_powertools/event_handler/openapi/compat.py,sha256=elvYmcsKx9TjRnTAief8Xfpd5bgKADoM-9BlQ4Q37iY,10877
@@ -36,6 +36,10 @@ aws_lambda_powertools/event_handler/util.py,sha256=j7InZnSXymsWmp2Gj2emnVJjFcKo4
 aws_lambda_powertools/event_handler/vpc_lattice.py,sha256=rih7cVt4H9opHpYOUyuv2m3AdXS9Rxt-AH7ua51WwFs,3207
 aws_lambda_powertools/exceptions/__init__.py,sha256=bv7fiO8Cj5xbHOTlDpWpM3pIkbdSB74Nt_mHbzLzYDw,163
 aws_lambda_powertools/logging/__init__.py,sha256=G5MTkVqaQvpfa7k3fGkj4QN0KU6nFfP0_SLF_47G_VE,72
+aws_lambda_powertools/logging/buffer/__init__.py,sha256=2sdmJToRBp6QJI2VQuvgjuxsJkTMd4dL3TYp6dASiOQ,109
+aws_lambda_powertools/logging/buffer/cache.py,sha256=rh8XjDsfGEvXN8eRXHk8nAiN06rZEK1bcIZVuLshoHI,6058
+aws_lambda_powertools/logging/buffer/config.py,sha256=tKOlgGhkAY214Fskn6u-Xm9pdTTbo3XJSwSoSJfoIW4,2383
+aws_lambda_powertools/logging/buffer/functions.py,sha256=qgywGfLQpr4jYJFugJHDm_fXyxnTpJ3ppHFAI_7bpOM,3949
 aws_lambda_powertools/logging/constants.py,sha256=P0XgbCmG4NiP96kx0qxe6QUC3ShN12doSIXTkobX7C4,309
 aws_lambda_powertools/logging/correlation_paths.py,sha256=uHHrl03aWzpOsrGHZ-9E6PNoMFyKjv3APNMMkI1EN_c,411
 aws_lambda_powertools/logging/exceptions.py,sha256=Fe_jk8O9vgUSUHxxOkz6Ev521aXsgPkMgA9Hb1nBn6g,232
@@ -44,7 +48,7 @@ aws_lambda_powertools/logging/formatter.py,sha256=dmE6XXmkWVkYbWdy1Kg69TYJJ0yDXf
 aws_lambda_powertools/logging/formatters/__init__.py,sha256=OqddpJcWMqRYhx5SFy-SPqtt72tkRZbfpEi_oCC47eI,301
 aws_lambda_powertools/logging/formatters/datadog.py,sha256=NwaaBOp7jIlOAH158NzyvW-EwmWqqnYX9f7Uu_nIUYQ,3169
 aws_lambda_powertools/logging/lambda_context.py,sha256=VHst_6hxMpXgScoxNwaC61UXPTIdd3AEBHTPzb4esPc,1736
-aws_lambda_powertools/logging/logger.py,sha256=r9VACN9W72VMMqxbhbRYz9dREhX5pkk5hUf5MtS9u_8,35824
+aws_lambda_powertools/logging/logger.py,sha256=rRvHggw3XjAg3xJw6ngZZx3eMUQybSFCJ-rSX5ZnsPc,48342
 aws_lambda_powertools/logging/types.py,sha256=Zc95nGdZ2sJUEPdwR0uoArT_F-JSKfpS_LokdCVO0nQ,1263
 aws_lambda_powertools/logging/utils.py,sha256=NirAObjkkarN5fX2diHs0Ln_8KHLueviL-jCKhckIBM,4069
 aws_lambda_powertools/metrics/__init__.py,sha256=B5FpJS_VR7zivm2ylvUF8RHBthKz4aDk0VA5GpDn3Tk,592
@@ -77,13 +81,13 @@ aws_lambda_powertools/shared/cache_dict.py,sha256=X3Ykh-_Fb4Wkc5RGjaostC_FUvMyY_
 aws_lambda_powertools/shared/constants.py,sha256=lIEs2_5080qhCp7l_v_rdjXud5uF0Dlf-4VniDdIJCI,2737
 aws_lambda_powertools/shared/cookies.py,sha256=X2Bkcf7MAmV75PJFm07QGkL4n1psk4HIGpUxzrsjiJY,3868
 aws_lambda_powertools/shared/dynamodb_deserializer.py,sha256=tWw8MLXEitInnViFkt_xSBkXD_V1n1KcWbqBXBxwUlE,4054
-aws_lambda_powertools/shared/functions.py,sha256=kgHqBFvzhxxROzLL-NcrfKHWcPqr6GP38PeB1XfT8jQ,7442
+aws_lambda_powertools/shared/functions.py,sha256=N5VIND3xW-eZHqY2uPsBMtDbtMx64iklUKByeCh58rc,7625
 aws_lambda_powertools/shared/headers_serializer.py,sha256=1eRQ-FO6jNskTXWRa25OXI5Klu3yH6HAHjGGZULB8FE,5411
 aws_lambda_powertools/shared/json_encoder.py,sha256=JQeWNu-4M7_xI_hqYExrxsb3OcEH4uTxjfVE4Of8Nn8,666
 aws_lambda_powertools/shared/lazy_import.py,sha256=TbXQm2bcwXdZrYdBaJJXIswyLlumM85RJ_A_0w-h-GU,2019
 aws_lambda_powertools/shared/types.py,sha256=APkI38HbiTpSF19NSNii8Ydx73vmVUVotgEQ9jHruEI,124
 aws_lambda_powertools/shared/user_agent.py,sha256=DrCMFQuT4a4iIrpcWpAIjY37EFqR9-QxlxDGD-Nn9Gg,7081
-aws_lambda_powertools/shared/version.py,sha256=DheiIJQ50d2a_hoe9oSMHEVYeUiKw6fhfDbNFbI0aEE,84
+aws_lambda_powertools/shared/version.py,sha256=eez1WHK6RC3OddSRIYbCj803B2xulcLLJkJMRK4zCUQ,84
 aws_lambda_powertools/tracing/__init__.py,sha256=f4bMThOPBPWTPVcYqcAIErAJPerMsf3H_Z4gCXCsK9I,141
 aws_lambda_powertools/tracing/base.py,sha256=DbLD8OSK05KLdSV36oNA5wDSGv8KbcOD19qMUqoXh58,4513
 aws_lambda_powertools/tracing/extensions.py,sha256=APOfXOq-hRBKaK5WyfIyrd_6M1_9SWJZ3zxLA9jDZzU,492
@@ -251,7 +255,7 @@ aws_lambda_powertools/utilities/validation/envelopes.py,sha256=YD5HOFx6IClQgii0n
 aws_lambda_powertools/utilities/validation/exceptions.py,sha256=PKy_19zQMBJGCMMFl-sMkcm-cc0v3zZBn_bhGE4wKNo,2084
 aws_lambda_powertools/utilities/validation/validator.py,sha256=x_1qpuKJBuWpgNU-zCD3Di-vXrZfyUu7oA5RmjZjr84,10034
 aws_lambda_powertools/warnings/__init__.py,sha256=vqDVeZz8wGtD8WGYNSkQE7AHwqtIrPGRxuoJR_BBnSs,1193
-aws_lambda_powertools-3.7.1a7.dist-info/LICENSE,sha256=vMHS2eBgmwPUIMPb7LQ4p7ib_FPVQXarVjAasflrTwo,951
-aws_lambda_powertools-3.7.1a7.dist-info/METADATA,sha256=nC3pL-7Br5KiXmduzxGJesz9XyL81NDaynSCqh8CFaM,11151
-aws_lambda_powertools-3.7.1a7.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-aws_lambda_powertools-3.7.1a7.dist-info/RECORD,,
+aws_lambda_powertools-3.8.1a0.dist-info/LICENSE,sha256=vMHS2eBgmwPUIMPb7LQ4p7ib_FPVQXarVjAasflrTwo,951
+aws_lambda_powertools-3.8.1a0.dist-info/METADATA,sha256=oL3dXznPiEm3OE0yFIBoJYi4eqM8VPFjws170mcmOOk,11151
+aws_lambda_powertools-3.8.1a0.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+aws_lambda_powertools-3.8.1a0.dist-info/RECORD,,