structlog-config 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,184 @@
1
+ import logging
2
+ from typing import Protocol
3
+
4
+ import orjson
5
+ import structlog
6
+ import structlog.dev
7
+ from structlog.processors import ExceptionRenderer
8
+ from structlog.tracebacks import ExceptionDictTransformer
9
+ from structlog.typing import FilteringBoundLogger
10
+
11
+ from structlog_config.formatters import (
12
+ PathPrettifier,
13
+ add_fastapi_context,
14
+ logger_name,
15
+ pretty_traceback_exception_formatter,
16
+ simplify_activemodel_objects,
17
+ )
18
+
19
+ from . import packages
20
+ from .constants import NO_COLOR, PYTHON_LOG_PATH
21
+ from .environments import is_production, is_pytest, is_staging
22
+ from .stdlib_logging import (
23
+ get_environment_log_level_as_string,
24
+ redirect_stdlib_loggers,
25
+ )
26
+ from .warnings import redirect_showwarnings
27
+
28
+ package_logger = logging.getLogger(__name__)
29
+
30
+
31
def log_processors_for_mode(json_logger: bool) -> list[structlog.types.Processor]:
    """Return the mode-specific tail of the processor chain (the renderer)."""
    if not json_logger:
        # Console output for development: colorized unless NO_COLOR is set,
        # with pretty-traceback rendering when that package is available.
        if packages.pretty_traceback:
            exception_formatter = pretty_traceback_exception_formatter
        else:
            exception_formatter = structlog.dev.default_exception_formatter

        return [
            structlog.dev.ConsoleRenderer(
                colors=not NO_COLOR,
                exception_formatter=exception_formatter,
            )
        ]

    def _sorted_orjson_dumps(value, *args, **kwargs):
        """orjson does not support sort_keys=True, so sorting is requested via options."""
        # kwargs carries structlog's default fallback serializer.
        # OPT_NON_STR_KEYS: starlette-context includes non-string keys (enums).
        return orjson.dumps(
            value,
            option=orjson.OPT_SORT_KEYS | orjson.OPT_NON_STR_KEYS,
            **kwargs,
        )

    return [
        # add exc_info=True to a log and get a full stack trace attached to it
        structlog.processors.format_exc_info,
        # simple, short exception rendering in prod since sentry is in place
        # https://www.structlog.org/en/stable/exceptions.html — a customized version of dict_tracebacks
        ExceptionRenderer(
            ExceptionDictTransformer(
                show_locals=False,
                use_rich=False,
                # number of frames is completely arbitrary
                max_frames=5,
                # TODO `suppress`?
            )
        ),
        # in prod, we want logs to be rendered as JSON payloads
        structlog.processors.JSONRenderer(serializer=_sorted_orjson_dumps),
    ]
70
+
71
+
72
def get_default_processors(json_logger) -> list[structlog.types.Processor]:
    """
    Return the default list of processors for structlog configuration.

    Optional processors are included only when the corresponding packages are
    importable; `None` placeholders are filtered out before returning.
    """
    maybe_fastapi_context = add_fastapi_context if packages.starlette_context else None
    maybe_activemodel = (
        simplify_activemodel_objects
        if packages.activemodel and packages.typeid
        else None
    )

    candidates = [
        # although this is stdlib, it's needed, although I'm not sure entirely why
        structlog.stdlib.add_log_level,
        structlog.contextvars.merge_contextvars,
        logger_name,
        maybe_fastapi_context,
        maybe_activemodel,
        PathPrettifier(),
        structlog.processors.TimeStamper(fmt="iso", utc=True),
        # add `stack_info=True` to a log and get a `stack` attached to the log
        structlog.processors.StackInfoRenderer(),
        *log_processors_for_mode(json_logger),
    ]

    return [candidate for candidate in candidates if candidate is not None]
93
+
94
+
95
def _logger_factory(json_logger: bool):
    """
    Pick the structlog logger factory for the current mode.

    Allow dev users to redirect logs to a file using PYTHON_LOG_PATH.
    In production, optimized for speed
    (https://www.structlog.org/en/stable/performance.html).
    """
    if json_logger:
        return structlog.BytesLoggerFactory()

    if not PYTHON_LOG_PATH:
        # default: print to stdout
        return structlog.PrintLoggerFactory()

    # NOTE: the handle is intentionally left open for the process lifetime —
    # the returned factory keeps writing to it
    log_file = open(PYTHON_LOG_PATH, "a", encoding="utf-8")
    return structlog.PrintLoggerFactory(file=log_file)
111
+
112
+
113
class LoggerWithContext(FilteringBoundLogger, Protocol):
    """
    A customized bound logger class that adds easy-to-remember methods for adding context.

    We don't use a real subclass because `make_filtering_bound_logger` has some logic we don't
    want to replicate.

    The three methods below are attached at runtime by `add_simple_context_aliases`;
    this Protocol only exists so type checkers know about them.
    """

    def context(self, *args, **kwargs) -> None:
        "context manager to temporarily set and clear logging context (alias for structlog.contextvars.bound_contextvars)"
        ...

    def local(self, *args, **kwargs) -> None:
        "set thread-local context (alias for structlog.contextvars.bind_contextvars)"
        ...

    def clear(self) -> None:
        "clear thread-local context (alias for structlog.contextvars.clear_contextvars)"
        ...
132
+
133
+
134
# TODO this may be a bad idea, but I really don't like how the `bound` stuff looks and how to access it, way too ugly
def add_simple_context_aliases(log) -> LoggerWithContext:
    """Attach `context`/`local`/`clear` aliases for the structlog contextvars helpers."""
    aliases = {
        "context": structlog.contextvars.bound_contextvars,
        "local": structlog.contextvars.bind_contextvars,
        "clear": structlog.contextvars.clear_contextvars,
    }
    for alias, helper in aliases.items():
        setattr(log, alias, helper)

    return log
141
+
142
+
143
def configure_logger(
    *, logger_factory=None, json_logger: bool | None = None
) -> LoggerWithContext:
    """
    Create a struct logger with some special additions:

    >>> with log.context(key=value):
    >>>     log.info("some message")

    >>> log.local(key=value)
    >>> log.info("some message")
    >>> log.clear()

    Args:
        logger_factory: Optional logger factory to override the default
        json_logger: Optional flag to use JSON logging. If None, defaults to
            production or staging environment sourced from PYTHON_ENV.

    Returns:
        A bound structlog logger with `context`/`local`/`clear` aliases attached.
    """
    # Reset structlog configuration to make sure we're starting fresh
    # This is important for tests where configure_logger might be called multiple times
    structlog.reset_defaults()

    if json_logger is None:
        json_logger = is_production() or is_staging()

    # route stdlib `logging` records and Python warnings through the same processors
    redirect_stdlib_loggers(json_logger)
    redirect_showwarnings()

    structlog.configure(
        # Don't cache the loggers during tests, it makes it hard to capture them
        cache_logger_on_first_use=not is_pytest(),
        # level filtering happens in the bound-logger wrapper, before processors run
        wrapper_class=structlog.make_filtering_bound_logger(
            get_environment_log_level_as_string()
        ),
        # an explicitly passed factory wins over the environment-derived default
        logger_factory=logger_factory or _logger_factory(json_logger),
        processors=get_default_processors(json_logger),
    )

    log = structlog.get_logger()
    # attach the `context`/`local`/`clear` convenience aliases (see LoggerWithContext)
    log = add_simple_context_aliases(log)

    return log
@@ -0,0 +1,9 @@
1
import os

from decouple import config

# Optional file path for redirecting dev log output (see `_logger_factory`).
PYTHON_LOG_PATH = config("PYTHON_LOG_PATH", default=None)
# Mirrors Python's PYTHONASYNCIODEBUG switch; when truthy, the asyncio logger
# is not silenced by `redirect_stdlib_loggers`.
PYTHONASYNCIODEBUG = config("PYTHONASYNCIODEBUG", default=False, cast=bool)

NO_COLOR = "NO_COLOR" in os.environ
"support NO_COLOR standard https://no-color.org"
@@ -0,0 +1,48 @@
1
+ """
2
+ Configure custom logger behavior based on environment variables.
3
+ """
4
+
5
+ import os
6
+ import re
7
+
8
# Regex to match LOG_LEVEL_* and LOG_PATH_* environment variables.
# The captured suffix is lowercased and underscores become dots to form the
# logger name (e.g. LOG_LEVEL_MY_MODULE -> "my.module"); see get_custom_logger_configs.
LOG_LEVEL_PATTERN = re.compile(r"^LOG_LEVEL_(.+)$")
LOG_PATH_PATTERN = re.compile(r"^LOG_PATH_(.+)$")
11
+
12
+
13
def get_custom_logger_configs() -> dict[str, dict[str, str]]:
    """
    Parse environment variables to extract custom logger configurations.

    Examples:
        LOG_LEVEL_HTTPX=DEBUG
        LOG_PATH_HTTPX=/var/log/httpx.log

        LOG_LEVEL_MY_CUSTOM_LOGGER=INFO
        LOG_PATH_MY_CUSTOM_LOGGER=/var/log/custom.log

    Returns:
        Dictionary mapping logger names to their configuration.
        Example: {"httpx": {"level": "DEBUG", "path": "/var/log/httpx.log"}}
    """
    level_pattern = re.compile(r"^LOG_LEVEL_(.+)$")
    path_pattern = re.compile(r"^LOG_PATH_(.+)$")

    def to_logger_name(matched: re.Match) -> str:
        # LOG_LEVEL_MY_MODULE -> "my.module"
        return matched.group(1).lower().replace("_", ".")

    configs: dict[str, dict[str, str]] = {}

    # Iterate in reverse-sorted order so overwrite behavior is deterministic
    # when two variables map to the same logger name.
    for env_var in sorted(os.environ, reverse=True):
        if level_match := level_pattern.match(env_var):
            configs.setdefault(to_logger_name(level_match), {})["level"] = os.environ[
                env_var
            ]
        elif path_match := path_pattern.match(env_var):
            configs.setdefault(to_logger_name(path_match), {})["path"] = os.environ[
                env_var
            ]

    return configs
@@ -0,0 +1,31 @@
1
+ import os
2
+ import typing as t
3
+
4
+ from decouple import config
5
+
6
+
7
def python_environment() -> str:
    """Return PYTHON_ENV lowercased; defaults to "development" when unset."""
    raw_value = config("PYTHON_ENV", default="development", cast=str)
    return t.cast(str, raw_value).lower()
9
+
10
+
11
def is_testing():
    """True when PYTHON_ENV resolves to "test"."""
    current_env = python_environment()
    return current_env == "test"
13
+
14
+
15
def is_production():
    """True when PYTHON_ENV resolves to "production"."""
    current_env = python_environment()
    return current_env == "production"
17
+
18
+
19
def is_staging():
    """True when PYTHON_ENV resolves to "staging"."""
    current_env = python_environment()
    return current_env == "staging"
21
+
22
+
23
def is_development():
    """True when PYTHON_ENV resolves to "development" (the default)."""
    current_env = python_environment()
    return current_env == "development"
25
+
26
+
27
def is_pytest():
    """
    Detect execution under pytest.

    PYTEST_CURRENT_TEST is set by pytest to indicate the current test being run.
    """
    return os.environ.get("PYTEST_CURRENT_TEST") is not None
@@ -0,0 +1,115 @@
1
+ from time import perf_counter
2
+ from urllib.parse import quote
3
+
4
+ import structlog
5
+ from fastapi import FastAPI
6
+ from starlette.middleware.base import RequestResponseEndpoint
7
+ from starlette.requests import Request
8
+ from starlette.responses import Response
9
+ from starlette.routing import Match, Mount
10
+ from starlette.types import Scope
11
+
12
+ log = structlog.get_logger("access_log")
13
+
14
+
15
def get_route_name(app: FastAPI, scope: Scope, prefix: str = "") -> str:
    """Generate a descriptive route name for timing metrics"""
    label_prefix = f"{prefix}." if prefix else ""

    # first fully-matching route, if any
    matched = None
    for candidate in app.router.routes:
        if candidate.matches(scope)[0] == Match.FULL:
            matched = candidate
            break

    if hasattr(matched, "endpoint") and hasattr(matched, "name"):
        return f"{label_prefix}{matched.endpoint.__module__}.{matched.name}"  # type: ignore
    if isinstance(matched, Mount):
        return f"{type(matched.app).__name__}<{matched.name!r}>"
    # fall back to the raw request path when no route matched
    return scope["path"]
30
+
31
+
32
def get_path_with_query_string(scope: Scope) -> str:
    """Get the URL with the substitution of query parameters.

    Args:
        scope (Scope): Current context.

    Returns:
        str: Percent-quoted path, with "?query" appended when a query string is
        present, or "-" when the scope carries no path.
    """
    if "path" not in scope:
        return "-"
    path_with_query_string = quote(scope["path"])
    # use .get(): scopes built by tests (or non-http scopes) may omit query_string,
    # and indexing would raise KeyError
    if raw_query_string := scope.get("query_string", b""):
        query_string = raw_query_string.decode("ascii")
        path_with_query_string = f"{path_with_query_string}?{query_string}"
    return path_with_query_string
48
+
49
+
50
def get_client_addr(scope: Scope) -> str:
    """Get the client's address.

    Args:
        scope (Scope): Current context.

    Returns:
        str: Client's address in the IP:PORT format, or "" when unknown.
    """
    client = scope.get("client")
    if client:
        host, port = client
        return f"{host}:{port}"
    return ""
64
+
65
+
66
+ # TODO we should look at the static asset logic and pull the prefix path from that
67
def is_static_assets_request(scope: Scope) -> bool:
    """Check if the request is for static assets. Pretty naive check.

    Args:
        scope (Scope): Current context.

    Returns:
        bool: True if the request path ends in .css or .js, False otherwise.
    """
    # str.endswith accepts a tuple of suffixes — one call instead of an `or` chain
    return scope["path"].endswith((".css", ".js"))
77
+
78
+
79
def add_middleware(
    app: FastAPI,
) -> None:
    """Use this method to add this middleware to your fastapi application.

    Registers an access-log middleware that logs status, method, path, query,
    client IP, resolved route name, and elapsed time (ms) for each request.
    Static asset requests are logged at debug level to keep the logs readable.
    """

    @app.middleware("http")
    async def access_log_middleware(
        request: Request, call_next: RequestResponseEndpoint
    ) -> Response:
        scope = request.scope

        # TODO what other request types are there? why do we need this guard?
        if scope["type"] != "http":
            return await call_next(request)

        # resolve the route name only for requests we will actually log
        # (the original computed this before the guard above, wasted work)
        route_name = get_route_name(app, scope)

        start = perf_counter()
        response = await call_next(request)
        # removed the original `assert start`: asserts are stripped under -O and
        # perf_counter() can legitimately be falsy (0.0)
        elapsed = perf_counter() - start

        # debug log all asset requests otherwise the logs become unreadable
        log_method = log.debug if is_static_assets_request(scope) else log.info

        log_method(
            f"{response.status_code} {scope['method']} {get_path_with_query_string(scope)}",
            time=round(elapsed * 1000),
            status=response.status_code,
            method=scope["method"],
            path=scope["path"],
            query=scope["query_string"].decode(),
            client_ip=get_client_addr(scope),
            route=route_name,
        )

        return response
@@ -0,0 +1,166 @@
1
+ import logging
2
+ from pathlib import Path
3
+ from typing import Any, MutableMapping, TextIO
4
+
5
+ from structlog.typing import EventDict, ExcInfo
6
+
7
+ from structlog_config.constants import NO_COLOR
8
+
9
+
10
def simplify_activemodel_objects(
    logger: logging.Logger,
    method_name: str,
    event_dict: MutableMapping[str, Any],
) -> MutableMapping[str, Any]:
    """
    Make the following transformations to the logs:

    - Convert keys ('object') whose value inherit from activemodel's BaseModel to object_id=str(object.id)
    - Convert TypeIDs to their string representation object=str(object)

    What's tricky about this method, and other structlog processors, is they are run *after* a response
    is returned to the user. So, they don't error out in tests and it doesn't impact users. They do show up in Sentry.
    """
    # deferred imports: these are optional dependencies (see packages.py); this
    # processor is only registered when they are installed
    from activemodel import BaseModel
    from sqlalchemy.orm.base import object_state
    from typeid import TypeID

    # list() snapshot because keys are added/removed during iteration
    for key, value in list(event_dict.items()):
        if isinstance(value, BaseModel):

            def get_field_no_refresh(instance, field_name):
                """
                This was a hard-won little bit of code: in fastapi, this action happens *after* the
                db session dependency has finished, which means the session is closed.

                If a DB operation within the session causes the model to be marked as stale, then this will trigger
                a `sqlalchemy.orm.exc.DetachedInstanceError` error. This logic pulls the cached value from the object
                which is better for performance *and* avoids the error.
                """
                # read straight from SQLAlchemy's instance-state dict — no DB refresh
                return str(object_state(instance).dict.get(field_name))

            # TODO this will break as soon as a model doesn't have `id` as pk
            event_dict[f"{key}_id"] = get_field_no_refresh(value, "id")
            del event_dict[key]

        elif isinstance(value, TypeID):
            event_dict[key] = str(value)

    return event_dict
50
+
51
+
52
def logger_name(logger: Any, method_name: Any, event_dict: EventDict) -> EventDict:
    """
    structlog does not have named loggers, so we roll our own

    >>> structlog.get_logger(logger_name="my_logger_name")
    """
    name = event_dict.pop("logger_name", None)
    if name:
        # `logger` is a special key that structlog treats as the logger name;
        # look at `structlog.stdlib.add_logger_name` for more information.
        # setdefault: an existing `logger` value wins.
        event_dict.setdefault("logger", name)

    return event_dict
65
+
66
+
67
def pretty_traceback_exception_formatter(sio: TextIO, exc_info: ExcInfo) -> None:
    """
    Render exceptions with pretty-traceback when a ConsoleRenderer is used.

    structlog defaults to rich, then better-exceptions; this swaps in
    pretty-traceback instead.

    https://github.com/hynek/structlog/blob/66e22d261bf493ad2084009ec97c51832fdbb0b9/src/structlog/dev.py#L412
    """
    # pretty-traceback is only available in dev
    from pretty_traceback.formatting import exc_to_traceback_str

    _, exc_value, traceback = exc_info
    rendered = exc_to_traceback_str(exc_value, traceback, color=not NO_COLOR)
    sio.write("\n" + rendered)
82
+
83
+
84
+ # lifted from:
85
+ # https://github.com/underyx/structlog-pretty/blob/a6a4abbb1f6e4a879f9f5a95ba067577cea65a08/structlog_pretty/processors.py#L226C1-L252C26
86
+ class PathPrettifier:
87
+ """A processor for printing paths.
88
+
89
+ Changes all pathlib.Path objects.
90
+
91
+ 1. Remove PosixPath(...) wrapper by calling str() on the path.
92
+ 2. If path is relative to current working directory,
93
+ print it relative to working directory.
94
+
95
+ Note that working directory is determined when configuring structlog.
96
+ """
97
+
98
+ def __init__(self, base_dir: Path | None = None):
99
+ self.base_dir = base_dir or Path.cwd()
100
+
101
+ def __call__(self, _, __, event_dict):
102
+ for key, path in event_dict.items():
103
+ if not isinstance(path, Path):
104
+ continue
105
+ path = event_dict[key]
106
+ try:
107
+ path = path.relative_to(self.base_dir)
108
+ except ValueError:
109
+ pass # path is not relative to cwd
110
+ event_dict[key] = str(path)
111
+
112
+ return event_dict
113
+
114
+
115
# https://github.com/amenezes/structlog_ext_utils/blob/9b4fbd301c891dd55faf4ce3b102c08a5a0f970a/structlog_ext_utils/processors.py#L59
class RenameField:
    """
    A structlog processor that renames fields in the event dictionary.

    This processor allows for renaming keys in the event dictionary during log processing.

    Parameters
    ----------
    fields : dict
        A dictionary mapping original field names (keys) to new field names (values).
        For example, {'old_name': 'new_name'} will rename 'old_name' to 'new_name'.

    Returns
    -------
    callable
        A callable that transforms an event dictionary by renaming specified fields.

    Examples
    --------
    >>> from structlog.processors import TimeStamper
    >>> processors = [
    ...     RenameField({"timestamp": "new_field"}),
    ... ]
    >>> # This will rename the "timestamp" field to "new_field" in log events
    """

    def __init__(self, fields: dict) -> None:
        self.fields = fields

    def __call__(self, _, __, event_dict):
        for from_key, to_key in self.fields.items():
            # membership test, not truthiness: the original used
            # `event_dict.get(from_key)` which silently skipped falsy values
            # (0, "", None) — those must still be renamed
            if from_key in event_dict:
                event_dict[to_key] = event_dict.pop(from_key)
        return event_dict
150
+
151
+
152
def add_fastapi_context(
    logger: logging.Logger,
    method_name: str,
    event_dict: MutableMapping[str, Any],
) -> MutableMapping[str, Any]:
    """
    Take all state added to starlette-context and add to the logs

    https://github.com/tomwojcik/starlette-context/blob/master/example/setup_logging.py
    """
    # deferred import: starlette-context is an optional dependency (see packages.py)
    from starlette_context import context

    if not context.exists():
        # outside a request cycle there is no context to merge
        return event_dict

    event_dict.update(context.data)
    return event_dict
@@ -0,0 +1,33 @@
1
"""
Determine if certain packages are installed to conditionally enable processors
"""

# Each name below is either the imported module (when installed) or None.
# Other modules treat these as feature flags, e.g. `packages.starlette_context`.

try:
    import orjson
except ImportError:
    orjson = None

try:
    import sqlalchemy
except ImportError:
    sqlalchemy = None

try:
    import activemodel
except ImportError:
    activemodel = None

try:
    import typeid
except ImportError:
    typeid = None

try:
    import pretty_traceback
except ImportError:
    pretty_traceback = None

try:
    import starlette_context
except ImportError:
    starlette_context = None
@@ -0,0 +1,172 @@
1
+ import logging
2
+ import sys
3
+ from pathlib import Path
4
+
5
+ import structlog
6
+ from decouple import config
7
+
8
+ from structlog_config.env_config import get_custom_logger_configs
9
+
10
+ from .constants import PYTHONASYNCIODEBUG
11
+ from .environments import is_production, is_staging
12
+
13
+
14
def get_environment_log_level_as_string() -> str:
    """Read LOG_LEVEL from the environment (default "INFO"), uppercased."""
    configured_level = config("LOG_LEVEL", default="INFO", cast=str)
    return configured_level.upper()
16
+
17
+
18
+ def reset_stdlib_logger(
19
+ logger_name: str,
20
+ default_structlog_handler: logging.Handler,
21
+ level_override: str | None = None,
22
+ ):
23
+ std_logger = logging.getLogger(logger_name)
24
+ std_logger.propagate = False
25
+ std_logger.handlers = []
26
+ std_logger.addHandler(default_structlog_handler)
27
+
28
+ if level_override:
29
+ std_logger.setLevel(level_override)
30
+
31
+
32
def redirect_stdlib_loggers(json_logger: bool):
    """
    Redirect all standard logging module loggers to use the structlog configuration.

    Inspired by: https://gist.github.com/nymous/f138c7f06062b7c43c060bf03759c29e

    Args:
        json_logger: render stdlib records as JSON (production/staging mode)
            instead of the dev console renderer.
    """
    from structlog.stdlib import ProcessorFormatter

    level = get_environment_log_level_as_string()

    # TODO I don't understand why we can't use a processor stack as-is here. Need to investigate further.

    # Use ProcessorFormatter to format log records using structlog processors
    from .__init__ import get_default_processors

    processors = get_default_processors(json_logger=json_logger)

    formatter = ProcessorFormatter(
        processors=[
            # required to strip extra keys that the structlog stdlib bindings add in
            structlog.stdlib.ProcessorFormatter.remove_processors_meta,
            # last processor in the default chain is the renderer (see
            # log_processors_for_mode); reuse it in dev
            processors[-1]
            if not is_production() and not is_staging()
            # don't use ORJSON here, as the stdlib formatter chain expects a str not a bytes
            else structlog.processors.JSONRenderer(sort_keys=True),
        ],
        # processors unique to stdlib logging
        foreign_pre_chain=[
            # logger names are not supported when not using structlog.stdlib.LoggerFactory
            # https://github.com/hynek/structlog/issues/254
            structlog.stdlib.add_logger_name,
            # omit the renderer so we can implement our own
            *processors[:-1],
        ],
    )

    def handler_for_path(path: str) -> logging.FileHandler:
        # Build a file handler (used for LOG_PATH_* overrides) sharing the same formatter.
        path_obj = Path(path)
        # Create parent directories if they don't exist
        path_obj.parent.mkdir(parents=True, exist_ok=True)
        file_handler = logging.FileHandler(path)
        file_handler.setFormatter(formatter)
        return file_handler

    # Create a handler for the root logger
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(formatter)

    # Configure the root logger
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    root_logger.handlers = [handler]  # Replace existing handlers with our handler

    # NOTE(review): the original comment said "disable propagation", but the code
    # sets True; the root logger has no parent, so this flag has no effect either way
    root_logger.propagate = True

    # TODO there is a JSON-like format that can be used to configure loggers instead :/
    # Per-logger overrides. An empty dict means "reset handlers only"; a
    # "levels" map rewrites the level only when the global LOG_LEVEL matches its key.
    std_logging_configuration = {
        "httpcore": {},
        "httpx": {
            "levels": {
                "INFO": "WARNING",
            }
        },
        "azure.core.pipeline.policies.http_logging_policy": {
            "levels": {
                "INFO": "WARNING",
            }
        },
    }

    # Merged from silence_loud_loggers - only silence asyncio if not explicitly debugging it
    if not PYTHONASYNCIODEBUG:
        std_logging_configuration["asyncio"] = {"level": "WARNING"}

    """
    These loggers either:

    1. Are way too chatty by default
    2. Setup before our logging is initialized

    This configuration allows us to easily override configuration of various loggers as we add additional complexity
    to the application. The levels map allows us to define specific level mutations based on the current level configuration
    for a set of standard loggers.
    """

    # LOG_LEVEL_* / LOG_PATH_* environment overrides (see env_config.py)
    environment_logger_config = get_custom_logger_configs()

    # now, let's handle some loggers that are probably already initialized with a handler
    for logger_name, logger_config in std_logging_configuration.items():
        level_override = None

        # Check if we have a direct level setting
        if "level" in logger_config:
            level_override = logger_config["level"]
        # Otherwise, check if we have a level mapping for the current log level
        elif "levels" in logger_config and level in logger_config["levels"]:
            level_override = logger_config["levels"][level]

        handler_for_logger = handler

        # Override with environment-specific config if available
        if logger_name in environment_logger_config:
            env_config = environment_logger_config[logger_name]

            # if we have a custom path, use that instead
            if "path" in env_config:
                handler_for_logger = handler_for_path(env_config["path"])

            if "level" in env_config:
                level_override = env_config["level"]

        reset_stdlib_logger(
            logger_name,
            handler_for_logger,
            level_override,
        )

    # Handle any additional loggers defined in environment variables
    for logger_name, logger_config in environment_logger_config.items():
        # skip if already configured!
        if logger_name in std_logging_configuration:
            continue

        handler_for_logger = handler

        if "path" in logger_config:
            # if we have a custom path, use that instead
            handler_for_logger = handler_for_path(logger_config["path"])

        reset_stdlib_logger(
            logger_name,
            handler_for_logger,
            logger_config.get("level"),
        )

    # TODO do i need to setup exception overrides as well?
    # https://gist.github.com/nymous/f138c7f06062b7c43c060bf03759c29e#file-custom_logging-py-L114-L128
    # if sys.excepthook != sys.__excepthook__:
    #     logging.getLogger(__name__).warning("sys.excepthook has been overridden.")
@@ -0,0 +1,51 @@
1
+ """
2
+ Warning setup functionality to redirect Python warnings to structlog.
3
+ """
4
+
5
+ import warnings
6
+ from typing import Any, TextIO
7
+
8
+ import structlog
9
+
10
+ _original_warnings_showwarning: Any = None
11
+
12
+
13
def _showwarning(
    message: Warning | str,
    category: type[Warning],
    filename: str,
    lineno: int,
    file: TextIO | None = None,
    line: str | None = None,
) -> Any:
    """
    Redirects warnings to structlog so they appear in task logs etc.

    When `file` is None, the warning is logged through a structlog logger named
    "py.warnings" at warning level. Otherwise the call is delegated to the
    original `warnings.showwarning` implementation that was captured by
    `redirect_showwarnings`.
    """
    if file is None:
        warn_log = structlog.get_logger(logger_name="py.warnings")
        warn_log.warning(
            str(message), category=category.__name__, filename=filename, lineno=lineno
        )
        return

    if _original_warnings_showwarning is not None:
        _original_warnings_showwarning(
            message, category, filename, lineno, file, line
        )
40
+
41
+
42
def redirect_showwarnings():
    """
    Redirect Python warnings to use structlog for logging.

    Idempotent: the original `warnings.showwarning` is captured exactly once so
    file-directed warnings can still be delegated to it.
    """
    global _original_warnings_showwarning

    if _original_warnings_showwarning is not None:
        # already installed; keep the first-captured original
        return

    _original_warnings_showwarning = warnings.showwarning
    # Capture warnings and show them via structlog
    warnings.showwarning = _showwarning
@@ -0,0 +1,62 @@
1
+ Metadata-Version: 2.4
2
+ Name: structlog-config
3
+ Version: 0.1.0
4
+ Summary: A comprehensive structlog configuration with sensible defaults for development and production environments, featuring context management, exception formatting, and path prettification.
5
+ Project-URL: Repository, https://github.com/iloveitaly/structlog-config
6
+ Author-email: Michael Bianco <mike@mikebian.co>
7
+ Keywords: json-logging,logging,structlog,structured-logging
8
+ Requires-Python: >=3.10
9
+ Requires-Dist: orjson>=3.10.15
10
+ Requires-Dist: python-decouple-typed>=3.11.0
11
+ Requires-Dist: structlog>=25.2.0
12
+ Description-Content-Type: text/markdown
13
+
14
+ # Structlog Configuration
15
+
16
+ Logging is really important:
17
+
18
+ * High performance JSON logging in production
19
+ * All loggers, even plugin or system loggers, should route through the same formatter
20
+ * Structured logging everywhere
21
+ * Ability to easily set thread-local log context
22
+
23
+ ## Stdlib Log Management
24
+
25
+ Note that `{LOGGER_NAME}` is the name of the system logger assigned via `logging.getLogger(__name__)`:
26
+
27
+ * `LOG_LEVEL_OPENAI`
28
+ * `LOG_PATH_OPENAI`. Ignored in production.
29
+
30
+ ## FastAPI Access Logger
31
+
32
+ Structured, simple access log with request timing to replace the default fastapi access log. Why?
33
+
34
+ 1. It's less verbose
35
+ 2. Uses structured logging params instead of string interpolation
36
+ 3. debug level logs any static assets
37
+
38
+ Here's how to use it:
39
+
40
+ 1. [Disable fastapi's default logging.](https://github.com/iloveitaly/python-starter-template/blob/f54cb47d8d104987f2e4a668f9045a62e0d6818a/main.py#L55-L56)
41
+ 2. [Add the middleware to your FastAPI app.](https://github.com/iloveitaly/python-starter-template/blob/f54cb47d8d104987f2e4a668f9045a62e0d6818a/app/routes/middleware/__init__.py#L63-L65)
42
+
43
+ Adapted from:
44
+
45
+ - https://github.com/iloveitaly/fastapi-logger/blob/main/fastapi_structlog/middleware/access_log.py#L70
46
+ - https://github.com/fastapiutils/fastapi-utils/blob/master/fastapi_utils/timing.py
47
+ - https://pypi.org/project/fastapi-structlog/
48
+ - https://pypi.org/project/asgi-correlation-id/
49
+ - https://gist.github.com/nymous/f138c7f06062b7c43c060bf03759c29e
50
+ - https://github.com/sharu1204/fastapi-structlog/blob/master/app/main.py
51
+
52
+ ## Related Projects
53
+
54
+ * https://github.com/underyx/structlog-pretty
55
+ * https://pypi.org/project/httpx-structlog/
56
+
57
+ ## References
58
+
59
+ - https://github.com/replicate/cog/blob/2e57549e18e044982bd100e286a1929f50880383/python/cog/logging.py#L20
60
+ - https://github.com/apache/airflow/blob/4280b83977cd5a53c2b24143f3c9a6a63e298acc/task_sdk/src/airflow/sdk/log.py#L187
61
+ - https://github.com/kiwicom/structlog-sentry
62
+ - https://github.com/jeremyh/datacube-explorer/blob/b289b0cde0973a38a9d50233fe0fff00e8eb2c8e/cubedash/logs.py#L40C21-L40C42
@@ -0,0 +1,12 @@
1
+ structlog_config/__init__.py,sha256=OA1J4X3oWWPyqu1vojagsCrHmWDahqyFP_tAJJMYpTk,6162
2
+ structlog_config/constants.py,sha256=uwfeIMlu6yzl67dOS_JP427CO-9nyHX1kRyjp-Obb1M,260
3
+ structlog_config/env_config.py,sha256=CEjovBIJWxHtbzeqU2VAZ0SwYl8VKL_ECSgIfBU2Pbs,1738
4
+ structlog_config/environments.py,sha256=JpZYVVDGxEf1EaKdPdn6Jo-4wJK6SqF0ueFl7e2TBvI,612
5
+ structlog_config/fastapi_access_logger.py,sha256=DjO0Gn4zRNxXNBeOiibgwlovyg2dHbUFB2UMUzAE4Iw,3462
6
+ structlog_config/formatters.py,sha256=cprGEjvRFphJixbb0nVCpPn9sfw_Wv4d2vPtKDpM05A,5846
7
+ structlog_config/packages.py,sha256=asxrzLR-iRYAbkoSYutyTdIRcruTjHgkzfe2pjm2VFM,519
8
+ structlog_config/stdlib_logging.py,sha256=hQfX-NpEezqbPyvfw-F95i5-i3-zoaAvaWzSLEjsggM,6097
9
+ structlog_config/warnings.py,sha256=c74VRLxhx7jW96vkYfYwrKkGOaqQLLIfKQuaeB7i4n0,1594
10
+ structlog_config-0.1.0.dist-info/METADATA,sha256=uvFkIiX-qnlT0it-Zp1rJ0vb_VAMUsEP_HXIdwlMruM,2654
11
+ structlog_config-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
12
+ structlog_config-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.27.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any