ganicas-package 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ganicas_package-0.1.1/PKG-INFO +228 -0
- ganicas_package-0.1.1/README.md +203 -0
- ganicas_package-0.1.1/pyproject.toml +116 -0
- ganicas_package-0.1.1/src/__init__.py +0 -0
- ganicas_package-0.1.1/src/config.py +17 -0
- ganicas_package-0.1.1/src/logging/__init__.py +4 -0
- ganicas_package-0.1.1/src/logging/__pycache__/__init__.cpython-312.pyc +0 -0
- ganicas_package-0.1.1/src/logging/__pycache__/configuration.cpython-312.pyc +0 -0
- ganicas_package-0.1.1/src/logging/__pycache__/formatter.cpython-312.pyc +0 -0
- ganicas_package-0.1.1/src/logging/__pycache__/logger.cpython-312.pyc +0 -0
- ganicas_package-0.1.1/src/logging/__pycache__/middlewares.cpython-312.pyc +0 -0
- ganicas_package-0.1.1/src/logging/configuration.py +69 -0
- ganicas_package-0.1.1/src/logging/formatter.py +15 -0
- ganicas_package-0.1.1/src/logging/logger.py +90 -0
- ganicas_package-0.1.1/src/logging/middlewares.py +48 -0
- ganicas_package-0.1.1/src/logging/utils.py +0 -0
@@ -0,0 +1,228 @@
|
|
1
|
+
Metadata-Version: 2.1
|
2
|
+
Name: ganicas-package
|
3
|
+
Version: 0.1.1
|
4
|
+
Summary: Ganicas internal Python package for structured logging and utilities.
|
5
|
+
Keywords: logging,utilities,internal-package
|
6
|
+
Author: Ganicas
|
7
|
+
Requires-Python: >=3.8,<3.13
|
8
|
+
Classifier: Development Status :: 2 - Pre-Alpha
|
9
|
+
Classifier: Intended Audience :: Developers
|
10
|
+
Classifier: License :: Other/Proprietary License
|
11
|
+
Classifier: Natural Language :: English
|
12
|
+
Classifier: Programming Language :: Python :: 3
|
13
|
+
Classifier: Programming Language :: Python :: 3.8
|
14
|
+
Classifier: Programming Language :: Python :: 3.9
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
18
|
+
Requires-Dist: fastapi (>=0.114.2,<0.116.0)
|
19
|
+
Requires-Dist: flask (>=2.2.0,<3.0.0)
|
20
|
+
Requires-Dist: httpx (>=0.28.1,<0.29.0)
|
21
|
+
Requires-Dist: python-json-logger (>=3.2.1,<4.0.0)
|
22
|
+
Requires-Dist: structlog (>=24.4.0,<25.0.0)
|
23
|
+
Description-Content-Type: text/markdown
|
24
|
+
|
25
|
+
# Ganicas Python Package
|
26
|
+
|
27
|
+
### Structlog
|
28
|
+
Structlog is a powerful logging library for structured, context-aware logging.
|
29
|
+
More details can be found in the [structlog](https://www.structlog.org/en/stable/).
|
30
|
+
|
31
|
+
#### Example, basic structlog configuration
|
32
|
+
|
33
|
+
instead of `logger = logging.getLogger(__name__)` it is `logger = structlog.get_logger(__name__)`
|
34
|
+
|
35
|
+
```python
|
36
|
+
from src.logging import LoggingConfigurator
|
37
|
+
from src.config import Config
|
38
|
+
import structlog
|
39
|
+
|
40
|
+
config = Config()
|
41
|
+
|
42
|
+
LoggingConfigurator(
|
43
|
+
service_name=config.APP_NAME,
|
44
|
+
log_level='INFO',
|
45
|
+
setup_logging_dict=True
|
46
|
+
).configure_structlog(
|
47
|
+
formatter='plain_console',
|
48
|
+
formatter_std_lib='plain_console'
|
49
|
+
)
|
50
|
+
|
51
|
+
logger = structlog.get_logger(__name__)
|
52
|
+
logger.debug("This is a DEBUG log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
53
|
+
logger.info("This is an INFO log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
54
|
+
logger.warning("This is a WARNING log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
55
|
+
logger.error("This is an ERROR log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
56
|
+
logger.critical("This is a CRITICAL log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
57
|
+
|
58
|
+
try:
|
59
|
+
1 / 0
|
60
|
+
except ZeroDivisionError:
|
61
|
+
logger.exception("An EXCEPTION log with stack trace occurred", key_1="value_1", key_2="value_2")
|
62
|
+
|
63
|
+
|
64
|
+
```
|
65
|
+

|
66
|
+
|
67
|
+
|
68
|
+
In production, you should aim for structured, machine-readable logs that can be easily ingested by log aggregation and monitoring tools like ELK (Elasticsearch, Logstash, Kibana), Datadog, or Prometheus:
|
69
|
+
|
70
|
+
```python
|
71
|
+
from src.logging import LoggingConfigurator
|
72
|
+
from src.config import Config
|
73
|
+
import structlog
|
74
|
+
|
75
|
+
config = Config()
|
76
|
+
|
77
|
+
LoggingConfigurator(
|
78
|
+
service_name=config.APP_NAME,
|
79
|
+
log_level='INFO',
|
80
|
+
setup_logging_dict=True
|
81
|
+
).configure_structlog(
|
82
|
+
formatter='json_formatter',
|
83
|
+
formatter_std_lib='json_formatter'
|
84
|
+
)
|
85
|
+
|
86
|
+
logger = structlog.get_logger(__name__)
|
87
|
+
logger.debug("This is a DEBUG log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
88
|
+
logger.info("This is an INFO log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
89
|
+
logger.warning("This is a WARNING log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
90
|
+
logger.error("This is an ERROR log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
91
|
+
logger.critical("This is a CRITICAL log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
92
|
+
|
93
|
+
try:
|
94
|
+
1 / 0
|
95
|
+
except ZeroDivisionError:
|
96
|
+
logger.exception("An EXCEPTION log with stack trace occurred", key_1="value_1", key_2="value_2")
|
97
|
+
```
|
98
|
+
|
99
|
+

|
100
|
+
|
101
|
+
|
102
|
+
#### Using Middleware for Automatic Logging Context:
|
103
|
+
|
104
|
+
The middleware adds request_id, IP, and user_id to every log during a request/response cycle.
|
105
|
+
This middleware module provides logging context management for both Flask and FastAPI applications using structlog.
|
106
|
+
|
107
|
+
Flask Middleware (add_request_context_flask): Captures essential request data such as the request ID, method, and path, binding them to the structlog context for better traceability during the request lifecycle.
|
108
|
+
|
109
|
+
FastAPI Middleware (add_request_context_fastapi): Captures similar request metadata, ensuring a request ID is present, generating one if absent.
|
110
|
+
It binds the request context to structlog and clears it after the request completes.
|
111
|
+
|
112
|
+
Class-Based Middleware (FastAPIRequestContextMiddleware): A reusable FastAPI middleware class that integrates with the BaseHTTPMiddleware and delegates the logging setup to the add_request_context_fastapi function.
|
113
|
+
|
114
|
+
This setup ensures structured, consistent logging across both frameworks, improving traceability and debugging in distributed systems.
|
115
|
+
|
116
|
+
|
117
|
+
This guide explains how to set up and use structlog for structured logging in a Flask application. The goal is to have a consistent and centralized logging setup that can be reused across the application.
|
118
|
+
The logger is initialized once in the main application file (e.g., app.py).
|
119
|
+
|
120
|
+
```python
|
121
|
+
import sys
|
122
|
+
import uuid
|
123
|
+
from flask import Flask, request
|
124
|
+
from src.logging import LoggingConfigurator
|
125
|
+
from src.logging.middlewares import add_request_context_flask
|
126
|
+
from src.config import Config
|
127
|
+
import structlog
|
128
|
+
|
129
|
+
config = Config()
|
130
|
+
|
131
|
+
LoggingConfigurator(
|
132
|
+
service_name=config.APP_NAME,
|
133
|
+
log_level="INFO",
|
134
|
+
setup_logging_dict=True,
|
135
|
+
).configure_structlog(formatter='json_formatter', formatter_std_lib='json_formatter')
|
136
|
+
|
137
|
+
logger = structlog.get_logger(__name__)
|
138
|
+
|
139
|
+
app = Flask(__name__)
|
140
|
+
|
141
|
+
@app.before_request
|
142
|
+
def set_logging_context():
|
143
|
+
"""Bind context for each request using the middleware."""
|
144
|
+
add_request_context_flask()
|
145
|
+
logger.info("Context set for request")
|
146
|
+
|
147
|
+
with app.test_client() as client:
|
148
|
+
dynamic_request_id = str(uuid.uuid4())
|
149
|
+
client.get("/", headers={"X-User-Name": "John Doe", "X-Request-ID": dynamic_request_id})
|
150
|
+
logger.info("Test client request sent", request_id=dynamic_request_id)
|
151
|
+
|
152
|
+
```
|
153
|
+
|
154
|
+

|
155
|
+
|
156
|
+
You can use the same logger instance across different modules by importing structlog directly.
|
157
|
+
Example (services.py):
|
158
|
+
|
159
|
+
|
160
|
+
```python
|
161
|
+
import structlog
|
162
|
+
|
163
|
+
logger = structlog.get_logger(__name__)
|
164
|
+
logger.info("Processing data started", data_size=100)
|
165
|
+
```
|
166
|
+
Key Points:
|
167
|
+
|
168
|
+
- Centralized Configuration: The logger is initialized once in app.py.
|
169
|
+
- Consistent Usage: structlog.get_logger(__name__) is imported and used across all files.
|
170
|
+
- Context Management: Context is managed using structlog.contextvars.bind_contextvars().
|
171
|
+
- Structured Logging: The JSON formatter ensures logs are machine-readable.
|
172
|
+
|
173
|
+
FastAPI:
|
174
|
+
|
175
|
+
```python
|
176
|
+
import uuid
|
177
|
+
from fastapi import FastAPI, Request
|
178
|
+
from src.logging.middlewares import FastAPIRequestContextMiddleware
|
179
|
+
import structlog
|
180
|
+
|
181
|
+
config = Config()
|
182
|
+
|
183
|
+
LoggingConfigurator(
|
184
|
+
service_name=config.APP_NAME,
|
185
|
+
log_level="INFO",
|
186
|
+
setup_logging_dict=True,
|
187
|
+
).configure_structlog(formatter='json_formatter', formatter_std_lib='json_formatter')
|
188
|
+
|
189
|
+
logger = structlog.get_logger(__name__)
|
190
|
+
app = FastAPI()
|
191
|
+
app.add_middleware(FastAPIRequestContextMiddleware)
|
192
|
+
|
193
|
+
```
|
194
|
+

|
195
|
+
|
196
|
+
|
197
|
+
Automatic injection of:
|
198
|
+
- user_id
|
199
|
+
- IP
|
200
|
+
- request_id
|
201
|
+
- request_method
|
202
|
+
|
203
|
+
|
204
|
+
This is a console view; in production it will be JSON (using python-json-logger to keep standard logging and structlog output as close as possible)
|
205
|
+
|
206
|
+
|
207
|
+
### Why Use a Structured Logger?
|
208
|
+
- Standard logging often outputs plain text logs, which can be challenging for log aggregation tools like EFK Stack or Grafana Loki to process effectively.
|
209
|
+
- Structured logging outputs data in a machine-readable format (e.g., JSON), making it easier for log analysis tools to filter and process logs efficiently.
|
210
|
+
- With structured logging, developers can filter logs by fields such as request_id, user_id, and transaction_id for better traceability across distributed systems.
|
211
|
+
- The primary goal is to simplify debugging, enable better error tracking, and improve observability with enhanced log analysis capabilities.
|
212
|
+
- Structured logs are designed to be consumed primarily by machines for monitoring and analytics, while still being readable for developers when needed.
|
213
|
+
- This package leverages structlog, a library that enhances Python's standard logging by providing better context management and a flexible structure for log messages.
|
214
|
+
|
215
|
+
|
216
|
+
# Development of this project
|
217
|
+
|
218
|
+
Please install [poetry](https://python-poetry.org/docs/#installation) as this is the tool we use for releasing and development.
|
219
|
+
|
220
|
+
poetry install && poetry run pytest -rs --cov=src -s
|
221
|
+
|
222
|
+
To run tests inside docker:
|
223
|
+
|
224
|
+
poetry install --with dev && poetry run pytest -rs --cov=src
|
225
|
+
|
226
|
+
To run pre-commit:
|
227
|
+
poetry run pre-commit run --all-files
|
228
|
+
|
@@ -0,0 +1,203 @@
|
|
1
|
+
# Ganicas Python Package
|
2
|
+
|
3
|
+
### Structlog
|
4
|
+
Structlog is a powerful logging library for structured, context-aware logging.
|
5
|
+
More details can be found in the [structlog](https://www.structlog.org/en/stable/).
|
6
|
+
|
7
|
+
#### Example, basic structlog configuration
|
8
|
+
|
9
|
+
instead of `logger = logging.getLogger(__name__)` it is `logger = structlog.get_logger(__name__)`
|
10
|
+
|
11
|
+
```python
|
12
|
+
from src.logging import LoggingConfigurator
|
13
|
+
from src.config import Config
|
14
|
+
import structlog
|
15
|
+
|
16
|
+
config = Config()
|
17
|
+
|
18
|
+
LoggingConfigurator(
|
19
|
+
service_name=config.APP_NAME,
|
20
|
+
log_level='INFO',
|
21
|
+
setup_logging_dict=True
|
22
|
+
).configure_structlog(
|
23
|
+
formatter='plain_console',
|
24
|
+
formatter_std_lib='plain_console'
|
25
|
+
)
|
26
|
+
|
27
|
+
logger = structlog.get_logger(__name__)
|
28
|
+
logger.debug("This is a DEBUG log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
29
|
+
logger.info("This is an INFO log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
30
|
+
logger.warning("This is a WARNING log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
31
|
+
logger.error("This is an ERROR log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
32
|
+
logger.critical("This is a CRITICAL log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
33
|
+
|
34
|
+
try:
|
35
|
+
1 / 0
|
36
|
+
except ZeroDivisionError:
|
37
|
+
logger.exception("An EXCEPTION log with stack trace occurred", key_1="value_1", key_2="value_2")
|
38
|
+
|
39
|
+
|
40
|
+
```
|
41
|
+

|
42
|
+
|
43
|
+
|
44
|
+
In production, you should aim for structured, machine-readable logs that can be easily ingested by log aggregation and monitoring tools like ELK (Elasticsearch, Logstash, Kibana), Datadog, or Prometheus:
|
45
|
+
|
46
|
+
```python
|
47
|
+
from src.logging import LoggingConfigurator
|
48
|
+
from src.config import Config
|
49
|
+
import structlog
|
50
|
+
|
51
|
+
config = Config()
|
52
|
+
|
53
|
+
LoggingConfigurator(
|
54
|
+
service_name=config.APP_NAME,
|
55
|
+
log_level='INFO',
|
56
|
+
setup_logging_dict=True
|
57
|
+
).configure_structlog(
|
58
|
+
formatter='json_formatter',
|
59
|
+
formatter_std_lib='json_formatter'
|
60
|
+
)
|
61
|
+
|
62
|
+
logger = structlog.get_logger(__name__)
|
63
|
+
logger.debug("This is a DEBUG log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
64
|
+
logger.info("This is an INFO log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
65
|
+
logger.warning("This is a WARNING log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
66
|
+
logger.error("This is an ERROR log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
67
|
+
logger.critical("This is a CRITICAL log message", key_1="value_1", key_2="value_2", key_n="value_n")
|
68
|
+
|
69
|
+
try:
|
70
|
+
1 / 0
|
71
|
+
except ZeroDivisionError:
|
72
|
+
logger.exception("An EXCEPTION log with stack trace occurred", key_1="value_1", key_2="value_2")
|
73
|
+
```
|
74
|
+
|
75
|
+

|
76
|
+
|
77
|
+
|
78
|
+
#### Using Middleware for Automatic Logging Context:
|
79
|
+
|
80
|
+
The middleware adds request_id, IP, and user_id to every log during a request/response cycle.
|
81
|
+
This middleware module provides logging context management for both Flask and FastAPI applications using structlog.
|
82
|
+
|
83
|
+
Flask Middleware (add_request_context_flask): Captures essential request data such as the request ID, method, and path, binding them to the structlog context for better traceability during the request lifecycle.
|
84
|
+
|
85
|
+
FastAPI Middleware (add_request_context_fastapi): Captures similar request metadata, ensuring a request ID is present, generating one if absent.
|
86
|
+
It binds the request context to structlog and clears it after the request completes.
|
87
|
+
|
88
|
+
Class-Based Middleware (FastAPIRequestContextMiddleware): A reusable FastAPI middleware class that integrates with the BaseHTTPMiddleware and delegates the logging setup to the add_request_context_fastapi function.
|
89
|
+
|
90
|
+
This setup ensures structured, consistent logging across both frameworks, improving traceability and debugging in distributed systems.
|
91
|
+
|
92
|
+
|
93
|
+
This guide explains how to set up and use structlog for structured logging in a Flask application. The goal is to have a consistent and centralized logging setup that can be reused across the application.
|
94
|
+
The logger is initialized once in the main application file (e.g., app.py).
|
95
|
+
|
96
|
+
```python
|
97
|
+
import sys
|
98
|
+
import uuid
|
99
|
+
from flask import Flask, request
|
100
|
+
from src.logging import LoggingConfigurator
|
101
|
+
from src.logging.middlewares import add_request_context_flask
|
102
|
+
from src.config import Config
|
103
|
+
import structlog
|
104
|
+
|
105
|
+
config = Config()
|
106
|
+
|
107
|
+
LoggingConfigurator(
|
108
|
+
service_name=config.APP_NAME,
|
109
|
+
log_level="INFO",
|
110
|
+
setup_logging_dict=True,
|
111
|
+
).configure_structlog(formatter='json_formatter', formatter_std_lib='json_formatter')
|
112
|
+
|
113
|
+
logger = structlog.get_logger(__name__)
|
114
|
+
|
115
|
+
app = Flask(__name__)
|
116
|
+
|
117
|
+
@app.before_request
|
118
|
+
def set_logging_context():
|
119
|
+
"""Bind context for each request using the middleware."""
|
120
|
+
add_request_context_flask()
|
121
|
+
logger.info("Context set for request")
|
122
|
+
|
123
|
+
with app.test_client() as client:
|
124
|
+
dynamic_request_id = str(uuid.uuid4())
|
125
|
+
client.get("/", headers={"X-User-Name": "John Doe", "X-Request-ID": dynamic_request_id})
|
126
|
+
logger.info("Test client request sent", request_id=dynamic_request_id)
|
127
|
+
|
128
|
+
```
|
129
|
+
|
130
|
+

|
131
|
+
|
132
|
+
You can use the same logger instance across different modules by importing structlog directly.
|
133
|
+
Example (services.py):
|
134
|
+
|
135
|
+
|
136
|
+
```python
|
137
|
+
import structlog
|
138
|
+
|
139
|
+
logger = structlog.get_logger(__name__)
|
140
|
+
logger.info("Processing data started", data_size=100)
|
141
|
+
```
|
142
|
+
Key Points:
|
143
|
+
|
144
|
+
- Centralized Configuration: The logger is initialized once in app.py.
|
145
|
+
- Consistent Usage: structlog.get_logger(__name__) is imported and used across all files.
|
146
|
+
- Context Management: Context is managed using structlog.contextvars.bind_contextvars().
|
147
|
+
- Structured Logging: The JSON formatter ensures logs are machine-readable.
|
148
|
+
|
149
|
+
FastAPI:
|
150
|
+
|
151
|
+
```python
|
152
|
+
import uuid
|
153
|
+
from fastapi import FastAPI, Request
|
154
|
+
from src.logging.middlewares import FastAPIRequestContextMiddleware
|
155
|
+
import structlog
|
156
|
+
|
157
|
+
config = Config()
|
158
|
+
|
159
|
+
LoggingConfigurator(
|
160
|
+
service_name=config.APP_NAME,
|
161
|
+
log_level="INFO",
|
162
|
+
setup_logging_dict=True,
|
163
|
+
).configure_structlog(formatter='json_formatter', formatter_std_lib='json_formatter')
|
164
|
+
|
165
|
+
logger = structlog.get_logger(__name__)
|
166
|
+
app = FastAPI()
|
167
|
+
app.add_middleware(FastAPIRequestContextMiddleware)
|
168
|
+
|
169
|
+
```
|
170
|
+

|
171
|
+
|
172
|
+
|
173
|
+
Automatic injection of:
|
174
|
+
- user_id
|
175
|
+
- IP
|
176
|
+
- request_id
|
177
|
+
- request_method
|
178
|
+
|
179
|
+
|
180
|
+
This is a console view; in production it will be JSON (using python-json-logger to keep standard logging and structlog output as close as possible)
|
181
|
+
|
182
|
+
|
183
|
+
### Why Use a Structured Logger?
|
184
|
+
- Standard logging often outputs plain text logs, which can be challenging for log aggregation tools like EFK Stack or Grafana Loki to process effectively.
|
185
|
+
- Structured logging outputs data in a machine-readable format (e.g., JSON), making it easier for log analysis tools to filter and process logs efficiently.
|
186
|
+
- With structured logging, developers can filter logs by fields such as request_id, user_id, and transaction_id for better traceability across distributed systems.
|
187
|
+
- The primary goal is to simplify debugging, enable better error tracking, and improve observability with enhanced log analysis capabilities.
|
188
|
+
- Structured logs are designed to be consumed primarily by machines for monitoring and analytics, while still being readable for developers when needed.
|
189
|
+
- This package leverages structlog, a library that enhances Python's standard logging by providing better context management and a flexible structure for log messages.
|
190
|
+
|
191
|
+
|
192
|
+
# Development of this project
|
193
|
+
|
194
|
+
Please install [poetry](https://python-poetry.org/docs/#installation) as this is the tool we use for releasing and development.
|
195
|
+
|
196
|
+
poetry install && poetry run pytest -rs --cov=src -s
|
197
|
+
|
198
|
+
To run tests inside docker:
|
199
|
+
|
200
|
+
poetry install --with dev && poetry run pytest -rs --cov=src
|
201
|
+
|
202
|
+
To run pre-commit:
|
203
|
+
poetry run pre-commit run --all-files
|
@@ -0,0 +1,116 @@
|
|
1
|
+
[tool.poetry]
|
2
|
+
name = "ganicas-package"
|
3
|
+
version = "0.1.1"
|
4
|
+
description = "Ganicas internal Python package for structured logging and utilities."
|
5
|
+
authors = ["Ganicas"]
|
6
|
+
readme = "README.md"
|
7
|
+
keywords = ["logging", "utilities", "internal-package"]
|
8
|
+
classifiers = [
|
9
|
+
"Development Status :: 2 - Pre-Alpha",
|
10
|
+
"Intended Audience :: Developers",
|
11
|
+
"Natural Language :: English",
|
12
|
+
"Programming Language :: Python :: 3",
|
13
|
+
"Programming Language :: Python :: 3.8",
|
14
|
+
"Programming Language :: Python :: 3.9",
|
15
|
+
"Programming Language :: Python :: 3.10",
|
16
|
+
"Programming Language :: Python :: 3.11",
|
17
|
+
"Programming Language :: Python :: 3.12",
|
18
|
+
"License :: Other/Proprietary License"
|
19
|
+
]
|
20
|
+
packages = [{ include = "src" }]
|
21
|
+
|
22
|
+
[tool.poetry.dependencies]
|
23
|
+
python = ">=3.8,<3.13"
|
24
|
+
structlog = "^24.4.0"
|
25
|
+
python-json-logger = "^3.2.1"
|
26
|
+
fastapi = ">=0.114.2,<0.116.0"
|
27
|
+
httpx = "^0.28.1"
|
28
|
+
flask = "^2.2.0"
|
29
|
+
|
30
|
+
|
31
|
+
[tool.poetry.group.dev.dependencies]
|
32
|
+
pytest = "^8.0.0"
|
33
|
+
pytest-cov = { version = "^6.0.0", python = ">=3.9" }
|
34
|
+
pre-commit = { version = "4.*", python = ">=3.9" }
|
35
|
+
ipdb = "^0.13.13"
|
36
|
+
ipython = { version = "^8.24.0", python = ">=3.10" }
|
37
|
+
ruff = "0.8.*"
|
38
|
+
|
39
|
+
[tool.coverage.report]
|
40
|
+
exclude_lines = [
|
41
|
+
"no cov",
|
42
|
+
"pragma: no cover",
|
43
|
+
"def __str__",
|
44
|
+
"if __name__ == .__main__.:",
|
45
|
+
"if TYPE_CHECKING:",
|
46
|
+
]
|
47
|
+
omit = ["*/tests/*"]
|
48
|
+
|
49
|
+
[tool.ruff]
|
50
|
+
# Docs: https://beta.ruff.rs/docs/
|
51
|
+
# Exclude a variety of commonly ignored directories.
|
52
|
+
exclude = [
|
53
|
+
".git",
|
54
|
+
".mypy_cache",
|
55
|
+
".pre-commit-cache",
|
56
|
+
".ruff_cache",
|
57
|
+
".tox",
|
58
|
+
".venv",
|
59
|
+
"venv",
|
60
|
+
"docs",
|
61
|
+
"__pycache__",
|
62
|
+
"**/migrations/*",
|
63
|
+
]
|
64
|
+
|
65
|
+
line-length = 120
|
66
|
+
|
67
|
+
|
68
|
+
[tool.ruff.lint]
|
69
|
+
# Rules: https://beta.ruff.rs/docs/rules/
|
70
|
+
select = [
|
71
|
+
"F",
|
72
|
+
"FA",
|
73
|
+
"E",
|
74
|
+
"B",
|
75
|
+
"C",
|
76
|
+
"C4",
|
77
|
+
"C90",
|
78
|
+
"I",
|
79
|
+
"N",
|
80
|
+
"EXE",
|
81
|
+
"ISC",
|
82
|
+
"ICN",
|
83
|
+
"INP",
|
84
|
+
"INT",
|
85
|
+
"PIE",
|
86
|
+
"SIM",
|
87
|
+
"W",
|
88
|
+
"T20",
|
89
|
+
"UP",
|
90
|
+
"T10",
|
91
|
+
"G",
|
92
|
+
"DJ",
|
93
|
+
"ERA",
|
94
|
+
"TID252",
|
95
|
+
"RUF100",
|
96
|
+
"TRY300",
|
97
|
+
"TRY301",
|
98
|
+
"TRY400",
|
99
|
+
"FLY",
|
100
|
+
"S",
|
101
|
+
"BLE",
|
102
|
+
"DTZ",
|
103
|
+
"PYI",
|
104
|
+
"RSE",
|
105
|
+
"RET",
|
106
|
+
"PTH",
|
107
|
+
"PL",
|
108
|
+
"PERF",
|
109
|
+
"RUF",
|
110
|
+
]
|
111
|
+
# Later on might be useful C/C90 (Compexity), ERA (Found commented-out code), FBT
|
112
|
+
ignore = ["ISC001", "B008", "SIM102", "S101", "RUF012", "EXE002", "FA102", "UP007", "UP006"]
|
113
|
+
|
114
|
+
[build-system]
|
115
|
+
requires = ["poetry-core"]
|
116
|
+
build-backend = "poetry.core.masonry.api"
|
File without changes
|
@@ -0,0 +1,17 @@
|
|
1
|
+
import os
|
2
|
+
from dataclasses import dataclass, field
|
3
|
+
|
4
|
+
|
5
|
+
@dataclass
class Config:
    """Application configuration resolved from environment variables.

    Values are read from the environment each time an instance is created,
    falling back to the documented defaults when a variable is unset.
    """

    # Populated in __post_init__ from the environment; not constructor args.
    APP_NAME: str = field(init=False)
    ENVIRONMENT: str = field(init=False)
    LOG_LEVEL: str = field(init=False)

    def __post_init__(self):
        """Resolve each setting from the environment with a sane default."""
        env = os.getenv
        self.APP_NAME = env("APP_NAME", "App")
        self.ENVIRONMENT = env("ENVIRONMENT", "dev")
        self.LOG_LEVEL = env("LOG_LEVEL", "INFO")
|
15
|
+
|
16
|
+
|
17
|
+
config = Config()
|
Binary file
|
Binary file
|
Binary file
|
@@ -0,0 +1,69 @@
|
|
1
|
+
from typing import Any
|
2
|
+
|
3
|
+
import structlog
|
4
|
+
|
5
|
+
from src.config import Config
|
6
|
+
|
7
|
+
|
8
|
+
def get_default_logging_conf(log_level: str, formatter: str, formatter_std_lib: str) -> "dict[str, Any]":
    """Build a ``logging.config.dictConfig``-compatible configuration.

    Args:
        log_level: Level name applied to all handlers and loggers (e.g. ``"INFO"``).
        formatter: Formatter key for the app-specific ``console`` handler.
        formatter_std_lib: Formatter key for the root ``console_std_lib`` handler.

    Returns:
        A dictConfig dictionary with a root logger (standard-library output) plus
        a dedicated, non-propagating logger named after ``Config().APP_NAME``.

    Raises:
        NotImplementedError: If either formatter key is not a known formatter.

    Note:
        The annotations are string literals so this module stays importable on
        Python 3.8 (the package supports ``>=3.8``), where ``dict[str, Any]`` in
        an evaluated signature annotation raises ``TypeError``.
    """
    config_instance = Config()
    app_name = config_instance.APP_NAME

    formatters = {
        "verbose": {
            "format": "%(asctime)s %(levelname)s %(name)s %(message)s",
        },
        # Project JSON formatter (python-json-logger based) for machine-readable logs.
        "json_formatter": {
            "()": "src.logging.formatter.LogFormatter",
        },
        # Human-friendly console output rendered through structlog.
        "plain_console": {
            "()": structlog.stdlib.ProcessorFormatter,
            "processor": structlog.dev.ConsoleRenderer(event_key="message"),
        },
        "plain_console_std_lib": {
            "()": "logging.Formatter",
            "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s",
        },
        "key_value": {
            "()": structlog.stdlib.ProcessorFormatter,
            "processor": structlog.processors.KeyValueRenderer(
                key_order=["microservice", "timestamp", "level", "event", "logger"]
            ),
        },
    }

    # Fail fast on unknown formatter keys rather than letting dictConfig error later.
    if formatter not in formatters or formatter_std_lib not in formatters:
        raise NotImplementedError("formatter not supported")

    config: "dict[str, Any]" = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": formatters,
        "handlers": {
            "console": {
                "level": log_level,
                "class": "logging.StreamHandler",
                "formatter": formatter,
            },
            "console_std_lib": {
                "level": log_level,
                "class": "logging.StreamHandler",
                "formatter": formatter_std_lib,
            },
        },
        "loggers": {
            # Root logger: everything not claimed by the app logger goes through
            # the standard-library handler.
            "": {
                "level": log_level,
                "handlers": ["console_std_lib"],
                "propagate": False,
            },
        },
    }

    # The app's own logger gets the structlog-aware handler and does not
    # propagate, so app records are not emitted twice via the root logger.
    config["loggers"][app_name] = {
        "level": log_level,
        "handlers": ["console"],
        "propagate": False,
    }

    return config
|
@@ -0,0 +1,15 @@
|
|
1
|
+
from datetime import datetime, timezone
|
2
|
+
|
3
|
+
from pythonjsonlogger import jsonlogger
|
4
|
+
|
5
|
+
from src.config import config
|
6
|
+
|
7
|
+
|
8
|
+
class LogFormatter(jsonlogger.JsonFormatter):
    """JSON formatter that stamps each record with service name, timestamp and level."""

    def add_fields(self, log_record, record, message_dict):
        """Augment the JSON record with microservice, timestamp and level fields."""
        super().add_fields(log_record, record, message_dict)
        # Tag every record with the owning service so aggregated logs are filterable.
        log_record["microservice"] = config.APP_NAME
        # Only synthesise a timestamp when an upstream processor did not set one.
        if not log_record.get("timestamp"):
            log_record["timestamp"] = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        current_level = log_record.get("level", record.levelname)
        log_record["level"] = current_level.upper()
|
@@ -0,0 +1,90 @@
|
|
1
|
+
from typing import Optional
|
2
|
+
import logging
|
3
|
+
import logging.config
|
4
|
+
import structlog
|
5
|
+
from structlog import contextvars
|
6
|
+
from structlog.typing import EventDict
|
7
|
+
from src.logging.configuration import get_default_logging_conf
|
8
|
+
from structlog.dev import ConsoleRenderer
|
9
|
+
|
10
|
+
|
11
|
+
class LoggingConfigurator:
    """Configures structlog and (optionally) standard-library logging for a service."""

    def __init__(
        self,
        service_name: str,
        log_level: str = "INFO",
        config: Optional[dict] = None,
        setup_logging_dict: bool = False,
    ):
        self.service_name = service_name
        # Normalise so callers may pass "info" or "INFO" interchangeably.
        self.log_level = log_level.upper()
        self.config = config
        self.setup_logging_dict = setup_logging_dict

    @staticmethod
    def add_logger_name(logger: logging.Logger, method_name: str, event_dict: EventDict) -> EventDict:
        """Add the originating logger name to the event dictionary.

        ``method_name`` is unused but required to match structlog's processor
        signature.
        """
        stdlib_record = event_dict.get("_record")
        event_dict["name"] = stdlib_record.name if stdlib_record else logger.name
        return event_dict

    def get_base_processors(self) -> list:
        """Return the structlog processors shared by every output format.

        The ordering of this chain is significant and must not be rearranged.
        """
        return [
            contextvars.merge_contextvars,
            self.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.stdlib.filter_by_level,
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.StackInfoRenderer(),
            structlog.dev.set_exc_info,
            structlog.processors.format_exc_info,
            structlog.processors.UnicodeDecoder(),
            structlog.processors.ExceptionPrettyPrinter(),
            structlog.stdlib.ExtraAdder(),
            structlog.processors.EventRenamer(to="message"),
        ]

    def get_processors(self, formatter: str = "json_formatter") -> list:
        """Return the full processor chain for the requested formatter."""
        chain = self.get_base_processors()
        # Console output renders directly; everything else is handed to the
        # stdlib ProcessorFormatter for the dictConfig-declared formatters.
        final_processor = (
            ConsoleRenderer(colors=True)
            if formatter == "plain_console"
            else structlog.stdlib.ProcessorFormatter.wrap_for_formatter
        )
        chain.append(final_processor)
        return chain

    def configure_structlog(
        self,
        custom_processors: Optional[list] = None,
        formatter: str = "json_formatter",
        formatter_std_lib: str = "json_formatter",
    ) -> None:
        """Configure structlog and, when requested, stdlib logging via dictConfig."""
        if self.setup_logging_dict:
            dict_conf = self.config if self.config else get_default_logging_conf(
                log_level=self.log_level,
                formatter=formatter,
                formatter_std_lib=formatter_std_lib,
            )
            logging.config.dictConfig(dict_conf)

        structlog.configure(
            processors=custom_processors if custom_processors else self.get_processors(formatter=formatter),
            logger_factory=structlog.stdlib.LoggerFactory(),
            wrapper_class=structlog.stdlib.BoundLogger,
            cache_logger_on_first_use=True,
        )
|
@@ -0,0 +1,48 @@
|
|
1
|
+
import uuid
|
2
|
+
|
3
|
+
import structlog
|
4
|
+
from fastapi import Request
|
5
|
+
from starlette.middleware.base import BaseHTTPMiddleware
|
6
|
+
|
7
|
+
|
8
|
+
class FlaskRequestContextMiddleware:
    """WSGI middleware that binds request metadata into structlog's contextvars."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        """Bind request id/method/path, then delegate to the wrapped WSGI app."""
        structlog.contextvars.bind_contextvars(
            # Honour an incoming X-Request-ID header; mint a fresh id otherwise.
            request_id=environ.get("HTTP_X_REQUEST_ID", str(uuid.uuid4())),
            request_method=environ.get("REQUEST_METHOD", "UNKNOWN"),
            request_path=environ.get("PATH_INFO", ""),
        )
        # NOTE(review): unlike the FastAPI middleware, the context is never
        # cleared here — confirm whether stale bindings can leak into later
        # requests handled by the same worker thread.
        return self.app(environ, start_response)
|
28
|
+
|
29
|
+
|
30
|
+
async def add_request_context_fastapi(request: Request, call_next):
    """FastAPI/Starlette middleware function binding request metadata to structlog.

    A request id is taken from the ``x-amzn-trace-id`` header when present and
    non-empty, otherwise a fresh UUID4 is generated. The id, HTTP method and
    URL are bound to the structlog context for the duration of the request.

    The context is cleared in a ``finally`` block so bound values never leak
    into subsequent requests — including when a downstream handler raises
    (the original implementation skipped the cleanup on exceptions).
    """
    request_id = request.headers.get("x-amzn-trace-id") or str(uuid.uuid4())

    structlog.contextvars.bind_contextvars(
        request_id=request_id,
        request_method=request.method,
        # NOTE(review): this binds the full URL, not just the path, despite the
        # key name — confirm whether ``request.url.path`` was intended.
        request_path=str(request.url),
    )
    try:
        return await call_next(request)
    finally:
        # Always clear, even on exception, so context never crosses requests.
        structlog.contextvars.clear_contextvars()
|
42
|
+
|
43
|
+
|
44
|
+
class FastAPIRequestContextMiddleware(BaseHTTPMiddleware):
    """``BaseHTTPMiddleware`` adapter around ``add_request_context_fastapi``."""

    async def dispatch(self, request: Request, call_next):
        """Delegate all context binding and cleanup to the functional middleware."""
        return await add_request_context_fastapi(request, call_next)
|
File without changes
|