djlogq 1.0.4__tar.gz → 1.0.6__tar.gz
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- {djlogq-1.0.4/src/djlogq.egg-info → djlogq-1.0.6}/PKG-INFO +3 -1
- {djlogq-1.0.4 → djlogq-1.0.6}/pyproject.toml +4 -3
- {djlogq-1.0.4 → djlogq-1.0.6/src/djlogq.egg-info}/PKG-INFO +3 -1
- {djlogq-1.0.4 → djlogq-1.0.6}/src/djlogq.egg-info/requires.txt +1 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/async_logger.py +50 -1
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/tests.py +60 -134
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/utils.py +34 -10
- {djlogq-1.0.4 → djlogq-1.0.6}/MANIFEST.in +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/README.md +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/setup.cfg +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/djlogq.egg-info/SOURCES.txt +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/djlogq.egg-info/dependency_links.txt +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/djlogq.egg-info/top_level.txt +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/__init__.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/admin.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/apps.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/management/__init__.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/management/commands/__init__.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/management/commands/clean_logs.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/middleware.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/migrations/0001_initial.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/migrations/0002_alter_logentry_function_alter_logentry_line_number_and_more.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/migrations/__init__.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/models.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/urls.py +0 -0
- {djlogq-1.0.4 → djlogq-1.0.6}/src/logq/views.py +0 -0
{djlogq-1.0.4/src/djlogq.egg-info → djlogq-1.0.6}/PKG-INFO

@@ -1,12 +1,14 @@
 Metadata-Version: 2.4
 Name: djlogq
-Version: 1.0.4
+Version: 1.0.6
 Summary: A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support.
 Author-email: mess <mesnavunawa@gmail.com>
 License: MIT
+Project-URL: Homepage, https://github.com/Mesake94/djlogq
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 Requires-Dist: Django
+Requires-Dist: requests
 Provides-Extra: dev
 Requires-Dist: build==1.2.1; extra == "dev"
 Requires-Dist: nox==2024.4.15; extra == "dev"
{djlogq-1.0.4 → djlogq-1.0.6}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "djlogq"
-version = "1.0.4"
+version = "1.0.6"
 description = "A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support."
 readme = "README.md"
 authors = [
@@ -13,9 +13,10 @@ authors = [
 license = {text = "MIT"}
 requires-python = ">=3.8"
 dependencies = [
-    "Django"
+    "Django",
+    "requests",
 ]
-
+urls = {Homepage = "https://github.com/Mesake94/djlogq"}
 [project.optional-dependencies]
 dev = [
     "build==1.2.1",
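For consumers, the practical effect of the pyproject change is a new runtime dependency on requests and a published homepage URL. A quick sanity check of the installed metadata after upgrading (a sketch; it assumes djlogq 1.0.6 is installed in the current environment):

# Sketch: inspect the new metadata with the standard library (Python 3.8+).
from importlib.metadata import metadata, requires

meta = metadata("djlogq")
print(meta["Version"])      # 1.0.6
print(meta["Project-URL"])  # Homepage, https://github.com/Mesake94/djlogq
print(requires("djlogq"))   # now lists 'requests' alongside 'Django'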
{djlogq-1.0.4 → djlogq-1.0.6/src/djlogq.egg-info}/PKG-INFO

@@ -1,12 +1,14 @@
 Metadata-Version: 2.4
 Name: djlogq
-Version: 1.0.4
+Version: 1.0.6
 Summary: A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support.
 Author-email: mess <mesnavunawa@gmail.com>
 License: MIT
+Project-URL: Homepage, https://github.com/Mesake94/djlogq
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 Requires-Dist: Django
+Requires-Dist: requests
 Provides-Extra: dev
 Requires-Dist: build==1.2.1; extra == "dev"
 Requires-Dist: nox==2024.4.15; extra == "dev"
{djlogq-1.0.4 → djlogq-1.0.6}/src/logq/async_logger.py

@@ -26,6 +26,9 @@ class AsyncLogger:
         self.running = False
         self.thread = None
         self._lock = threading.Lock()
+        self.dropped_count = 0
+        self.dropped_levels = {}  # track most serious dropped level
+        self._dropped_lock = threading.Lock()
 
     def start(self):
         """Start the logging thread."""
@@ -80,6 +83,36 @@ class AsyncLogger:
         try:
             with transaction.atomic():
                 LogEntry.objects.bulk_create(batch, ignore_conflicts=True)
+
+            # Log dropped messages if any
+            with self._dropped_lock:
+                if self.dropped_count > 0:
+                    # Find the most serious dropped level
+                    level_priority = {
+                        'DEBUG': 0,
+                        'INFO': 1,
+                        'WARNING': 2,
+                        'ERROR': 3,
+                        'CRITICAL': 4
+                    }
+                    most_serious_level = max(self.dropped_levels.keys(),
+                                             key=lambda x: level_priority.get(x, 0)) if self.dropped_levels else 'INFO'
+
+                    dropped_entry = LogEntry(
+                        level='WARNING',
+                        message=f"{self.dropped_count} log messages were dropped due to queue overflow",
+                        module='logq.async_logger',
+                        function='_flush_batch',
+                        extra_data={
+                            'dropped_count': self.dropped_count,
+                            'most_serious_level': most_serious_level
+                        }
+                    )
+                    dropped_entry.save()
+
+                    self.dropped_count = 0
+                    self.dropped_levels = {}
+
         except Exception as e:
             print(f"Error flushing log batch: {e}")
 
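The summary row written above is an ordinary LogEntry, so it can be located from a Django shell like any other record. A minimal sketch, assuming djlogq is installed and migrated (the filter string comes from the message format in the hunk):

# Sketch: find the most recent queue-overflow summary entry.
from logq.models import LogEntry

overflow = (LogEntry.objects
            .filter(message__contains="dropped due to queue overflow")
            .order_by('-id')
            .first())
if overflow is not None:
    print(overflow.extra_data.get('dropped_count'),
          overflow.extra_data.get('most_serious_level'))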
@@ -110,7 +143,23 @@ class AsyncLogger:
             self.queue.put_nowait(entry)
         except queue.Full:
             # If queue is full, log to console as fallback
-            print(f"Log queue full, dropping entry: [{level}] {message}")
+            # print(f"Log queue full, dropping entry: [{level}] {message}")
+            # Track dropped messages with counter
+            with self._dropped_lock:
+                self.dropped_count += 1
+                # Track the most serious level dropped
+                level_priority = {
+                    'DEBUG': 0,
+                    'INFO': 1,
+                    'WARNING': 2,
+                    'ERROR': 3,
+                    'CRITICAL': 4
+                }
+                current_priority = level_priority.get(level, 0)
+                if level not in self.dropped_levels or current_priority > level_priority.get(self.dropped_levels[level], 0):
+                    self.dropped_levels[level] = level
+                self.dropped_levels[level] = level
+
 
     def debug(self, message: str, **kwargs):
         self.log(LogLevel.DEBUG, message, **kwargs)
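Taken together, the two async_logger hunks replace the old per-drop console print with a counter: drops are tallied under a dedicated lock, the levels seen are recorded, and _flush_batch later writes a single WARNING summary. A self-contained sketch of the same idea (not djlogq's code; it keeps a single most-serious level rather than the released per-level dict):

import queue
import threading

LEVEL_PRIORITY = {'DEBUG': 0, 'INFO': 1, 'WARNING': 2, 'ERROR': 3, 'CRITICAL': 4}

class DropTracker:
    """Bounded queue that counts overflow drops instead of printing them."""

    def __init__(self, maxsize):
        self.queue = queue.Queue(maxsize=maxsize)
        self.dropped_count = 0
        self.most_serious = None
        self._lock = threading.Lock()

    def offer(self, level, message):
        try:
            self.queue.put_nowait((level, message))
        except queue.Full:
            with self._lock:
                self.dropped_count += 1
                if (self.most_serious is None
                        or LEVEL_PRIORITY[level] > LEVEL_PRIORITY[self.most_serious]):
                    self.most_serious = level

    def flush_summary(self):
        """Emit one summary for all drops since the last flush."""
        with self._lock:
            if self.dropped_count:
                print(f"{self.dropped_count} messages dropped "
                      f"(most serious: {self.most_serious})")
                self.dropped_count, self.most_serious = 0, None

tracker = DropTracker(maxsize=2)
for lvl in ('INFO', 'DEBUG', 'ERROR', 'INFO'):
    tracker.offer(lvl, "payload")
tracker.flush_summary()  # -> 2 messages dropped (most serious: ERROR)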
{djlogq-1.0.4 → djlogq-1.0.6}/src/logq/tests.py

@@ -8,7 +8,8 @@ import time
 import threading
 from .models import LogEntry, LogLevel
 from .async_logger import AsyncLogger, get_async_logger, stop_async_logger
-
+from .utils import log_performance, log_function_call
+import requests
 
 class AsyncLoggerTestCase(TransactionTestCase):
     def setUp(self):
@@ -97,7 +98,13 @@ class AsyncLoggerTestCase(TransactionTestCase):
         # Should have some entries but not all due to queue being full
         entries = LogEntry.objects.count()
         self.assertGreater(entries, 0)
-        self.assertLessEqual(entries,
+        self.assertLessEqual(entries, 101)  # max_queue_size + 1 (allowing for edge case)
+
+        # Check if the dropped log entry is present
+        dropped_entry = LogEntry.objects.filter(message__contains="dropped due to queue overflow").first()
+        self.assertIsNotNone(dropped_entry)
+        self.assertEqual(dropped_entry.level, LogLevel.WARNING)
+
 
 
 class LogEntryModelTestCase(TransactionTestCase):
@@ -148,10 +155,23 @@ class LogEntryModelTestCase(TransactionTestCase):
         self.assertIn("This is a very long message that should be truncated", str_repr[:100])
 
 
-
+
+@override_settings(
+    ASYNC_LOGGING_CONFIG={'MAX_QUEUE_SIZE': 500, 'FLUSH_INTERVAL': 0.5},
+    MIDDLEWARE=[
+        "django.middleware.security.SecurityMiddleware",
+        "django.contrib.sessions.middleware.SessionMiddleware",
+        "django.middleware.common.CommonMiddleware",
+        "django.middleware.csrf.CsrfViewMiddleware",
+        "django.contrib.auth.middleware.AuthenticationMiddleware",
+        "django.contrib.messages.middleware.MessageMiddleware",
+        "django.middleware.clickjacking.XFrameOptionsMiddleware",
+        "logq.middleware.AsyncLoggingMiddleware",  # Fixed: Added middleware
+    ]
+)
+class MiddlewareTestCase(TransactionTestCase):
     def setUp(self):
         super().setUp()
-        self.user = User.objects.create_user(username='testuser', password='testpass')
         # Stop the global logger to avoid interference
         stop_async_logger()
         # Clear all existing logs
@@ -164,154 +184,60 @@ class LoggingAPITestCase(TransactionTestCase):
             cursor.execute("DELETE FROM logq_logentry")
         super().tearDown()
 
-    def test_log_endpoint(self):
-        """Test the log API endpoint."""
-        # Verify we start with no logs
-        self.assertEqual(LogEntry.objects.count(), 0)
-
-        data = {
-            'level': 'INFO',
-            'message': 'Test API log',
-            'extra_data': {'source': 'api'}
-        }
-
-        response = self.client.post(
-            reverse('logq:log_endpoint'),
-            data=json.dumps(data),
-            content_type='application/json'
-        )
-
-        self.assertEqual(response.status_code, 200)
-        self.assertEqual(response.json()['status'], 'success')
-
-        # Wait for async processing
-        time.sleep(0.5)
-
-        # Verify we have exactly one log entry
-        self.assertEqual(LogEntry.objects.count(), 1)
-
-        entry = LogEntry.objects.first()
-        self.assertEqual(entry.message, 'Test API log')
-        self.assertEqual(entry.extra_data, {'source': 'api'})
-
-    def test_log_api_view(self):
-        """Test the class-based log API view."""
-        # Verify we start with no logs
-        self.assertEqual(LogEntry.objects.count(), 0)
-
-        data = {
-            'level': 'WARNING',
-            'message': 'Test warning',
-            'user_id': self.user.id,
-            'request_id': 'test-123'
-        }
-
-        response = self.client.post(
-            reverse('logq:log_api'),
-            data=json.dumps(data),
-            content_type='application/json'
-        )
-
-        self.assertEqual(response.status_code, 200)
-
-        time.sleep(0.5)
-
-        # Verify we have exactly one log entry
-        self.assertEqual(LogEntry.objects.count(), 1)
-
-        entry = LogEntry.objects.first()
-        self.assertEqual(entry.level, LogLevel.WARNING)
-        self.assertEqual(entry.user_id, self.user.id)
-        self.assertEqual(entry.request_id, 'test-123')
-
-    def test_get_logs_api(self):
-        """Test retrieving logs via API."""
-        # Verify we start with no logs
-        self.assertEqual(LogEntry.objects.count(), 0)
-
-        # Create some test logs directly
-        LogEntry.objects.create(level=LogLevel.INFO, message="Test 1")
-        LogEntry.objects.create(level=LogLevel.ERROR, message="Test 2")
-        LogEntry.objects.create(level=LogLevel.DEBUG, message="Test 3")
-
-        # Verify we have exactly 3 logs
-        self.assertEqual(LogEntry.objects.count(), 3)
-
-        response = self.client.get(reverse('logq:log_api'))
-        self.assertEqual(response.status_code, 200)
-
-        data = response.json()
-        self.assertEqual(len(data['logs']), 3)
-        self.assertEqual(data['logs'][0]['message'], "Test 1")
-
-    def test_invalid_log_level(self):
-        """Test API with invalid log level."""
-        data = {
-            'level': 'INVALID',
-            'message': 'Test message'
-        }
-
-        response = self.client.post(
-            reverse('logq:log_endpoint'),
-            data=json.dumps(data),
-            content_type='application/json'
-        )
-
-        self.assertEqual(response.status_code, 400)
-        self.assertIn('Invalid log level', response.json()['error'])
-
-
-@override_settings(ASYNC_LOGGING_CONFIG={'MAX_QUEUE_SIZE': 500, 'FLUSH_INTERVAL': 0.5})
-class ConfigurationTestCase(TransactionTestCase):
-    def setUp(self):
-        super().setUp()
-        # Clear all existing logs
-        with connection.cursor() as cursor:
-            cursor.execute("DELETE FROM logq_logentry")
-
-    def tearDown(self):
-        # Clear logs after test
-        with connection.cursor() as cursor:
-            cursor.execute("DELETE FROM logq_logentry")
-        super().tearDown()
-
-    def test_custom_configuration(self):
-        """Test that custom configuration is respected."""
-        logger = AsyncLogger()
-        self.assertEqual(logger.queue.maxsize, 500)
-        self.assertEqual(logger.flush_interval, 0.5)
 
 
-class
+class UtilsTestCase(TransactionTestCase):
     def setUp(self):
         super().setUp()
         # Stop the global logger to avoid interference
         stop_async_logger()
+
         # Clear all existing logs
         with connection.cursor() as cursor:
             cursor.execute("DELETE FROM logq_logentry")
+
+        # Create a properly configured global logger
+        from .async_logger import _async_logger
+        from . import async_logger as async_logger_module
+
+        # Create a test logger with fast flush interval
+        test_logger = AsyncLogger(max_queue_size=100, flush_interval=0.1)
+        test_logger.start()
+
+        # Replace the global logger
+        async_logger_module._async_logger = test_logger
+
+        time.sleep(0.2)  # Wait for thread to start
 
     def tearDown(self):
+        # Stop the global logger
+        stop_async_logger()
+        time.sleep(0.2)  # Wait for thread to stop
+
         # Clear logs after test
         with connection.cursor() as cursor:
             cursor.execute("DELETE FROM logq_logentry")
         super().tearDown()
 
-    def
-    """Test
-    #
-
+    def test_log_performance(self):
+        """Test log_performance decorator."""
+        # Debug: Check if the logger is running
+        logger = get_async_logger()
+
+        # Test direct logging first
+        logger.info("Direct test message")
+        time.sleep(0.3)
 
-
+        @log_performance(threshold_seconds=0.1, always_log=True)
+        def slow_function():
+            time.sleep(0.2)
+            return "Result"
 
-
+        slow_function()
+
+        time.sleep(0.5)  # Wait longer for flush
 
         entries = LogEntry.objects.all()
+
         self.assertGreater(entries.count(), 0)
-
-        # Should have request start and completion logs
-        start_logs = entries.filter(message__contains="Request started")
-        complete_logs = entries.filter(message__contains="Request completed")
-
-        self.assertGreater(start_logs.count(), 0)
-        self.assertGreater(complete_logs.count(), 0)
+
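The rewritten UtilsTestCase works by swapping the module-level logger, so code that calls get_async_logger() inside the decorators picks up a fast-flushing instance. Condensed from the setUp/tearDown above (a sketch; it assumes djlogq's logq.async_logger module with the _async_logger global shown in the diff):

import time

from logq import async_logger as async_logger_module
from logq.async_logger import AsyncLogger, stop_async_logger

def install_test_logger(max_queue_size=100, flush_interval=0.1):
    """Swap in a fast-flushing logger so tests see entries quickly."""
    test_logger = AsyncLogger(max_queue_size=max_queue_size,
                              flush_interval=flush_interval)
    test_logger.start()
    async_logger_module._async_logger = test_logger  # replace the global
    time.sleep(0.2)  # give the worker thread time to start
    return test_logger

def remove_test_logger():
    """Undo install_test_logger between tests."""
    stop_async_logger()
    time.sleep(0.2)  # give the worker thread time to stop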
{djlogq-1.0.4 → djlogq-1.0.6}/src/logq/utils.py

@@ -32,10 +32,10 @@ def log_function_call(func=None, *, level='INFO'):
                 }
             )
 
-            start_time = time.time()
+            start_time = time.perf_counter()
             try:
                 result = func(*args, **kwargs)
-                execution_time = time.time() - start_time
+                execution_time = time.perf_counter() - start_time
 
                 # Log successful completion
                 logger.log(
@@ -47,7 +47,7 @@ def log_function_call(func=None, *, level='INFO'):
                 return result
 
             except Exception as e:
-                execution_time = time.time() - start_time
+                execution_time = time.perf_counter() - start_time
 
                 # Log exception
                 logger.exception(
@@ -64,30 +64,54 @@ def log_function_call(func=None, *, level='INFO'):
    return decorator(func)
 
 
-def log_performance(threshold_seconds=1.0):
+def log_performance(threshold_seconds=1.0, always_log=False):
     """
-    Decorator to log
+    Decorator to log function performance metrics.
+
+    Args:
+        threshold_seconds: Log warning when execution exceeds this threshold
+        always_log: If True, log every function call for analytics (like Sentry spans)
 
     Usage:
         @log_performance(threshold_seconds=0.5)
         def my_slow_function():
             pass
+
+        @log_performance(always_log=True)
+        def my_analytics_function():
+            pass
     """
     def decorator(func):
         @wraps(func)
         def wrapper(*args, **kwargs):
-            start_time = time.time()
+            start_time = time.perf_counter()
             result = func(*args, **kwargs)
-            execution_time = time.time() - start_time
+            execution_time = time.perf_counter() - start_time
+
+            logger = get_async_logger()
+
+            # Always log for analytics if requested
+            if always_log:
+                logger.info(
+                    f"Function performance: {func.__name__}",
+                    extra_data={
+                        'execution_time': execution_time,
+                        'function_name': func.__name__,
+                        'module': func.__module__,
+                        'performance_metric': True,  # Tag for easy filtering
+                    }
+                )
 
+            # Log warning if threshold exceeded
             if execution_time > threshold_seconds:
-                logger = get_async_logger()
                 logger.warning(
                     f"Slow function detected: {func.__name__} took {execution_time:.3f}s",
                     extra_data={
                         'execution_time': execution_time,
                         'threshold': threshold_seconds,
                         'module': func.__module__,
+                        'function_name': func.__name__,
+                        'performance_metric': True,
                     }
                 )
 
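In practice the new flag gives the decorator two modes: threshold-only warnings, and per-call analytics rows tagged performance_metric. A minimal usage sketch in a project with djlogq installed (the function names are illustrative):

from logq.utils import log_performance

@log_performance(threshold_seconds=0.5)  # warn only when slower than 0.5s
def build_report():
    ...

@log_performance(threshold_seconds=0.5, always_log=True)  # also record every call
def fetch_prices():
    ...

Note that always_log=True writes one INFO entry per call, so it is best reserved for code paths whose volume the queue can absorb.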
@@ -114,7 +138,7 @@ class LogContext:
         self.start_time = None
 
     def __enter__(self):
-        self.start_time = time.time()
+        self.start_time = time.perf_counter()
         self.logger.log(
             self.level,
             f"Starting: {self.message}",
@@ -123,7 +147,7 @@ class LogContext:
         return self
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        execution_time = time.time() - self.start_time
+        execution_time = time.perf_counter() - self.start_time
 
         if exc_type is None:
             self.logger.log(
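LogContext gets the same perf_counter treatment. The diff only shows __enter__ and __exit__, so the constructor arguments in this usage sketch (logger, message, level) are inferred rather than confirmed:

from logq.async_logger import get_async_logger
from logq.utils import LogContext

def nightly_import():  # placeholder for real work
    ...

# Logs "Starting: nightly import" on entry and a timed entry on exit.
with LogContext(get_async_logger(), "nightly import", level="INFO"):
    nightly_import()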
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|