djlogq 1.0.5__py3-none-any.whl → 1.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
{djlogq-1.0.5.dist-info → djlogq-1.0.7.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: djlogq
- Version: 1.0.5
+ Version: 1.0.7
  Summary: A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support.
  Author-email: mess <mesnavunawa@gmail.com>
  License: MIT
@@ -8,6 +8,7 @@ Project-URL: Homepage, https://github.com/Mesake94/djlogq
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: Django
+ Requires-Dist: requests
  Provides-Extra: dev
  Requires-Dist: build==1.2.1; extra == "dev"
  Requires-Dist: nox==2024.4.15; extra == "dev"
@@ -28,6 +29,16 @@ A reusable Django app that provides asynchronous logging functionality using a s
  - **Decorators**: Utility decorators for function logging and performance monitoring
  - **Context Managers**: Easy-to-use context managers for operation logging
  - **Configurable**: Customizable queue size, flush intervals, and cleanup policies
+ - **Extensible**: Easily add your own custom handlers to process logs in different ways.
+
+ **Useful built-in and example handlers include:**
+ - **File Handler**: Write logs to a file.
+ - **Email Handler**: Send error logs via email.
+ - **Webhook Handler**: Forward logs to external services (e.g., Slack, Discord, custom endpoints).
+ - **Database Handler**: Store logs in custom database tables.
+ - **Console Handler**: Output logs to the console for development.
+ - **Third-Party Integrations**: Integrate with services like Sentry or Logstash.
+ - You can implement your own handler by subclassing the provided base handler class.
 
  ## Installation
 
@@ -61,6 +72,7 @@ ASYNC_LOGGING_CONFIG = {
      'AUTO_CLEANUP_DAYS': 30,
      'ENABLE_REQUEST_LOGGING': True,
      'IGNORE_PATHS': ['/admin/'],  # paths to ignore for request logging
+     'DEFAULT_HANDLERS': [],  # list of handler class paths, e.g. ['logq.handlers.FileHandler']
  }
  ```
 
@@ -145,6 +157,11 @@ response = requests.get('http://your-domain/logq/api/logs/?limit=10')
  logs = response.json()['logs']
  ```
 
+ ### Custom Handlers
+ You can define custom log handlers by subclassing `LogHandler` and passing them to `AsyncLogger`, or by listing them under `DEFAULT_HANDLERS` in the config. This lets you process or forward log entries in any way you need (e.g., send them to an external service, write them to a file); a minimal sketch follows below.
+
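A minimal sketch of such a handler (illustrative only; `SlackHandler`, the webhook URL, and the `myapp.handlers` path are not part of the package):

```python
from logq.async_logger import AsyncLogger, LogHandler
import requests


class SlackHandler(LogHandler):
    """Hypothetical handler that forwards serious entries to a Slack webhook."""

    def handle(self, log_entry):
        if log_entry.level in ('ERROR', 'CRITICAL'):
            # 'requests' became a package dependency in 1.0.7; the URL is a placeholder
            requests.post('https://hooks.slack.com/services/...',
                          json={'text': log_entry.message})


# Register programmatically:
logger = AsyncLogger(handlers=[SlackHandler()])
logger.start()

# Or via settings, using a dotted class path:
# ASYNC_LOGGING_CONFIG = {'DEFAULT_HANDLERS': ['myapp.handlers.SlackHandler']}
```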
  ### Admin Interface
 
  Access the admin interface at `/admin/` to view and manage logs. Features include:
@@ -176,7 +193,6 @@ python manage.py clean_logs --dry-run
  |---------|---------|-------------|
  | `MAX_QUEUE_SIZE` | 1000 | Maximum number of log entries in the queue |
  | `FLUSH_INTERVAL` | 1.0 | How often to flush logs to database (seconds) |
- | `AUTO_CLEANUP_DAYS` | 30 | Days to keep logs before auto-cleanup |
  | `ENABLE_REQUEST_LOGGING` | True | Whether to log all HTTP requests |
 
  ## Model Fields
{djlogq-1.0.5.dist-info → djlogq-1.0.7.dist-info}/RECORD RENAMED
@@ -1,12 +1,12 @@
  logq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  logq/admin.py,sha256=nC8TVXc64G41Mh05ijUSCJmvIDo-kgZHKyyOlmqH-AE,320
  logq/apps.py,sha256=LNBTCEKiwAU2wT3CTjQ-EfXumbBF6Izez5J7mTK3i-U,330
- logq/async_logger.py,sha256=I8za2lfEhiGrKDq76oZ4DZvvAlXeiRr17bmXT5tXSY4,5216
+ logq/async_logger.py,sha256=kNAmyVV7GWzsd_aZ4baSfNz5H7AwZV5YC4oV-Xy_k5M,10382
  logq/middleware.py,sha256=Y9EAnxrmavyDgVklEW893Wh0eeQPKdCvizcxGzPDLBo,3642
  logq/models.py,sha256=Gu8KLNMn7I4XtEUHQPavSSlcESQ-2Pu5JLKsL97t9Gg,1616
- logq/tests.py,sha256=iimhG7nvH5SbFn1og6soBMvCWhxCsuvgwkfaPOr5PC4,11102
+ logq/tests.py,sha256=180_XHLQ3I1am5qlaaf-36n8E-CaRI4ZT3uGZ-OyYJU,10704
  logq/urls.py,sha256=oGwVM9HXsVY_P86sVPuz5xnFofYfmkL8ZSZDhExhJQk,216
- logq/utils.py,sha256=Bg973EhnvQXvFSk1mtf8pcpQqnHiOnQRdaCLM9ibJPw,4204
+ logq/utils.py,sha256=lNm2TfbpWdKbAeoy4ny8-TFaNFyQGsBL_Wxq0VoxkO4,5235
  logq/views.py,sha256=WJpwiPyfItBbceM1862NcXp_ui4U6WyNBhw2P27mlJ4,3695
  logq/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  logq/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -14,7 +14,7 @@ logq/management/commands/clean_logs.py,sha256=Cc33EEqGGwsNcvQjwnLbM6kIb0lBJsyDQl
  logq/migrations/0001_initial.py,sha256=l4f-lUcO7OsABGYiSBp7fdWDt2rLHaIhR78pCKIAAdQ,2007
  logq/migrations/0002_alter_logentry_function_alter_logentry_line_number_and_more.py,sha256=SNBngZmhk9BgcOe8eAojX47V5JKC2V7oW9QtLHWIkFc,750
  logq/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- djlogq-1.0.5.dist-info/METADATA,sha256=ETmekY9yeJ4CUXuu-esuL2UZx77BaNj0h3z5XKCX6zk,6423
- djlogq-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- djlogq-1.0.5.dist-info/top_level.txt,sha256=VWj_EO93x0ix2Y2qm6obWT22L7VPFiZ7lQf0yIaI8do,5
- djlogq-1.0.5.dist-info/RECORD,,
+ djlogq-1.0.7.dist-info/METADATA,sha256=3EhYc2CraOPQMB61EeB-Q_kA5yyETvN1_WfEggsjqqE,7426
+ djlogq-1.0.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ djlogq-1.0.7.dist-info/top_level.txt,sha256=VWj_EO93x0ix2Y2qm6obWT22L7VPFiZ7lQf0yIaI8do,5
+ djlogq-1.0.7.dist-info/RECORD,,
logq/async_logger.py CHANGED
@@ -9,6 +9,19 @@ from django.utils import timezone
  from django.db import transaction
  from django.conf import settings
  from .models import LogEntry, LogLevel
+ from typing import List
+
+
+ class LogHandler:
+     """Base class for custom log handlers."""
+
+     def handle(self, log_entry: LogEntry) -> None:
+         """Handle a log entry. Override this method to implement custom logging behavior."""
+         pass
+
+     def flush(self) -> None:
+         """Flush any buffered log entries. Override this method to implement custom flushing behavior."""
+         pass
 
 
  class AsyncLogger:
@@ -16,7 +29,7 @@ class AsyncLogger:
      Asynchronous logger that runs in a separate thread to avoid blocking the main application.
      """
 
-     def __init__(self, max_queue_size: int = None, flush_interval: float = None):
+     def __init__(self, max_queue_size: int = None, flush_interval: float = None, handlers: List[LogHandler] = None):
          # Get configuration from settings
          config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
          self.max_queue_size = max_queue_size or config.get('MAX_QUEUE_SIZE', 1000)
@@ -26,6 +39,43 @@ class AsyncLogger:
          self.running = False
          self.thread = None
          self._lock = threading.Lock()
+         self.dropped_count = 0
+         self.dropped_levels = {}  # track most serious dropped level
+         self._dropped_lock = threading.Lock()
+
+         # initialize custom handlers
+         self.handlers = handlers or []
+         self._add_default_handlers()  # add default handlers to the logger
+
+     def _add_default_handlers(self):
+         """Add default handlers from settings if configured."""
+         config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
+         default_handlers = config.get('DEFAULT_HANDLERS', [])
+         for handler_class in default_handlers:
+             try:
+                 if isinstance(handler_class, str):
+                     # import the handler class from its dotted path
+                     module_path, class_name = handler_class.rsplit('.', 1)
+                     module = __import__(module_path, fromlist=[class_name])  # import the module
+                     handler_class = getattr(module, class_name)
+                 handler = handler_class()
+                 self.handlers.append(handler)
+             except Exception as e:
+                 print(f"Error initializing default handler {handler_class}: {e}")
+
+     def add_handler(self, handler: LogHandler):
+         """Add a custom handler to the logger."""
+         if not isinstance(handler, LogHandler):
+             raise ValueError("Handler must be an instance of LogHandler")
+         self.handlers.append(handler)
+
+     def remove_handler(self, handler: LogHandler):
+         """Remove a custom handler from the logger."""
+         if handler in self.handlers:
+             self.handlers.remove(handler)
+
+     def clear_handlers(self):
+         """Remove all custom handlers from the logger."""
+         self.handlers = []
 
      def start(self):
          """Start the logging thread."""
@@ -80,8 +130,62 @@
          try:
              with transaction.atomic():
                  LogEntry.objects.bulk_create(batch, ignore_conflicts=True)
+
+             # send log entries to custom handlers
+             self._send_to_handlers(batch)
+
+             # Log dropped messages if any
+             with self._dropped_lock:
+                 if self.dropped_count > 0:
+                     # Find the most serious dropped level
+                     level_priority = {
+                         'DEBUG': 0,
+                         'INFO': 1,
+                         'WARNING': 2,
+                         'ERROR': 3,
+                         'CRITICAL': 4
+                     }
+                     most_serious_level = max(self.dropped_levels.keys(),
+                                              key=lambda x: level_priority.get(x, 0)) if self.dropped_levels else 'INFO'
+
+                     dropped_entry = LogEntry(
+                         level='WARNING',
+                         message=f"{self.dropped_count} log messages were dropped due to queue overflow",
+                         module='logq.async_logger',
+                         function='_flush_batch',
+                         extra_data={
+                             'dropped_count': self.dropped_count,
+                             'most_serious_level': most_serious_level
+                         }
+                     )
+                     dropped_entry.save()
+
+                     self.dropped_count = 0
+                     self.dropped_levels = {}
+
          except Exception as e:
              print(f"Error flushing log batch: {e}")
+
+     def _send_to_handlers(self, batch: List[LogEntry]):
+         """Send log entries to all registered handlers.
+
+         Args:
+             batch: The batch of log entries to send to handlers.
+         """
+         for handler in self.handlers:
+             try:
+                 for entry in batch:
+                     handler.handle(entry)
+             except Exception as e:
+                 # Don't let an error in a handler crash the logger
+                 print(f"Error sending log entries to handler {handler.__class__.__name__}: {e}")
+
+     def _flush_handlers(self):
+         """Flush all registered handlers."""
+         for handler in self.handlers:
+             try:
+                 handler.flush()
+             except Exception as e:
+                 print(f"Error flushing handler {handler.__class__.__name__}: {e}")
 
      def log(self, level: str, message: str, **kwargs):
          """Add a log entry to the queue."""
@@ -110,7 +214,23 @@
              self.queue.put_nowait(entry)
          except queue.Full:
              # If queue is full, log to console as fallback
-             print(f"Log queue full, dropping entry: [{level}] {message}")
+             # print(f"Log queue full, dropping entry: [{level}] {message}")
+             # Track dropped messages with counter
+             with self._dropped_lock:
+                 self.dropped_count += 1
+                 # Track the most serious level dropped
+                 level_priority = {
+                     'DEBUG': 0,
+                     'INFO': 1,
+                     'WARNING': 2,
+                     'ERROR': 3,
+                     'CRITICAL': 4
+                 }
+                 current_priority = level_priority.get(level, 0)
+                 if level not in self.dropped_levels or current_priority > level_priority.get(self.dropped_levels[level], 0):
+                     self.dropped_levels[level] = level
+
 
      def debug(self, message: str, **kwargs):
          self.log(LogLevel.DEBUG, message, **kwargs)
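To make the `handle`/`flush` contract above concrete, here is a sketch of a buffering handler (the `BufferedFileHandler` name and file path are illustrative, not shipped code). `_send_to_handlers()` calls `handle()` once per entry, `_flush_handlers()` calls `flush()`, and exceptions in either are caught, so a faulty handler cannot crash the logging thread:

```python
from logq.async_logger import LogHandler


class BufferedFileHandler(LogHandler):
    """Illustrative handler that buffers formatted entries and writes them on flush()."""

    def __init__(self, path='/tmp/djlogq.log'):
        self.path = path
        self.buffer = []

    def handle(self, log_entry):
        # Called once per entry by AsyncLogger._send_to_handlers()
        self.buffer.append(f"[{log_entry.level}] {log_entry.message}")

    def flush(self):
        # Called by AsyncLogger._flush_handlers(); errors are caught by the logger
        if self.buffer:
            with open(self.path, 'a') as f:
                f.write('\n'.join(self.buffer) + '\n')
            self.buffer.clear()
```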
logq/tests.py CHANGED
@@ -7,7 +7,8 @@ import json
  import time
  import threading
  from .models import LogEntry, LogLevel
- from .async_logger import AsyncLogger, get_async_logger, stop_async_logger
+ from .async_logger import AsyncLogger, get_async_logger, stop_async_logger, LogHandler
+ from .utils import log_performance, log_function_call
 
 
  class AsyncLoggerTestCase(TransactionTestCase):
@@ -97,7 +98,13 @@
          # Should have some entries but not all due to queue being full
          entries = LogEntry.objects.count()
          self.assertGreater(entries, 0)
-         self.assertLessEqual(entries, 100)  # max_queue_size
+         self.assertLessEqual(entries, 101)  # max_queue_size + 1 (the queue-overflow warning entry)
+
+         # Check if the dropped log entry is present
+         dropped_entry = LogEntry.objects.filter(message__contains="dropped due to queue overflow").first()
+         self.assertIsNotNone(dropped_entry)
+         self.assertEqual(dropped_entry.level, LogLevel.WARNING)
+
 
 
  class LogEntryModelTestCase(TransactionTestCase):
@@ -148,10 +155,23 @@
          self.assertIn("This is a very long message that should be truncated", str_repr[:100])
 
 
- class LoggingAPITestCase(TransactionTestCase):
+
+ @override_settings(
+     ASYNC_LOGGING_CONFIG={'MAX_QUEUE_SIZE': 500, 'FLUSH_INTERVAL': 0.5},
+     MIDDLEWARE=[
+         "django.middleware.security.SecurityMiddleware",
+         "django.contrib.sessions.middleware.SessionMiddleware",
+         "django.middleware.common.CommonMiddleware",
+         "django.middleware.csrf.CsrfViewMiddleware",
+         "django.contrib.auth.middleware.AuthenticationMiddleware",
+         "django.contrib.messages.middleware.MessageMiddleware",
+         "django.middleware.clickjacking.XFrameOptionsMiddleware",
+         "logq.middleware.AsyncLoggingMiddleware",  # middleware under test
+     ]
+ )
+ class MiddlewareTestCase(TransactionTestCase):
      def setUp(self):
          super().setUp()
-         self.user = User.objects.create_user(username='testuser', password='testpass')
          # Stop the global logger to avoid interference
          stop_async_logger()
          # Clear all existing logs
@@ -164,154 +184,126 @@
          cursor.execute("DELETE FROM logq_logentry")
          super().tearDown()
 
-     def test_log_endpoint(self):
-         """Test the log API endpoint."""
-         # Verify we start with no logs
-         self.assertEqual(LogEntry.objects.count(), 0)
-
-         data = {
-             'level': 'INFO',
-             'message': 'Test API log',
-             'extra_data': {'source': 'api'}
-         }
-
-         response = self.client.post(
-             reverse('logq:log_endpoint'),
-             data=json.dumps(data),
-             content_type='application/json'
-         )
-
-         self.assertEqual(response.status_code, 200)
-         self.assertEqual(response.json()['status'], 'success')
-
-         # Wait for async processing
-         time.sleep(0.5)
-
-         # Verify we have exactly one log entry
-         self.assertEqual(LogEntry.objects.count(), 1)
-
-         entry = LogEntry.objects.first()
-         self.assertEqual(entry.message, 'Test API log')
-         self.assertEqual(entry.extra_data, {'source': 'api'})
-
-     def test_log_api_view(self):
-         """Test the class-based log API view."""
-         # Verify we start with no logs
-         self.assertEqual(LogEntry.objects.count(), 0)
-
-         data = {
-             'level': 'WARNING',
-             'message': 'Test warning',
-             'user_id': self.user.id,
-             'request_id': 'test-123'
-         }
-
-         response = self.client.post(
-             reverse('logq:log_api'),
-             data=json.dumps(data),
-             content_type='application/json'
-         )
-
-         self.assertEqual(response.status_code, 200)
-
-         time.sleep(0.5)
-
-         # Verify we have exactly one log entry
-         self.assertEqual(LogEntry.objects.count(), 1)
-
-         entry = LogEntry.objects.first()
-         self.assertEqual(entry.level, LogLevel.WARNING)
-         self.assertEqual(entry.user_id, self.user.id)
-         self.assertEqual(entry.request_id, 'test-123')
-
-     def test_get_logs_api(self):
-         """Test retrieving logs via API."""
-         # Verify we start with no logs
-         self.assertEqual(LogEntry.objects.count(), 0)
-
-         # Create some test logs directly
-         LogEntry.objects.create(level=LogLevel.INFO, message="Test 1")
-         LogEntry.objects.create(level=LogLevel.ERROR, message="Test 2")
-         LogEntry.objects.create(level=LogLevel.DEBUG, message="Test 3")
-
-         # Verify we have exactly 3 logs
-         self.assertEqual(LogEntry.objects.count(), 3)
-
-         response = self.client.get(reverse('logq:log_api'))
-         self.assertEqual(response.status_code, 200)
-
-         data = response.json()
-         self.assertEqual(len(data['logs']), 3)
-         self.assertEqual(data['logs'][0]['message'], "Test 1")
-
-     def test_invalid_log_level(self):
-         """Test API with invalid log level."""
-         data = {
-             'level': 'INVALID',
-             'message': 'Test message'
-         }
-
-         response = self.client.post(
-             reverse('logq:log_endpoint'),
-             data=json.dumps(data),
-             content_type='application/json'
-         )
-
-         self.assertEqual(response.status_code, 400)
-         self.assertIn('Invalid log level', response.json()['error'])
 
 
- @override_settings(ASYNC_LOGGING_CONFIG={'MAX_QUEUE_SIZE': 500, 'FLUSH_INTERVAL': 0.5})
- class ConfigurationTestCase(TransactionTestCase):
+ class UtilsTestCase(TransactionTestCase):
      def setUp(self):
          super().setUp()
+         # Stop the global logger to avoid interference
+         stop_async_logger()
+
          # Clear all existing logs
          with connection.cursor() as cursor:
              cursor.execute("DELETE FROM logq_logentry")
+
+         # Create a properly configured global logger
+         from .async_logger import _async_logger
+         from . import async_logger as async_logger_module
+
+         # Create a test logger with fast flush interval
+         test_logger = AsyncLogger(max_queue_size=100, flush_interval=0.1)
+         test_logger.start()
+
+         # Replace the global logger
+         async_logger_module._async_logger = test_logger
+
+         time.sleep(0.2)  # Wait for thread to start
 
      def tearDown(self):
+         # Stop the global logger
+         stop_async_logger()
+         time.sleep(0.2)  # Wait for thread to stop
+
          # Clear logs after test
          with connection.cursor() as cursor:
              cursor.execute("DELETE FROM logq_logentry")
          super().tearDown()
 
-     def test_custom_configuration(self):
-         """Test that custom configuration is respected."""
-         logger = AsyncLogger()
-         self.assertEqual(logger.queue.maxsize, 500)
-         self.assertEqual(logger.flush_interval, 0.5)
+     def test_log_performance(self):
+         """Test log_performance decorator."""
+         # Get the running global logger
+         logger = get_async_logger()
 
+         # Test direct logging first
+         logger.info("Direct test message")
+         time.sleep(0.3)
+
+         @log_performance(threshold_seconds=0.1, always_log=True)
+         def slow_function():
+             time.sleep(0.2)
+             return "Result"
+
+         slow_function()
+
+         time.sleep(0.5)  # Wait longer for flush
+
+         entries = LogEntry.objects.all()
 
- class MiddlewareTestCase(TransactionTestCase):
+         self.assertGreater(entries.count(), 0)
+
+
+ class LogHandlerTestCase(TransactionTestCase):
      def setUp(self):
          super().setUp()
          # Stop the global logger to avoid interference
          stop_async_logger()
+
          # Clear all existing logs
          with connection.cursor() as cursor:
              cursor.execute("DELETE FROM logq_logentry")
+
+         # Create a properly configured global logger
+         from .async_logger import _async_logger
+         from . import async_logger as async_logger_module
+
+         # Create a test logger with fast flush interval
+         test_logger = AsyncLogger(max_queue_size=100, flush_interval=0.1)
+         test_logger.start()
+
+         # Replace the global logger
+         async_logger_module._async_logger = test_logger
+
+         time.sleep(0.2)  # Wait for thread to start
 
      def tearDown(self):
+         # Stop the global logger
+         stop_async_logger()
+         time.sleep(0.2)  # Wait for thread to stop
+
          # Clear logs after test
          with connection.cursor() as cursor:
              cursor.execute("DELETE FROM logq_logentry")
          super().tearDown()
 
-     def test_middleware_request_logging(self):
-         """Test that middleware logs requests."""
+     def test_log_handler(self):
+         """Test log handler functionality."""
          # Verify we start with no logs
          self.assertEqual(LogEntry.objects.count(), 0)
 
-         response = self.client.get('/admin/')
+         # Create a test handler
+         class TestHandler(LogHandler):
+             def handle(self, log_entry: LogEntry) -> None:
+                 pass
+
+             def flush(self) -> None:
+                 pass
+
+         # Create a logger with the test handler
+         logger = get_async_logger()
+         logger.add_handler(TestHandler())
+         logger.start()
 
+         logger.info("Test message")
          time.sleep(0.5)
+
+         # Verify we have exactly one log entry
+         self.assertEqual(LogEntry.objects.count(), 1)
 
-         entries = LogEntry.objects.all()
-         self.assertGreater(entries.count(), 0)
-
-         # Should have request start and completion logs
-         start_logs = entries.filter(message__contains="Request started")
-         complete_logs = entries.filter(message__contains="Request completed")
+         # Verify the log entry was sent to the handler
+         log_entry = LogEntry.objects.first()
+         self.assertEqual(log_entry.message, "Test message")
 
-         self.assertGreater(start_logs.count(), 0)
-         self.assertGreater(complete_logs.count(), 0)
+         # Stop the logger
+         logger.stop()
+         time.sleep(0.2)  # Wait for thread to stop
+
logq/utils.py CHANGED
@@ -32,10 +32,10 @@ def log_function_call(func=None, *, level='INFO'):
                  }
              )
 
-             start_time = time.time()
+             start_time = time.perf_counter()
              try:
                  result = func(*args, **kwargs)
-                 execution_time = time.time() - start_time
+                 execution_time = time.perf_counter() - start_time
 
                  # Log successful completion
                  logger.log(
@@ -47,7 +47,7 @@
                  return result
 
              except Exception as e:
-                 execution_time = time.time() - start_time
+                 execution_time = time.perf_counter() - start_time
 
                  # Log exception
                  logger.exception(
@@ -64,30 +64,54 @@
      return decorator(func)
 
 
- def log_performance(threshold_seconds=1.0):
+ def log_performance(threshold_seconds=1.0, always_log=False):
      """
-     Decorator to log slow function calls.
+     Decorator to log function performance metrics.
+
+     Args:
+         threshold_seconds: Log a warning when execution exceeds this threshold
+         always_log: If True, log every function call for analytics (like Sentry spans)
 
      Usage:
          @log_performance(threshold_seconds=0.5)
         def my_slow_function():
              pass
+
+         @log_performance(always_log=True)
+         def my_analytics_function():
+             pass
      """
      def decorator(func):
          @wraps(func)
          def wrapper(*args, **kwargs):
-             start_time = time.time()
+             start_time = time.perf_counter()
              result = func(*args, **kwargs)
-             execution_time = time.time() - start_time
+             execution_time = time.perf_counter() - start_time
+
+             logger = get_async_logger()
+
+             # Always log for analytics if requested
+             if always_log:
+                 logger.info(
+                     f"Function performance: {func.__name__}",
+                     extra_data={
+                         'execution_time': execution_time,
+                         'function_name': func.__name__,
+                         'module': func.__module__,
+                         'performance_metric': True,  # Tag for easy filtering
+                     }
+                 )
 
+             # Log warning if threshold exceeded
              if execution_time > threshold_seconds:
-                 logger = get_async_logger()
                  logger.warning(
                      f"Slow function detected: {func.__name__} took {execution_time:.3f}s",
                      extra_data={
                          'execution_time': execution_time,
                          'threshold': threshold_seconds,
                          'module': func.__module__,
+                         'function_name': func.__name__,
+                         'performance_metric': True,
                      }
                  )
@@ -114,7 +138,7 @@ class LogContext:
          self.start_time = None
 
      def __enter__(self):
-         self.start_time = time.time()
+         self.start_time = time.perf_counter()
          self.logger.log(
              self.level,
              f"Starting: {self.message}",
@@ -123,7 +147,7 @@
          return self
 
      def __exit__(self, exc_type, exc_val, exc_tb):
-         execution_time = time.time() - self.start_time
+         execution_time = time.perf_counter() - self.start_time
 
          if exc_type is None:
              self.logger.log(
File without changes
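The utils changes above switch all timing to the monotonic `time.perf_counter()` clock and add the `always_log` analytics mode. A hedged usage sketch (the `LogContext` constructor signature is inferred from the `self.logger`/`self.message`/`self.level` attributes used in the diff, not confirmed by it):

```python
from logq.utils import log_performance, LogContext
from logq.async_logger import get_async_logger


@log_performance(threshold_seconds=0.5, always_log=True)
def import_users():
    # Every call is logged with 'execution_time' and 'performance_metric'
    # in extra_data; calls slower than 0.5s also produce a WARNING entry.
    return []


# Assumed constructor: LogContext(logger, message, level)
with LogContext(get_async_logger(), "nightly import", 'INFO'):
    import_users()
```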