djlogq 1.0.5__tar.gz → 1.0.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {djlogq-1.0.5/src/djlogq.egg-info → djlogq-1.0.7}/PKG-INFO +18 -2
  2. {djlogq-1.0.5 → djlogq-1.0.7}/README.md +16 -1
  3. {djlogq-1.0.5 → djlogq-1.0.7}/pyproject.toml +3 -2
  4. {djlogq-1.0.5 → djlogq-1.0.7/src/djlogq.egg-info}/PKG-INFO +18 -2
  5. {djlogq-1.0.5 → djlogq-1.0.7}/src/djlogq.egg-info/requires.txt +1 -0
  6. djlogq-1.0.7/src/logq/async_logger.py +280 -0
  7. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/tests.py +110 -118
  8. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/utils.py +34 -10
  9. djlogq-1.0.5/src/logq/async_logger.py +0 -160
  10. {djlogq-1.0.5 → djlogq-1.0.7}/MANIFEST.in +0 -0
  11. {djlogq-1.0.5 → djlogq-1.0.7}/setup.cfg +0 -0
  12. {djlogq-1.0.5 → djlogq-1.0.7}/src/djlogq.egg-info/SOURCES.txt +0 -0
  13. {djlogq-1.0.5 → djlogq-1.0.7}/src/djlogq.egg-info/dependency_links.txt +0 -0
  14. {djlogq-1.0.5 → djlogq-1.0.7}/src/djlogq.egg-info/top_level.txt +0 -0
  15. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/__init__.py +0 -0
  16. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/admin.py +0 -0
  17. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/apps.py +0 -0
  18. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/management/__init__.py +0 -0
  19. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/management/commands/__init__.py +0 -0
  20. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/management/commands/clean_logs.py +0 -0
  21. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/middleware.py +0 -0
  22. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/migrations/0001_initial.py +0 -0
  23. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/migrations/0002_alter_logentry_function_alter_logentry_line_number_and_more.py +0 -0
  24. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/migrations/__init__.py +0 -0
  25. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/models.py +0 -0
  26. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/urls.py +0 -0
  27. {djlogq-1.0.5 → djlogq-1.0.7}/src/logq/views.py +0 -0
{djlogq-1.0.5/src/djlogq.egg-info → djlogq-1.0.7}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: djlogq
- Version: 1.0.5
+ Version: 1.0.7
  Summary: A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support.
  Author-email: mess <mesnavunawa@gmail.com>
  License: MIT
@@ -8,6 +8,7 @@ Project-URL: Homepage, https://github.com/Mesake94/djlogq
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: Django
+ Requires-Dist: requests
  Provides-Extra: dev
  Requires-Dist: build==1.2.1; extra == "dev"
  Requires-Dist: nox==2024.4.15; extra == "dev"
@@ -28,6 +29,16 @@ A reusable Django app that provides asynchronous logging functionality using a s
  - **Decorators**: Utility decorators for function logging and performance monitoring
  - **Context Managers**: Easy-to-use context managers for operation logging
  - **Configurable**: Customizable queue size, flush intervals, and cleanup policies
+ - **Extendible**: Easily add your own custom handlers to process logs in different ways.
+
+ **Useful built-in and example handlers include:**
+ - **File Handler**: Write logs to a file.
+ - **Email Handler**: Send error logs via email.
+ - **Webhook Handler**: Forward logs to external services (e.g., Slack, Discord, custom endpoints).
+ - **Database Handler**: Store logs in custom database tables.
+ - **Console Handler**: Output logs to the console for development.
+ - **Third-Party Integrations**: Integrate with services like Sentry or Logstash.
+ - You can implement your own handler by subclassing the provided base handler class.

  ## Installation

@@ -61,6 +72,7 @@ ASYNC_LOGGING_CONFIG = {
  'AUTO_CLEANUP_DAYS': 30,
  'ENABLE_REQUEST_LOGGING': True,
  'IGNORE_PATHS': ['/admin/'], # paths to ignore for request logging
+ 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler']
  }
  ```

@@ -145,6 +157,11 @@ response = requests.get('http://your-domain/logq/api/logs/?limit=10')
  logs = response.json()['logs']
  ```

+ ### CUSTOM HANDLERS
+ You can define custom log handlers by subclassing `LogHandler` and passing them to `AsyncLogger` or define them in the `DEFAULT_HANDLERS` section of the config. This allows you to process or forward log entries in any way you need (e.g., send to an external service, write to a file, etc).
+
+
+
  ### Admin Interface

  Access the admin interface at `/admin/` to view and manage logs. Features include:
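
The two additions above (the `DEFAULT_HANDLERS` config key and the CUSTOM HANDLERS section) read best together. A minimal sketch of the pattern, using only names that appear in this diff; the `PrintHandler` class and the `myapp.handlers` module path are illustrative, not part of the package:

```python
# settings.py -- handlers can be registered by dotted path; AsyncLogger's
# _add_default_handlers() imports and instantiates each entry at startup.
ASYNC_LOGGING_CONFIG = {
    'DEFAULT_HANDLERS': ['myapp.handlers.PrintHandler'],  # hypothetical path
}

# myapp/handlers.py -- a hypothetical handler that echoes entries to stdout.
from logq.async_logger import LogHandler, get_async_logger

class PrintHandler(LogHandler):
    def handle(self, log_entry):
        # Called once per LogEntry after each batch is flushed to the database.
        print(f"[{log_entry.level}] {log_entry.message}")

# Handlers can also be attached at runtime instead of via settings:
logger = get_async_logger()
logger.add_handler(PrintHandler())
logger.info("hello from a custom handler")
```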
@@ -176,7 +193,6 @@ python manage.py clean_logs --dry-run
  |---------|---------|-------------|
  | `MAX_QUEUE_SIZE` | 1000 | Maximum number of log entries in the queue |
  | `FLUSH_INTERVAL` | 1.0 | How often to flush logs to database (seconds) |
- | `AUTO_CLEANUP_DAYS` | 30 | Days to keep logs before auto-cleanup |
  | `ENABLE_REQUEST_LOGGING` | True | Whether to log all HTTP requests |

  ## Model Fields
{djlogq-1.0.5 → djlogq-1.0.7}/README.md
@@ -13,6 +13,16 @@ A reusable Django app that provides asynchronous logging functionality using a s
  - **Decorators**: Utility decorators for function logging and performance monitoring
  - **Context Managers**: Easy-to-use context managers for operation logging
  - **Configurable**: Customizable queue size, flush intervals, and cleanup policies
+ - **Extendible**: Easily add your own custom handlers to process logs in different ways.
+
+ **Useful built-in and example handlers include:**
+ - **File Handler**: Write logs to a file.
+ - **Email Handler**: Send error logs via email.
+ - **Webhook Handler**: Forward logs to external services (e.g., Slack, Discord, custom endpoints).
+ - **Database Handler**: Store logs in custom database tables.
+ - **Console Handler**: Output logs to the console for development.
+ - **Third-Party Integrations**: Integrate with services like Sentry or Logstash.
+ - You can implement your own handler by subclassing the provided base handler class.

  ## Installation

@@ -46,6 +56,7 @@ ASYNC_LOGGING_CONFIG = {
  'AUTO_CLEANUP_DAYS': 30,
  'ENABLE_REQUEST_LOGGING': True,
  'IGNORE_PATHS': ['/admin/'], # paths to ignore for request logging
+ 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler']
  }
  ```

@@ -130,6 +141,11 @@ response = requests.get('http://your-domain/logq/api/logs/?limit=10')
  logs = response.json()['logs']
  ```

+ ### CUSTOM HANDLERS
+ You can define custom log handlers by subclassing `LogHandler` and passing them to `AsyncLogger` or define them in the `DEFAULT_HANDLERS` section of the config. This allows you to process or forward log entries in any way you need (e.g., send to an external service, write to a file, etc).
+
+
+
  ### Admin Interface

  Access the admin interface at `/admin/` to view and manage logs. Features include:
@@ -161,7 +177,6 @@ python manage.py clean_logs --dry-run
  |---------|---------|-------------|
  | `MAX_QUEUE_SIZE` | 1000 | Maximum number of log entries in the queue |
  | `FLUSH_INTERVAL` | 1.0 | How often to flush logs to database (seconds) |
- | `AUTO_CLEANUP_DAYS` | 30 | Days to keep logs before auto-cleanup |
  | `ENABLE_REQUEST_LOGGING` | True | Whether to log all HTTP requests |

  ## Model Fields
{djlogq-1.0.5 → djlogq-1.0.7}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "djlogq"
- version = "1.0.5"
+ version = "1.0.7"
  description = "A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support."
  readme = "README.md"
  authors = [
@@ -13,7 +13,8 @@ authors = [
  license = {text = "MIT"}
  requires-python = ">=3.8"
  dependencies = [
- "Django"
+ "Django",
+ "requests",
  ]
  urls = {Homepage = "https://github.com/Mesake94/djlogq"}
  [project.optional-dependencies]
{djlogq-1.0.5 → djlogq-1.0.7/src/djlogq.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: djlogq
- Version: 1.0.5
+ Version: 1.0.7
  Summary: A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support.
  Author-email: mess <mesnavunawa@gmail.com>
  License: MIT
@@ -8,6 +8,7 @@ Project-URL: Homepage, https://github.com/Mesake94/djlogq
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: Django
+ Requires-Dist: requests
  Provides-Extra: dev
  Requires-Dist: build==1.2.1; extra == "dev"
  Requires-Dist: nox==2024.4.15; extra == "dev"
@@ -28,6 +29,16 @@ A reusable Django app that provides asynchronous logging functionality using a s
  - **Decorators**: Utility decorators for function logging and performance monitoring
  - **Context Managers**: Easy-to-use context managers for operation logging
  - **Configurable**: Customizable queue size, flush intervals, and cleanup policies
+ - **Extendible**: Easily add your own custom handlers to process logs in different ways.
+
+ **Useful built-in and example handlers include:**
+ - **File Handler**: Write logs to a file.
+ - **Email Handler**: Send error logs via email.
+ - **Webhook Handler**: Forward logs to external services (e.g., Slack, Discord, custom endpoints).
+ - **Database Handler**: Store logs in custom database tables.
+ - **Console Handler**: Output logs to the console for development.
+ - **Third-Party Integrations**: Integrate with services like Sentry or Logstash.
+ - You can implement your own handler by subclassing the provided base handler class.

  ## Installation

@@ -61,6 +72,7 @@ ASYNC_LOGGING_CONFIG = {
  'AUTO_CLEANUP_DAYS': 30,
  'ENABLE_REQUEST_LOGGING': True,
  'IGNORE_PATHS': ['/admin/'], # paths to ignore for request logging
+ 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler']
  }
  ```

@@ -145,6 +157,11 @@ response = requests.get('http://your-domain/logq/api/logs/?limit=10')
  logs = response.json()['logs']
  ```

+ ### CUSTOM HANDLERS
+ You can define custom log handlers by subclassing `LogHandler` and passing them to `AsyncLogger` or define them in the `DEFAULT_HANDLERS` section of the config. This allows you to process or forward log entries in any way you need (e.g., send to an external service, write to a file, etc).
+
+
+
  ### Admin Interface

  Access the admin interface at `/admin/` to view and manage logs. Features include:
@@ -176,7 +193,6 @@ python manage.py clean_logs --dry-run
  |---------|---------|-------------|
  | `MAX_QUEUE_SIZE` | 1000 | Maximum number of log entries in the queue |
  | `FLUSH_INTERVAL` | 1.0 | How often to flush logs to database (seconds) |
- | `AUTO_CLEANUP_DAYS` | 30 | Days to keep logs before auto-cleanup |
  | `ENABLE_REQUEST_LOGGING` | True | Whether to log all HTTP requests |

  ## Model Fields
{djlogq-1.0.5 → djlogq-1.0.7}/src/djlogq.egg-info/requires.txt
@@ -1,4 +1,5 @@
  Django
+ requests

  [dev]
  build==1.2.1
djlogq-1.0.7/src/logq/async_logger.py
@@ -0,0 +1,280 @@
+ import threading
+ import queue
+ import time
+ import logging
+ import traceback
+ import inspect
+ from typing import Optional, Dict, Any
+ from django.utils import timezone
+ from django.db import transaction
+ from django.conf import settings
+ from .models import LogEntry, LogLevel
+ from typing import List
+
+
+ class LogHandler:
+     """Base class for custom log handlers"""
+
+     def handle(self, log_entry: LogEntry) -> None:
+         """Handle a log entry. Override this method to implement custom logging behavior."""
+         pass
+
+     def flush(self) -> None:
+         """Flush any buffered log entries. Override this method to implement custom flushing behavior."""
+         pass
+
+
+ class AsyncLogger:
+     """
+     Asynchronous logger that runs in a separate thread to avoid blocking the main application.
+     """
+
+     def __init__(self, max_queue_size: int = None, flush_interval: float = None, handlers: List[LogHandler] = None):
+         # Get configuration from settings
+         config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
+         self.max_queue_size = max_queue_size or config.get('MAX_QUEUE_SIZE', 1000)
+         self.flush_interval = flush_interval or config.get('FLUSH_INTERVAL', 1.0)
+
+         self.queue = queue.Queue(maxsize=self.max_queue_size)
+         self.running = False
+         self.thread = None
+         self._lock = threading.Lock()
+         self.dropped_count = 0
+         self.dropped_levels = {}  # track most serious dropped level
+         self._dropped_lock = threading.Lock()
+
+         # initialize custom handlers
+         self.handlers = handlers or []
+         self._add_default_handlers()  # add default handlers to the logger
+
+     def _add_default_handlers(self):
+         """Add default handlers from settings if configured."""
+         config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
+         default_handlers = config.get('DEFAULT_HANDLERS', [])
+         for handler_class in default_handlers:
+             try:
+                 if isinstance(handler_class, str):
+                     # import handler class from string
+                     module_path, class_name = handler_class.rsplit('.', 1)
+                     module = __import__(module_path, fromlist=[class_name])  # import the module
+                     handler_class = getattr(module, class_name)
+                 handler = handler_class()
+                 self.handlers.append(handler)
+             except Exception as e:
+                 print(f"Error initializing default handler {handler_class}: {e}")
+
+     def add_handler(self, handler: LogHandler):
+         """Add a custom handler to the logger."""
+         if not isinstance(handler, LogHandler):
+             raise ValueError("Handler must be an instance of LogHandler")
+         self.handlers.append(handler)
+
+     def remove_handler(self, handler: LogHandler):
+         """Remove a custom handler from the logger."""
+         if handler in self.handlers:
+             self.handlers.remove(handler)
+
+     def clear_handlers(self):
+         """Remove all custom handlers from the logger."""
+         self.handlers = []
+
+     def start(self):
+         """Start the logging thread."""
+         with self._lock:
+             if not self.running:
+                 self.running = True
+                 self.thread = threading.Thread(target=self._worker, daemon=True)
+                 self.thread.start()
+
+     def stop(self):
+         """Stop the logging thread."""
+         with self._lock:
+             if self.running:
+                 self.running = False
+                 if self.thread:
+                     self.thread.join(timeout=5.0)
+
+     def _worker(self):
+         """Worker thread that processes log entries from the queue."""
+         batch = []
+         last_flush = time.time()
+
+         while self.running:
+             try:
+                 # Try to get a log entry with timeout
+                 try:
+                     entry = self.queue.get(timeout=0.1)
+                     batch.append(entry)
+                 except queue.Empty:
+                     pass
+
+                 # Flush batch if it's time or batch is getting large
+                 current_time = time.time()
+                 if (current_time - last_flush >= self.flush_interval or
+                         len(batch) >= 50):
+                     if batch:
+                         self._flush_batch(batch)
+                         batch = []
+                     last_flush = current_time
+
+             except Exception as e:
+                 # Log the error to prevent infinite loops
+                 print(f"Error in async logger worker: {e}")
+                 time.sleep(1)
+
+         # Flush remaining entries
+         if batch:
+             self._flush_batch(batch)
+
+     def _flush_batch(self, batch):
+         """Flush a batch of log entries to the database."""
+         try:
+             with transaction.atomic():
+                 LogEntry.objects.bulk_create(batch, ignore_conflicts=True)
+
+             # send log entries to custom handlers
+             self._send_to_handlers(batch)
+
+             # Log dropped messages if any
+             with self._dropped_lock:
+                 if self.dropped_count > 0:
+                     # Find the most serious dropped level
+                     level_priority = {
+                         'DEBUG': 0,
+                         'INFO': 1,
+                         'WARNING': 2,
+                         'ERROR': 3,
+                         'CRITICAL': 4
+                     }
+                     most_serious_level = max(self.dropped_levels.keys(),
+                         key=lambda x: level_priority.get(x, 0)) if self.dropped_levels else 'INFO'
+
+                     dropped_entry = LogEntry(
+                         level='WARNING',
+                         message=f"{self.dropped_count} log messages were dropped due to queue overflow",
+                         module='logq.async_logger',
+                         function='_flush_batch',
+                         extra_data={
+                             'dropped_count': self.dropped_count,
+                             'most_serious_level': most_serious_level
+                         }
+                     )
+                     dropped_entry.save()
+
+                     self.dropped_count = 0
+                     self.dropped_levels = {}
+
+         except Exception as e:
+             print(f"Error flushing log batch: {e}")
+
+     def _send_to_handlers(self, batch: List[LogEntry]):
+         """Send log entries to all registered handlers.
+         Args:
+             batch: List[LogEntry] - The batch of log entries to send to handlers
+         """
+         for handler in self.handlers:
+             try:
+                 for entry in batch:
+                     handler.handle(entry)
+             except Exception as e:
+                 # Don't let an error in a handler crash the logger
+                 print(f"Error sending log entries to handler {handler.__class__.__name__}: {e}")
+
+     def _flush_handlers(self):
+         """Flush all registered handlers."""
+         for handler in self.handlers:
+             try:
+                 handler.flush()
+             except Exception as e:
+                 print(f"Error flushing handler {handler.__class__.__name__}: {e}")
+
+     def log(self, level: str, message: str, **kwargs):
+         """Add a log entry to the queue."""
+         if not self.running:
+             return
+
+         # Get caller information
+         frame = inspect.currentframe().f_back
+         module = frame.f_globals.get('__name__', 'unknown')
+         function = frame.f_code.co_name
+         line_number = frame.f_lineno
+
+         # Create log entry
+         entry = LogEntry(
+             level=level,
+             message=message,
+             module=module,
+             function=function,
+             line_number=line_number,
+             user_id=kwargs.get('user_id'),
+             request_id=kwargs.get('request_id'),
+             extra_data=kwargs.get('extra_data', {})
+         )
+
+         try:
+             self.queue.put_nowait(entry)
+         except queue.Full:
+             # If queue is full, log to console as fallback
+             # print(f"Log queue full, dropping entry: [{level}] {message}")
+             # Track dropped messages with counter
+             with self._dropped_lock:
+                 self.dropped_count += 1
+                 # Track the most serious level dropped
+                 level_priority = {
+                     'DEBUG': 0,
+                     'INFO': 1,
+                     'WARNING': 2,
+                     'ERROR': 3,
+                     'CRITICAL': 4
+                 }
+                 current_priority = level_priority.get(level, 0)
+                 if level not in self.dropped_levels or current_priority > level_priority.get(self.dropped_levels[level], 0):
+                     self.dropped_levels[level] = level
+
+
+     def debug(self, message: str, **kwargs):
+         self.log(LogLevel.DEBUG, message, **kwargs)
+
+     def info(self, message: str, **kwargs):
+         self.log(LogLevel.INFO, message, **kwargs)
+
+     def warning(self, message: str, **kwargs):
+         self.log(LogLevel.WARNING, message, **kwargs)
+
+     def error(self, message: str, **kwargs):
+         self.log(LogLevel.ERROR, message, **kwargs)
+
+     def critical(self, message: str, **kwargs):
+         self.log(LogLevel.CRITICAL, message, **kwargs)
+
+     def exception(self, message: str, exc_info=None, **kwargs):
+         """Log an exception with traceback."""
+         if exc_info is None:
+             exc_info = traceback.format_exc()
+
+         extra_data = kwargs.get('extra_data', {})
+         extra_data['traceback'] = exc_info
+
+         self.log(LogLevel.ERROR, message, extra_data=extra_data, **kwargs)
+
+
+ # Global logger instance
+ _async_logger = None
+
+
+ def get_async_logger() -> AsyncLogger:
+     """Get the global async logger instance."""
+     global _async_logger
+     if _async_logger is None:
+         _async_logger = AsyncLogger()
+         _async_logger.start()
+     return _async_logger
+
+
+ def stop_async_logger():
+     """Stop the global async logger."""
+     global _async_logger
+     if _async_logger:
+         _async_logger.stop()
+         _async_logger = None
+
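
For orientation, the lifecycle of the module added above, condensed into a short sketch (a standalone logger rather than the shared global; all names come from the file itself):

```python
from logq.async_logger import AsyncLogger

logger = AsyncLogger(max_queue_size=100, flush_interval=0.5)
logger.start()                       # spawns the daemon worker thread
logger.info("queued, not blocking")  # enqueued; the worker bulk-inserts the
                                     # batch within ~0.5s or at 50 entries
logger.stop()                        # stops the loop; remaining entries flush
```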
{djlogq-1.0.5 → djlogq-1.0.7}/src/logq/tests.py
@@ -7,7 +7,8 @@ import json
  import time
  import threading
  from .models import LogEntry, LogLevel
- from .async_logger import AsyncLogger, get_async_logger, stop_async_logger
+ from .async_logger import AsyncLogger, get_async_logger, stop_async_logger, LogHandler
+ from .utils import log_performance, log_function_call


  class AsyncLoggerTestCase(TransactionTestCase):
@@ -97,7 +98,13 @@ class AsyncLoggerTestCase(TransactionTestCase):
          # Should have some entries but not all due to queue being full
          entries = LogEntry.objects.count()
          self.assertGreater(entries, 0)
-         self.assertLessEqual(entries, 100)  # max_queue_size
+         self.assertLessEqual(entries, 101)  # max_queue_size + 1 (allowing for edge case)
+
+         # Check if the dropped log entry is present
+         dropped_entry = LogEntry.objects.filter(message__contains="dropped due to queue overflow").first()
+         self.assertIsNotNone(dropped_entry)
+         self.assertEqual(dropped_entry.level, LogLevel.WARNING)
+


  class LogEntryModelTestCase(TransactionTestCase):
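
The sentinel entry this test asserts on is written by `_flush_batch` in the new `async_logger.py` shown earlier in this diff. A sketch of how the overflow accounting could be inspected, assuming only fields that appear in that code:

```python
from logq.models import LogEntry

# One WARNING sentinel is written per flush cycle in which messages were
# dropped; the counts live in its extra_data rather than in separate rows.
overflow = LogEntry.objects.filter(
    message__contains="dropped due to queue overflow"
).order_by('-id').first()
if overflow is not None:
    print(overflow.extra_data['dropped_count'],
          overflow.extra_data['most_serious_level'])
```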
@@ -148,10 +155,23 @@ class LogEntryModelTestCase(TransactionTestCase):
          self.assertIn("This is a very long message that should be truncated", str_repr[:100])


- class LoggingAPITestCase(TransactionTestCase):
+
+ @override_settings(
+     ASYNC_LOGGING_CONFIG={'MAX_QUEUE_SIZE': 500, 'FLUSH_INTERVAL': 0.5},
+     MIDDLEWARE=[
+         "django.middleware.security.SecurityMiddleware",
+         "django.contrib.sessions.middleware.SessionMiddleware",
+         "django.middleware.common.CommonMiddleware",
+         "django.middleware.csrf.CsrfViewMiddleware",
+         "django.contrib.auth.middleware.AuthenticationMiddleware",
+         "django.contrib.messages.middleware.MessageMiddleware",
+         "django.middleware.clickjacking.XFrameOptionsMiddleware",
+         "logq.middleware.AsyncLoggingMiddleware",  # Fixed: Added middleware
+     ]
+ )
+ class MiddlewareTestCase(TransactionTestCase):
      def setUp(self):
          super().setUp()
-         self.user = User.objects.create_user(username='testuser', password='testpass')
          # Stop the global logger to avoid interference
          stop_async_logger()
          # Clear all existing logs
@@ -164,154 +184,126 @@ class LoggingAPITestCase(TransactionTestCase):
              cursor.execute("DELETE FROM logq_logentry")
          super().tearDown()

-     def test_log_endpoint(self):
-         """Test the log API endpoint."""
-         # Verify we start with no logs
-         self.assertEqual(LogEntry.objects.count(), 0)
-
-         data = {
-             'level': 'INFO',
-             'message': 'Test API log',
-             'extra_data': {'source': 'api'}
-         }
-
-         response = self.client.post(
-             reverse('logq:log_endpoint'),
-             data=json.dumps(data),
-             content_type='application/json'
-         )
-
-         self.assertEqual(response.status_code, 200)
-         self.assertEqual(response.json()['status'], 'success')
-
-         # Wait for async processing
-         time.sleep(0.5)
-
-         # Verify we have exactly one log entry
-         self.assertEqual(LogEntry.objects.count(), 1)
-
-         entry = LogEntry.objects.first()
-         self.assertEqual(entry.message, 'Test API log')
-         self.assertEqual(entry.extra_data, {'source': 'api'})
-
-     def test_log_api_view(self):
-         """Test the class-based log API view."""
-         # Verify we start with no logs
-         self.assertEqual(LogEntry.objects.count(), 0)
-
-         data = {
-             'level': 'WARNING',
-             'message': 'Test warning',
-             'user_id': self.user.id,
-             'request_id': 'test-123'
-         }
-
-         response = self.client.post(
-             reverse('logq:log_api'),
-             data=json.dumps(data),
-             content_type='application/json'
-         )
-
-         self.assertEqual(response.status_code, 200)
-
-         time.sleep(0.5)
-
-         # Verify we have exactly one log entry
-         self.assertEqual(LogEntry.objects.count(), 1)
-
-         entry = LogEntry.objects.first()
-         self.assertEqual(entry.level, LogLevel.WARNING)
-         self.assertEqual(entry.user_id, self.user.id)
-         self.assertEqual(entry.request_id, 'test-123')
-
-     def test_get_logs_api(self):
-         """Test retrieving logs via API."""
-         # Verify we start with no logs
-         self.assertEqual(LogEntry.objects.count(), 0)
-
-         # Create some test logs directly
-         LogEntry.objects.create(level=LogLevel.INFO, message="Test 1")
-         LogEntry.objects.create(level=LogLevel.ERROR, message="Test 2")
-         LogEntry.objects.create(level=LogLevel.DEBUG, message="Test 3")
-
-         # Verify we have exactly 3 logs
-         self.assertEqual(LogEntry.objects.count(), 3)
-
-         response = self.client.get(reverse('logq:log_api'))
-         self.assertEqual(response.status_code, 200)
-
-         data = response.json()
-         self.assertEqual(len(data['logs']), 3)
-         self.assertEqual(data['logs'][0]['message'], "Test 1")
-
-     def test_invalid_log_level(self):
-         """Test API with invalid log level."""
-         data = {
-             'level': 'INVALID',
-             'message': 'Test message'
-         }
-
-         response = self.client.post(
-             reverse('logq:log_endpoint'),
-             data=json.dumps(data),
-             content_type='application/json'
-         )
-
-         self.assertEqual(response.status_code, 400)
-         self.assertIn('Invalid log level', response.json()['error'])


- @override_settings(ASYNC_LOGGING_CONFIG={'MAX_QUEUE_SIZE': 500, 'FLUSH_INTERVAL': 0.5})
- class ConfigurationTestCase(TransactionTestCase):
+ class UtilsTestCase(TransactionTestCase):
      def setUp(self):
          super().setUp()
+         # Stop the global logger to avoid interference
+         stop_async_logger()
+
          # Clear all existing logs
          with connection.cursor() as cursor:
              cursor.execute("DELETE FROM logq_logentry")
+
+         # Create a properly configured global logger
+         from .async_logger import _async_logger
+         from . import async_logger as async_logger_module
+
+         # Create a test logger with fast flush interval
+         test_logger = AsyncLogger(max_queue_size=100, flush_interval=0.1)
+         test_logger.start()
+
+         # Replace the global logger
+         async_logger_module._async_logger = test_logger
+
+         time.sleep(0.2)  # Wait for thread to start

      def tearDown(self):
+         # Stop the global logger
+         stop_async_logger()
+         time.sleep(0.2)  # Wait for thread to stop
+
          # Clear logs after test
          with connection.cursor() as cursor:
              cursor.execute("DELETE FROM logq_logentry")
          super().tearDown()

-     def test_custom_configuration(self):
-         """Test that custom configuration is respected."""
-         logger = AsyncLogger()
-         self.assertEqual(logger.queue.maxsize, 500)
-         self.assertEqual(logger.flush_interval, 0.5)
+     def test_log_performance(self):
+         """Test log_performance decorator."""
+         # Debug: Check if the logger is running
+         logger = get_async_logger()

+         # Test direct logging first
+         logger.info("Direct test message")
+         time.sleep(0.3)
+
+         @log_performance(threshold_seconds=0.1, always_log=True)
+         def slow_function():
+             time.sleep(0.2)
+             return "Result"
+
+         slow_function()
+
+         time.sleep(0.5)  # Wait longer for flush
+
+         entries = LogEntry.objects.all()

- class MiddlewareTestCase(TransactionTestCase):
+         self.assertGreater(entries.count(), 0)
+
+
+ class LogHandlerTestCase(TransactionTestCase):
      def setUp(self):
          super().setUp()
          # Stop the global logger to avoid interference
          stop_async_logger()
+
          # Clear all existing logs
          with connection.cursor() as cursor:
              cursor.execute("DELETE FROM logq_logentry")
+
+         # Create a properly configured global logger
+         from .async_logger import _async_logger
+         from . import async_logger as async_logger_module
+
+         # Create a test logger with fast flush interval
+         test_logger = AsyncLogger(max_queue_size=100, flush_interval=0.1)
+         test_logger.start()
+
+         # Replace the global logger
+         async_logger_module._async_logger = test_logger
+
+         time.sleep(0.2)  # Wait for thread to start

      def tearDown(self):
+         # Stop the global logger
+         stop_async_logger()
+         time.sleep(0.2)  # Wait for thread to stop
+
          # Clear logs after test
          with connection.cursor() as cursor:
              cursor.execute("DELETE FROM logq_logentry")
          super().tearDown()

-     def test_middleware_request_logging(self):
-         """Test that middleware logs requests."""
+     def test_log_handler(self):
+         """Test log handler functionality."""
          # Verify we start with no logs
          self.assertEqual(LogEntry.objects.count(), 0)

-         response = self.client.get('/admin/')
+         # Create a test handler
+         class TestHandler(LogHandler):
+             def handle(self, log_entry: LogEntry) -> None:
+                 pass
+
+             def flush(self) -> None:
+                 pass
+
+         # Create a logger with the test handler
+         logger = get_async_logger()
+         logger.add_handler(TestHandler())
+         logger.start()

+         logger.info("Test message")
          time.sleep(0.5)
+
+         # Verify we have exactly one log entry
+         self.assertEqual(LogEntry.objects.count(), 1)

-         entries = LogEntry.objects.all()
-         self.assertGreater(entries.count(), 0)
-
-         # Should have request start and completion logs
-         start_logs = entries.filter(message__contains="Request started")
-         complete_logs = entries.filter(message__contains="Request completed")
+         # Verify the log entry was sent to the handler
+         log_entry = LogEntry.objects.first()
+         self.assertEqual(log_entry.message, "Test message")

-         self.assertGreater(start_logs.count(), 0)
-         self.assertGreater(complete_logs.count(), 0)
+         # Stop the logger
+         logger.stop()
+         time.sleep(0.2)  # Wait for thread to stop
+
{djlogq-1.0.5 → djlogq-1.0.7}/src/logq/utils.py
@@ -32,10 +32,10 @@ def log_function_call(func=None, *, level='INFO'):
                  }
              )

-             start_time = time.time()
+             start_time = time.perf_counter()
              try:
                  result = func(*args, **kwargs)
-                 execution_time = time.time() - start_time
+                 execution_time = time.perf_counter() - start_time

                  # Log successful completion
                  logger.log(
@@ -47,7 +47,7 @@ def log_function_call(func=None, *, level='INFO'):
                  return result

              except Exception as e:
-                 execution_time = time.time() - start_time
+                 execution_time = time.perf_counter() - start_time

                  # Log exception
                  logger.exception(
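
Both hunks above swap `time.time()` for `time.perf_counter()`. The latter is a monotonic, high-resolution clock, so measured durations cannot jump or go negative when the wall clock is adjusted (NTP sync, DST). A minimal illustration:

```python
import time

start = time.perf_counter()
time.sleep(0.05)
elapsed = time.perf_counter() - start  # immune to wall-clock adjustments
print(f"slept for {elapsed:.4f}s")
```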
@@ -64,30 +64,54 @@ def log_function_call(func=None, *, level='INFO'):
      return decorator(func)


- def log_performance(threshold_seconds=1.0):
+ def log_performance(threshold_seconds=1.0, always_log=False):
      """
-     Decorator to log slow function calls.
+     Decorator to log function performance metrics.
+
+     Args:
+         threshold_seconds: Log warning when execution exceeds this threshold
+         always_log: If True, log every function call for analytics (like Sentry spans)

      Usage:
          @log_performance(threshold_seconds=0.5)
          def my_slow_function():
              pass
+
+         @log_performance(always_log=True)
+         def my_analytics_function():
+             pass
      """
      def decorator(func):
          @wraps(func)
          def wrapper(*args, **kwargs):
-             start_time = time.time()
+             start_time = time.perf_counter()
              result = func(*args, **kwargs)
-             execution_time = time.time() - start_time
+             execution_time = time.perf_counter() - start_time
+
+             logger = get_async_logger()
+
+             # Always log for analytics if requested
+             if always_log:
+                 logger.info(
+                     f"Function performance: {func.__name__}",
+                     extra_data={
+                         'execution_time': execution_time,
+                         'function_name': func.__name__,
+                         'module': func.__module__,
+                         'performance_metric': True,  # Tag for easy filtering
+                     }
+                 )

+             # Log warning if threshold exceeded
              if execution_time > threshold_seconds:
-                 logger = get_async_logger()
                  logger.warning(
                      f"Slow function detected: {func.__name__} took {execution_time:.3f}s",
                      extra_data={
                          'execution_time': execution_time,
                          'threshold': threshold_seconds,
                          'module': func.__module__,
+                         'function_name': func.__name__,
+                         'performance_metric': True,
                      }
                  )

@@ -114,7 +138,7 @@ class LogContext:
          self.start_time = None

      def __enter__(self):
-         self.start_time = time.time()
+         self.start_time = time.perf_counter()
          self.logger.log(
              self.level,
              f"Starting: {self.message}",
@@ -123,7 +147,7 @@ class LogContext:
          return self

      def __exit__(self, exc_type, exc_val, exc_tb):
-         execution_time = time.time() - self.start_time
+         execution_time = time.perf_counter() - self.start_time

          if exc_type is None:
              self.logger.log(
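
The new `always_log` flag turns `log_performance` from a slow-call alarm into a per-call metrics source. A usage sketch; `index_documents` is a hypothetical function, not part of the package:

```python
from logq.utils import log_performance

# Every call now produces an INFO "Function performance" entry whose
# extra_data carries execution_time, function_name, module and
# performance_metric=True; the WARNING for calls exceeding the
# threshold is still emitted on top of that.
@log_performance(threshold_seconds=0.5, always_log=True)
def index_documents(batch):  # hypothetical workload
    ...
```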
djlogq-1.0.5/src/logq/async_logger.py
@@ -1,160 +0,0 @@
- import threading
- import queue
- import time
- import logging
- import traceback
- import inspect
- from typing import Optional, Dict, Any
- from django.utils import timezone
- from django.db import transaction
- from django.conf import settings
- from .models import LogEntry, LogLevel
-
-
- class AsyncLogger:
-     """
-     Asynchronous logger that runs in a separate thread to avoid blocking the main application.
-     """
-
-     def __init__(self, max_queue_size: int = None, flush_interval: float = None):
-         # Get configuration from settings
-         config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
-         self.max_queue_size = max_queue_size or config.get('MAX_QUEUE_SIZE', 1000)
-         self.flush_interval = flush_interval or config.get('FLUSH_INTERVAL', 1.0)
-
-         self.queue = queue.Queue(maxsize=self.max_queue_size)
-         self.running = False
-         self.thread = None
-         self._lock = threading.Lock()
-
-     def start(self):
-         """Start the logging thread."""
-         with self._lock:
-             if not self.running:
-                 self.running = True
-                 self.thread = threading.Thread(target=self._worker, daemon=True)
-                 self.thread.start()
-
-     def stop(self):
-         """Stop the logging thread."""
-         with self._lock:
-             if self.running:
-                 self.running = False
-                 if self.thread:
-                     self.thread.join(timeout=5.0)
-
-     def _worker(self):
-         """Worker thread that processes log entries from the queue."""
-         batch = []
-         last_flush = time.time()
-
-         while self.running:
-             try:
-                 # Try to get a log entry with timeout
-                 try:
-                     entry = self.queue.get(timeout=0.1)
-                     batch.append(entry)
-                 except queue.Empty:
-                     pass
-
-                 # Flush batch if it's time or batch is getting large
-                 current_time = time.time()
-                 if (current_time - last_flush >= self.flush_interval or
-                         len(batch) >= 50):
-                     if batch:
-                         self._flush_batch(batch)
-                         batch = []
-                     last_flush = current_time
-
-             except Exception as e:
-                 # Log the error to prevent infinite loops
-                 print(f"Error in async logger worker: {e}")
-                 time.sleep(1)
-
-         # Flush remaining entries
-         if batch:
-             self._flush_batch(batch)
-
-     def _flush_batch(self, batch):
-         """Flush a batch of log entries to the database."""
-         try:
-             with transaction.atomic():
-                 LogEntry.objects.bulk_create(batch, ignore_conflicts=True)
-         except Exception as e:
-             print(f"Error flushing log batch: {e}")
-
-     def log(self, level: str, message: str, **kwargs):
-         """Add a log entry to the queue."""
-         if not self.running:
-             return
-
-         # Get caller information
-         frame = inspect.currentframe().f_back
-         module = frame.f_globals.get('__name__', 'unknown')
-         function = frame.f_code.co_name
-         line_number = frame.f_lineno
-
-         # Create log entry
-         entry = LogEntry(
-             level=level,
-             message=message,
-             module=module,
-             function=function,
-             line_number=line_number,
-             user_id=kwargs.get('user_id'),
-             request_id=kwargs.get('request_id'),
-             extra_data=kwargs.get('extra_data', {})
-         )
-
-         try:
-             self.queue.put_nowait(entry)
-         except queue.Full:
-             # If queue is full, log to console as fallback
-             print(f"Log queue full, dropping entry: [{level}] {message}")
-
-     def debug(self, message: str, **kwargs):
-         self.log(LogLevel.DEBUG, message, **kwargs)
-
-     def info(self, message: str, **kwargs):
-         self.log(LogLevel.INFO, message, **kwargs)
-
-     def warning(self, message: str, **kwargs):
-         self.log(LogLevel.WARNING, message, **kwargs)
-
-     def error(self, message: str, **kwargs):
-         self.log(LogLevel.ERROR, message, **kwargs)
-
-     def critical(self, message: str, **kwargs):
-         self.log(LogLevel.CRITICAL, message, **kwargs)
-
-     def exception(self, message: str, exc_info=None, **kwargs):
-         """Log an exception with traceback."""
-         if exc_info is None:
-             exc_info = traceback.format_exc()
-
-         extra_data = kwargs.get('extra_data', {})
-         extra_data['traceback'] = exc_info
-
-         self.log(LogLevel.ERROR, message, extra_data=extra_data, **kwargs)
-
-
- # Global logger instance
- _async_logger = None
-
-
- def get_async_logger() -> AsyncLogger:
-     """Get the global async logger instance."""
-     global _async_logger
-     if _async_logger is None:
-         _async_logger = AsyncLogger()
-         _async_logger.start()
-     return _async_logger
-
-
- def stop_async_logger():
-     """Stop the global async logger."""
-     global _async_logger
-     if _async_logger:
-         _async_logger.stop()
-         _async_logger = None
-