djlogq-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,242 @@
1
+ Metadata-Version: 2.4
2
+ Name: djlogq
3
+ Version: 1.0.0
4
+ Summary: A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support.
5
+ Author-email: mesake <mesnavunawa@gmail.com>
6
+ License: MIT
7
+ Requires-Python: >=3.8
8
+ Description-Content-Type: text/markdown
9
+ Requires-Dist: Django
10
+ Provides-Extra: dev
11
+ Requires-Dist: build==1.2.1; extra == "dev"
12
+ Requires-Dist: nox==2024.4.15; extra == "dev"
13
+ Requires-Dist: twine==5.1.1; extra == "dev"
14
+
15
+ # Django Async Logger
16
+
17
+ A reusable Django app that provides asynchronous logging functionality using a separate thread to avoid blocking the main application.
18
+
19
+ ## Features
20
+
21
+ - **Asynchronous Logging**: All log operations run in a separate thread
22
+ - **Thread-Safe**: Uses a queue system for thread-safe logging
23
+ - **Rich Metadata**: Captures module, function, line number, user ID, request ID, and extra data
24
+ - **Admin Interface**: Beautiful Django admin interface for viewing and managing logs
25
+ - **API Endpoints**: REST API for external logging
26
+ - **Middleware**: Automatic request logging with unique request IDs
27
+ - **Decorators**: Utility decorators for function logging and performance monitoring
28
+ - **Context Managers**: Easy-to-use context managers for operation logging
29
+ - **Configurable**: Customizable queue size, flush intervals, and cleanup policies
30
+
31
+ ## Installation
32
+
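+ Install the package from PyPI first (the distribution is named `djlogq`; the Django app it provides is `logq`):
+ 
+ ```bash
+ pip install djlogq
+ ```
+ 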
33
+ 1. Add the app to your Django project:
34
+ ```python
35
+ INSTALLED_APPS = [
36
+ # ...
37
+ 'logq',
38
+ ]
39
+ ```
40
+
41
+ 2. Add the middleware to your settings:
42
+ ```python
43
+ MIDDLEWARE = [
44
+ # ...
45
+ 'logq.middleware.AsyncLoggingMiddleware',
46
+ ]
47
+ ```
48
+
49
+ 3. Run migrations:
50
+ ```bash
51
+ python manage.py makemigrations logq
52
+ python manage.py migrate
53
+ ```
54
+
55
+ 4. (Optional) Configure logging settings:
56
+ ```python
57
+ ASYNC_LOGGING_CONFIG = {
58
+ 'MAX_QUEUE_SIZE': 1000,
59
+ 'FLUSH_INTERVAL': 1.0, # seconds
60
+ 'AUTO_CLEANUP_DAYS': 30,
61
+ 'ENABLE_REQUEST_LOGGING': True,
62
+ 'IGNORE_PATHS': ['/admin/'], # paths to ignore for request logging
63
+ }
64
+ ```
65
+
66
+ ## Usage
67
+
68
+ ### Basic Logging
69
+
70
+ ```python
71
+ from logq.async_logger import get_async_logger
72
+
73
+ logger = get_async_logger()
74
+
75
+ # Different log levels
76
+ logger.debug("Debug message")
77
+ logger.info("Info message")
78
+ logger.warning("Warning message")
79
+ logger.error("Error message")
80
+ logger.critical("Critical message")
81
+
82
+ # With extra data
83
+ logger.info("User action", extra_data={'action': 'login', 'ip': '192.168.1.1'})
84
+
85
+ # Log exceptions
86
+ try:
87
+ # some code that might fail
88
+ pass
89
+ except Exception as e:
90
+ logger.exception("An error occurred", exc_info=str(e))
91
+ ```
92
+
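+ If `exc_info` is omitted inside an `except` block, `exception()` captures the current traceback via `traceback.format_exc()` and stores it under `extra_data['traceback']`.
+ 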
93
+ ### Function Decorators
94
+
95
+ ```python
96
+ from logq.utils import log_function_call, log_performance
97
+
98
+ @log_function_call
99
+ def my_function():
100
+ return "result"
101
+
102
+ @log_function_call(level='DEBUG')
103
+ def debug_function():
104
+ return "debug result"
105
+
106
+ @log_performance(threshold_seconds=0.5)
107
+ def slow_function():
108
+ time.sleep(1)
109
+ return "slow result"
110
+ ```
111
+
112
+ ### Context Managers
113
+
114
+ ```python
115
+ from logq.utils import LogContext
116
+
117
+ with LogContext("Processing data", level='INFO'):
118
+ # do some work
119
+ time.sleep(0.1)
120
+ # automatically logs start and completion with timing
121
+ ```
122
+
123
+ ### API Logging
124
+
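+ The API endpoints live in the app's URLconf, so wire them into your project first. A minimal sketch, assuming the `logq/` prefix used in the example URLs below:
+ 
+ ```python
+ # project urls.py
+ from django.urls import include, path
+ 
+ urlpatterns = [
+     # ...
+     path('logq/', include('logq.urls')),
+ ]
+ ```
+ 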
125
+ ```python
126
+ import requests
127
+ import json
128
+
129
+ # Log via API
130
+ data = {
131
+ 'level': 'INFO',
132
+ 'message': 'External log message',
133
+ 'extra_data': {'source': 'external_service'}
134
+ }
135
+
136
+ response = requests.post(
137
+ 'http://your-domain/logq/api/log/',
138
+ data=json.dumps(data),
139
+ headers={'Content-Type': 'application/json'}
140
+ )
141
+
142
+ # Retrieve logs via API
143
+ response = requests.get('http://your-domain/logq/api/logs/?limit=10')
144
+ logs = response.json()['logs']
145
+ ```
146
+
147
+ ### Admin Interface
148
+
149
+ Access the admin interface at `/admin/` to view and manage logs. Features include:
150
+
151
+ - Filter by level, module, timestamp, user ID
152
+ - Search by message, module, function, request ID
153
+ - View extra data as JSON
154
+ - Delete old logs
155
+ - Date-hierarchy navigation
156
+
157
+ ### Management Commands
158
+
159
+ Clean old logs:
160
+ ```bash
161
+ # Delete logs older than 30 days
162
+ python manage.py clean_logs
163
+
164
+ # Delete logs older than 7 days
165
+ python manage.py clean_logs --days 7
166
+
167
+ # Delete only DEBUG and INFO logs older than 30 days
168
+ python manage.py clean_logs --level INFO
169
+
170
+ # Dry run to see what would be deleted
171
+ python manage.py clean_logs --dry-run
172
+ ```
173
+
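+ The same cleanup can be triggered programmatically from any scheduler (cron, Celery beat, etc.) through Django's standard `call_command`; a minimal sketch:
+ 
+ ```python
+ from django.core.management import call_command
+ 
+ # Options mirror the CLI flags above.
+ call_command('clean_logs', days=7)
+ call_command('clean_logs', level='INFO', dry_run=True)  # preview only
+ ```
+ 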
174
+ ## Configuration Options
175
+
176
+ | Setting | Default | Description |
177
+ |---------|---------|-------------|
178
+ | `MAX_QUEUE_SIZE` | 1000 | Maximum number of log entries in the queue |
179
+ | `FLUSH_INTERVAL` | 1.0 | How often to flush logs to database (seconds) |
180
+ | `AUTO_CLEANUP_DAYS` | 30 | Default age cutoff (in days) used by the `clean_logs` command |
181
+ | `ENABLE_REQUEST_LOGGING` | True | Whether to log all HTTP requests |
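+ | `IGNORE_PATHS` | `[]` | Path prefixes excluded from request logging (e.g. `['/admin/']`) |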
182
+
183
+ ## Model Fields
184
+
185
+ The `LogEntry` model includes:
186
+
187
+ - `timestamp`: When the log was created
188
+ - `level`: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
189
+ - `message`: The log message
190
+ - `module`: Python module where the log originated
191
+ - `function`: Function name where the log originated
192
+ - `line_number`: Line number where the log originated
193
+ - `user_id`: ID of the user (if authenticated)
194
+ - `request_id`: Unique request identifier
195
+ - `extra_data`: Additional JSON data
196
+ - `created_at`: When the entry was saved to database
197
+
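+ `LogEntry` is a regular Django model, so logs can be inspected with the standard ORM; a small sketch (the filter values are illustrative):
+ 
+ ```python
+ from logq.models import LogEntry, LogLevel
+ 
+ # Most recent ERROR entries for one request (default ordering is newest first).
+ recent_errors = LogEntry.objects.filter(
+     level=LogLevel.ERROR,
+     request_id='example-request-id',  # illustrative value
+ )[:20]
+ 
+ for entry in recent_errors:
+     print(entry.timestamp, entry.module, entry.message, entry.extra_data)
+ ```
+ 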
198
+ ## Performance Considerations
199
+
200
+ - The logger runs in a separate thread and won't block your main application
201
+ - Log entries are batched and written to the database periodically
202
+ - If the queue is full, new entries are dropped (with console fallback)
203
+ - Consider setting up database indexes for better query performance
204
+ - Use the cleanup command regularly to prevent database bloat
205
+
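+ Because writes are batched, entries may still be queued when a process exits; `stop_async_logger()` flushes the remaining batch and joins the worker thread, so registering it as a shutdown hook is a reasonable pattern. A minimal sketch:
+ 
+ ```python
+ import atexit
+ 
+ from logq.async_logger import stop_async_logger
+ 
+ # Flush queued entries and stop the worker thread at interpreter exit.
+ atexit.register(stop_async_logger)
+ ```
+ 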
206
+ ## Thread Safety
207
+
208
+ The logger is completely thread-safe:
209
+ - Uses a thread-safe queue for communication
210
+ - Database operations are wrapped in transactions
211
+ - Multiple threads can safely call the logger simultaneously
212
+
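+ For example, several threads can log through the shared instance without any extra locking; a small sketch:
+ 
+ ```python
+ import threading
+ 
+ from logq.async_logger import get_async_logger
+ 
+ def worker(n):
+     # Every thread sees the same global logger; enqueueing is thread-safe.
+     get_async_logger().info(f"worker {n} finished", extra_data={'worker': n})
+ 
+ threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
+ for t in threads:
+     t.start()
+ for t in threads:
+     t.join()
+ ```
+ 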
213
+ ## Customization
214
+
215
+ You can extend the logger by:
216
+
217
+ 1. Creating custom log levels
218
+ 2. Adding new fields to the LogEntry model
219
+ 3. Customizing the admin interface
220
+ 4. Adding new API endpoints
221
+ 5. Creating custom middleware
222
+
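+ For instance, a thin project-level helper can stamp every entry with shared metadata before delegating to the logger; a sketch (the helper name and `service` tag are illustrative, not part of the package):
+ 
+ ```python
+ from logq.async_logger import get_async_logger
+ 
+ def log_event(level, message, **kwargs):
+     """Hypothetical project helper that tags all entries with a service name."""
+     extra = kwargs.pop('extra_data', {})
+     extra.setdefault('service', 'billing')  # illustrative default tag
+     get_async_logger().log(level, message, extra_data=extra, **kwargs)
+ 
+ log_event('INFO', 'invoice generated', user_id=42)
+ ```
+ 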
223
+ ## Troubleshooting
224
+
225
+ ### Logs not appearing
226
+ - Check that the async logger thread is running (see the sketch at the end of this section)
227
+ - Verify database migrations are applied
228
+ - Check for any database connection issues
229
+
230
+ ### Performance issues
231
+ - Reduce `FLUSH_INTERVAL` for more frequent writes
232
+ - Increase `MAX_QUEUE_SIZE` for higher throughput
233
+ - Add database indexes for frequently queried fields
234
+
235
+ ### Memory usage
236
+ - Reduce `MAX_QUEUE_SIZE` if memory is a concern
237
+ - Run cleanup commands more frequently
238
+ - Monitor database size and clean old logs
239
+
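+ For the first point under "Logs not appearing", the logger's state can be inspected directly; a quick sketch using its attributes:
+ 
+ ```python
+ from logq.async_logger import get_async_logger
+ 
+ logger = get_async_logger()  # starts the worker thread on first use
+ print("running:", logger.running)
+ print("worker alive:", logger.thread.is_alive() if logger.thread else False)
+ print("queued entries:", logger.queue.qsize())
+ ```
+ 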
240
+ ## License
241
+
242
+ This project is open source and available under the MIT License.
@@ -0,0 +1,20 @@
1
+ logq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ logq/admin.py,sha256=nC8TVXc64G41Mh05ijUSCJmvIDo-kgZHKyyOlmqH-AE,320
3
+ logq/apps.py,sha256=uTCJNk3UWtyAZOpg7nRX0owKI-ts6BtPHARrU7PytQA,346
4
+ logq/async_logger.py,sha256=I8za2lfEhiGrKDq76oZ4DZvvAlXeiRr17bmXT5tXSY4,5216
5
+ logq/middleware.py,sha256=Y9EAnxrmavyDgVklEW893Wh0eeQPKdCvizcxGzPDLBo,3642
6
+ logq/models.py,sha256=Gu8KLNMn7I4XtEUHQPavSSlcESQ-2Pu5JLKsL97t9Gg,1616
7
+ logq/tests.py,sha256=iimhG7nvH5SbFn1og6soBMvCWhxCsuvgwkfaPOr5PC4,11102
8
+ logq/urls.py,sha256=oGwVM9HXsVY_P86sVPuz5xnFofYfmkL8ZSZDhExhJQk,216
9
+ logq/utils.py,sha256=Bg973EhnvQXvFSk1mtf8pcpQqnHiOnQRdaCLM9ibJPw,4204
10
+ logq/views.py,sha256=WJpwiPyfItBbceM1862NcXp_ui4U6WyNBhw2P27mlJ4,3695
11
+ logq/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
+ logq/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
+ logq/management/commands/clean_logs.py,sha256=Cc33EEqGGwsNcvQjwnLbM6kIb0lBJsyDQliia7EEAZo,2416
14
+ logq/migrations/0001_initial.py,sha256=l4f-lUcO7OsABGYiSBp7fdWDt2rLHaIhR78pCKIAAdQ,2007
15
+ logq/migrations/0002_alter_logentry_function_alter_logentry_line_number_and_more.py,sha256=SNBngZmhk9BgcOe8eAojX47V5JKC2V7oW9QtLHWIkFc,750
16
+ logq/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
+ djlogq-1.0.0.dist-info/METADATA,sha256=E1B27SP2iNCWCG1UkcpldNH1fhuAItNbRUwyk8JpTV8,6453
18
+ djlogq-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
19
+ djlogq-1.0.0.dist-info/top_level.txt,sha256=VWj_EO93x0ix2Y2qm6obWT22L7VPFiZ7lQf0yIaI8do,5
20
+ djlogq-1.0.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.9.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ logq
logq/__init__.py ADDED
File without changes
logq/admin.py ADDED
@@ -0,0 +1,12 @@
1
+ from django.contrib import admin
2
+
3
+ from .models import LogEntry
4
+
5
+
6
+ @admin.register(LogEntry)
7
+ class LogEntryAdmin(admin.ModelAdmin):
8
+     list_display = ('timestamp', 'level', 'module', 'user_id', 'message')
9
+     list_filter = ('level', 'timestamp', 'module', 'user_id')
10
+     search_fields = ('message', 'module', 'function', 'request_id')
11
+ date_hierarchy = 'timestamp'
12
+ ordering = ('-timestamp',)
logq/apps.py ADDED
@@ -0,0 +1,12 @@
1
+ from django.apps import AppConfig
2
+
3
+
4
+ class LogqConfig(AppConfig):
5
+ default_auto_field = 'django.db.models.BigAutoField'
6
+ name = 'logq'
7
+ verbose_name = 'Asynchronous Logging'
8
+
9
+ def ready(self):
10
+ """Initialize the async logger when the app is ready."""
11
+ from .async_logger import get_async_logger
12
+ get_async_logger()
logq/async_logger.py ADDED
@@ -0,0 +1,160 @@
1
+ import threading
2
+ import queue
3
+ import time
5
+ import traceback
6
+ import inspect
7
+ from typing import Optional, Dict, Any
8
+ from django.utils import timezone
9
+ from django.db import transaction
10
+ from django.conf import settings
11
+ from .models import LogEntry, LogLevel
12
+
13
+
14
+ class AsyncLogger:
15
+ """
16
+ Asynchronous logger that runs in a separate thread to avoid blocking the main application.
17
+ """
18
+
19
+ def __init__(self, max_queue_size: int = None, flush_interval: float = None):
20
+ # Get configuration from settings
21
+ config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
22
+ self.max_queue_size = max_queue_size or config.get('MAX_QUEUE_SIZE', 1000)
23
+ self.flush_interval = flush_interval or config.get('FLUSH_INTERVAL', 1.0)
24
+
25
+ self.queue = queue.Queue(maxsize=self.max_queue_size)
26
+ self.running = False
27
+ self.thread = None
28
+ self._lock = threading.Lock()
29
+
30
+ def start(self):
31
+ """Start the logging thread."""
32
+ with self._lock:
33
+ if not self.running:
34
+ self.running = True
35
+ self.thread = threading.Thread(target=self._worker, daemon=True)
36
+ self.thread.start()
37
+
38
+ def stop(self):
39
+ """Stop the logging thread."""
40
+ with self._lock:
41
+ if self.running:
42
+ self.running = False
43
+ if self.thread:
44
+ self.thread.join(timeout=5.0)
45
+
46
+ def _worker(self):
47
+ """Worker thread that processes log entries from the queue."""
48
+ batch = []
49
+ last_flush = time.time()
50
+
51
+ while self.running:
52
+ try:
53
+ # Try to get a log entry with timeout
54
+ try:
55
+ entry = self.queue.get(timeout=0.1)
56
+ batch.append(entry)
57
+ except queue.Empty:
58
+ pass
59
+
60
+ # Flush batch if it's time or batch is getting large
61
+ current_time = time.time()
62
+ if (current_time - last_flush >= self.flush_interval or
63
+ len(batch) >= 50):
64
+ if batch:
65
+ self._flush_batch(batch)
66
+ batch = []
67
+ last_flush = current_time
68
+
69
+ except Exception as e:
70
+ # Log the error to prevent infinite loops
71
+ print(f"Error in async logger worker: {e}")
72
+ time.sleep(1)
73
+
74
+ # Flush remaining entries
75
+ if batch:
76
+ self._flush_batch(batch)
77
+
78
+ def _flush_batch(self, batch):
79
+ """Flush a batch of log entries to the database."""
80
+ try:
81
+ with transaction.atomic():
82
+ LogEntry.objects.bulk_create(batch, ignore_conflicts=True)
83
+ except Exception as e:
84
+ print(f"Error flushing log batch: {e}")
85
+
86
+ def log(self, level: str, message: str, **kwargs):
87
+ """Add a log entry to the queue."""
88
+ if not self.running:
89
+ return
90
+
91
+ # Get caller information
92
+ frame = inspect.currentframe().f_back
93
+ module = frame.f_globals.get('__name__', 'unknown')
94
+ function = frame.f_code.co_name
95
+ line_number = frame.f_lineno
96
+
97
+ # Create log entry
98
+ entry = LogEntry(
99
+ level=level,
100
+ message=message,
101
+ module=module,
102
+ function=function,
103
+ line_number=line_number,
104
+ user_id=kwargs.get('user_id'),
105
+ request_id=kwargs.get('request_id'),
106
+ extra_data=kwargs.get('extra_data', {})
107
+ )
108
+
109
+ try:
110
+ self.queue.put_nowait(entry)
111
+ except queue.Full:
112
+ # If queue is full, log to console as fallback
113
+ print(f"Log queue full, dropping entry: [{level}] {message}")
114
+
115
+ def debug(self, message: str, **kwargs):
116
+ self.log(LogLevel.DEBUG, message, **kwargs)
117
+
118
+ def info(self, message: str, **kwargs):
119
+ self.log(LogLevel.INFO, message, **kwargs)
120
+
121
+ def warning(self, message: str, **kwargs):
122
+ self.log(LogLevel.WARNING, message, **kwargs)
123
+
124
+ def error(self, message: str, **kwargs):
125
+ self.log(LogLevel.ERROR, message, **kwargs)
126
+
127
+ def critical(self, message: str, **kwargs):
128
+ self.log(LogLevel.CRITICAL, message, **kwargs)
129
+
130
+ def exception(self, message: str, exc_info=None, **kwargs):
131
+ """Log an exception with traceback."""
132
+ if exc_info is None:
133
+ exc_info = traceback.format_exc()
134
+
135
+ extra_data = kwargs.get('extra_data', {})
136
+ extra_data['traceback'] = exc_info
137
+
138
+ self.log(LogLevel.ERROR, message, extra_data=extra_data, **kwargs)
139
+
140
+
141
+ # Global logger instance
142
+ _async_logger = None
143
+
144
+
145
+ def get_async_logger() -> AsyncLogger:
146
+ """Get the global async logger instance."""
147
+ global _async_logger
148
+ if _async_logger is None:
149
+ _async_logger = AsyncLogger()
150
+ _async_logger.start()
151
+ return _async_logger
152
+
153
+
154
+ def stop_async_logger():
155
+ """Stop the global async logger."""
156
+ global _async_logger
157
+ if _async_logger:
158
+ _async_logger.stop()
159
+ _async_logger = None
160
+
File without changes
File without changes
@@ -0,0 +1,69 @@
1
+
2
+ from django.core.management.base import BaseCommand
3
+ from django.utils import timezone
4
+ from datetime import timedelta
5
+ from django.conf import settings
6
+ from logq.models import LogEntry
7
+
8
+
9
+ class Command(BaseCommand):
10
+ help = 'Clean old log entries from the database'
11
+
12
+ def add_arguments(self, parser):
13
+ parser.add_argument(
14
+ '--days',
15
+ type=int,
16
+ default=None,
17
+ help='Delete logs older than this many days (default: from ASYNC_LOGGING_CONFIG)'
18
+ )
19
+ parser.add_argument(
20
+ '--level',
21
+ type=str,
22
+ choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
23
+ help='Only delete logs of this level or lower'
24
+ )
25
+ parser.add_argument(
26
+ '--dry-run',
27
+ action='store_true',
28
+ help='Show what would be deleted without actually deleting'
29
+ )
30
+
31
+ def handle(self, *args, **options):
32
+ # Get default days from settings
33
+ config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
34
+ default_days = config.get('AUTO_CLEANUP_DAYS', 30)
35
+
36
+ days = options['days'] if options['days'] is not None else default_days
37
+ level = options['level']
38
+ dry_run = options['dry_run']
39
+
40
+ # Calculate cutoff date
41
+ cutoff_date = timezone.now() - timedelta(days=days)
42
+
43
+ # Build query
44
+ query = LogEntry.objects.filter(timestamp__lt=cutoff_date)
45
+
46
+ if level:
47
+ # Get level hierarchy
48
+ level_order = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
49
+ level_index = level_order.index(level)
50
+ levels_to_delete = level_order[:level_index + 1]
51
+ query = query.filter(level__in=levels_to_delete)
52
+
53
+ count = query.count()
54
+
55
+ if dry_run:
56
+ self.stdout.write(
57
+ self.style.WARNING(
58
+ f'Would delete {count} log entries older than {days} days'
59
+ + (f' with level {level} or lower' if level else '')
60
+ )
61
+ )
62
+ else:
63
+ deleted_count = query.delete()[0]
64
+ self.stdout.write(
65
+ self.style.SUCCESS(
66
+ f'Successfully deleted {deleted_count} log entries older than {days} days'
67
+ + (f' with level {level} or lower' if level else '')
68
+ )
69
+ )
logq/middleware.py ADDED
@@ -0,0 +1,96 @@
1
+ import uuid
2
+ from django.utils.deprecation import MiddlewareMixin
3
+ from .async_logger import get_async_logger
4
+ from django.conf import settings
5
+
6
+ class AsyncLoggingMiddleware(MiddlewareMixin):
7
+ """
8
+ Middleware that automatically logs request information and adds request_id to the request.
9
+ """
10
+
11
+ def process_request(self, request):
12
+         # Skip entirely when request logging is disabled or the path is ignored
+         config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
+         if not config.get('ENABLE_REQUEST_LOGGING', True):
+             return None
+         ignore_paths = config.get('IGNORE_PATHS', [])
+         if any(request.path.startswith(path) for path in ignore_paths):
+             return None
16
+
17
+ # Generate unique request ID
18
+ request.request_id = str(uuid.uuid4())
19
+
20
+ # Get user ID if authenticated
21
+ user_id = None
22
+ if hasattr(request, 'user') and request.user.is_authenticated:
23
+ user_id = request.user.id
24
+
25
+ # Log request start
26
+ logger = get_async_logger()
27
+ logger.info(
28
+ f"Request started: {request.method} {request.path}",
29
+ request_id=request.request_id,
30
+ user_id=user_id,
31
+ extra_data={
32
+ 'method': request.method,
33
+ 'path': request.path,
34
+ 'query_params': dict(request.GET),
35
+ 'user_agent': request.META.get('HTTP_USER_AGENT', ''),
36
+ 'ip_address': self._get_client_ip(request),
37
+ }
38
+ )
39
+
40
+ def process_response(self, request, response):
41
+         # Skip when request logging is disabled or the path is ignored
+         config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
+         ignore_paths = config.get('IGNORE_PATHS', [])
+         if not config.get('ENABLE_REQUEST_LOGGING', True) or any(request.path.startswith(path) for path in ignore_paths):
+             return response
45
+
46
+ # Get user ID if authenticated
47
+ user_id = None
48
+ if hasattr(request, 'user') and request.user.is_authenticated:
49
+ user_id = request.user.id
50
+
51
+ # Log request completion
52
+ logger = get_async_logger()
53
+ logger.info(
54
+ f"Request completed: {request.method} {request.path} - {response.status_code}",
55
+ request_id=getattr(request, 'request_id', 'unknown'),
56
+ user_id=user_id,
57
+ extra_data={
58
+ 'status_code': response.status_code,
59
+ 'content_length': len(response.content) if hasattr(response, 'content') else 0,
60
+ }
61
+ )
62
+
63
+ return response
64
+
65
+ def process_exception(self, request, exception):
66
+ # Check if request path is in ignore paths
67
+ ignore_paths = getattr(settings, 'ASYNC_LOGGING_CONFIG', {}).get('IGNORE_PATHS', [])
68
+ if any(request.path.startswith(path) for path in ignore_paths):
69
+ return None
70
+
71
+ # Get user ID if authenticated
72
+ user_id = None
73
+ if hasattr(request, 'user') and request.user.is_authenticated:
74
+ user_id = request.user.id
75
+
76
+ # Log exception
77
+ logger = get_async_logger()
78
+ logger.exception(
79
+ f"Request exception: {request.method} {request.path}",
80
+ exc_info=str(exception),
81
+ request_id=getattr(request, 'request_id', 'unknown'),
82
+ user_id=user_id,
83
+ extra_data={
84
+ 'exception_type': type(exception).__name__,
85
+ 'exception_args': str(exception.args),
86
+ }
87
+ )
88
+
89
+ def _get_client_ip(self, request):
90
+ """Get the client's IP address."""
91
+ x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
92
+ if x_forwarded_for:
93
+             ip = x_forwarded_for.split(',')[0].strip()  # first hop in the chain
94
+ else:
95
+ ip = request.META.get('REMOTE_ADDR')
96
+ return ip
@@ -0,0 +1,37 @@
1
+ # Generated by Django 5.2.4 on 2025-07-15 20:43
2
+
3
+ import django.utils.timezone
4
+ from django.db import migrations, models
5
+
6
+
7
+ class Migration(migrations.Migration):
8
+
9
+ initial = True
10
+
11
+ dependencies = [
12
+ ]
13
+
14
+ operations = [
15
+ migrations.CreateModel(
16
+ name='LogEntry',
17
+ fields=[
18
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
19
+ ('timestamp', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
20
+ ('level', models.CharField(choices=[('DEBUG', 'Debug'), ('INFO', 'Info'), ('WARNING', 'Warning'), ('ERROR', 'Error'), ('CRITICAL', 'Critical')], max_length=10)),
21
+ ('message', models.TextField()),
22
+ ('module', models.CharField(max_length=255)),
23
+ ('function', models.CharField(max_length=255)),
24
+ ('line_number', models.IntegerField()),
25
+ ('user_id', models.IntegerField(blank=True, null=True)),
26
+ ('request_id', models.CharField(blank=True, max_length=255, null=True)),
27
+ ('extra_data', models.JSONField(blank=True, default=dict)),
28
+ ('created_at', models.DateTimeField(auto_now_add=True)),
29
+ ],
30
+ options={
31
+ 'verbose_name': 'Log Entry',
32
+ 'verbose_name_plural': 'Log Entries',
33
+ 'ordering': ['-timestamp'],
34
+ 'indexes': [models.Index(fields=['timestamp'], name='logq_logent_timesta_9e2e5a_idx'), models.Index(fields=['level'], name='logq_logent_level_fa122c_idx'), models.Index(fields=['module'], name='logq_logent_module_f632bb_idx'), models.Index(fields=['function'], name='logq_logent_functio_433e61_idx'), models.Index(fields=['line_number'], name='logq_logent_line_nu_bd9370_idx'), models.Index(fields=['user_id'], name='logq_logent_user_id_651abc_idx'), models.Index(fields=['request_id'], name='logq_logent_request_eda96e_idx')],
35
+ },
36
+ ),
37
+ ]
@@ -0,0 +1,28 @@
1
+ # Generated by Django 5.2.4 on 2025-07-15 21:02
2
+
3
+ from django.db import migrations, models
4
+
5
+
6
+ class Migration(migrations.Migration):
7
+
8
+ dependencies = [
9
+ ('logq', '0001_initial'),
10
+ ]
11
+
12
+ operations = [
13
+ migrations.AlterField(
14
+ model_name='logentry',
15
+ name='function',
16
+ field=models.CharField(blank=True, max_length=255, null=True),
17
+ ),
18
+ migrations.AlterField(
19
+ model_name='logentry',
20
+ name='line_number',
21
+ field=models.IntegerField(blank=True, null=True),
22
+ ),
23
+ migrations.AlterField(
24
+ model_name='logentry',
25
+ name='module',
26
+ field=models.CharField(blank=True, max_length=255, null=True),
27
+ ),
28
+ ]
File without changes
logq/models.py ADDED
@@ -0,0 +1,44 @@
1
+ from django.db import models
2
+
3
+ from django.utils import timezone
5
+
6
+
7
+ class LogLevel(models.TextChoices):
8
+ DEBUG = 'DEBUG'
9
+ INFO = 'INFO'
10
+ WARNING = 'WARNING'
11
+ ERROR = 'ERROR'
12
+ CRITICAL = 'CRITICAL'
13
+
14
+ class LogEntry(models.Model):
15
+ timestamp = models.DateTimeField(default=timezone.now, db_index=True) # index for faster queries
16
+ level = models.CharField(max_length=10, choices=LogLevel.choices)
17
+ message = models.TextField()
18
+ module = models.CharField(max_length=255, null=True, blank=True)
19
+ function = models.CharField(max_length=255, null=True, blank=True)
20
+ line_number = models.IntegerField(null=True, blank=True)
21
+ user_id = models.IntegerField(null=True, blank=True)
22
+ request_id = models.CharField(max_length=255, null=True, blank=True)
23
+ extra_data = models.JSONField(default=dict, blank=True)
24
+ created_at = models.DateTimeField(auto_now_add=True)
25
+
26
+
27
+ class Meta:
28
+ ordering = ['-timestamp']
29
+ indexes = [ # index for faster queries
30
+ models.Index(fields=['timestamp']),
31
+ models.Index(fields=['level']),
32
+ models.Index(fields=['module']),
33
+ models.Index(fields=['function']),
34
+ models.Index(fields=['line_number']),
35
+ models.Index(fields=['user_id']),
36
+ models.Index(fields=['request_id']),
37
+ ]
38
+ verbose_name = 'Log Entry'
39
+ verbose_name_plural = 'Log Entries'
40
+
41
+ def __str__(self):
42
+ """Return a string representation of the log entry."""
43
+ return f"[{self.level}] {self.timestamp.strftime('%Y-%m-%d %H:%M:%S')} - {self.message[:100]}"
44
+
logq/tests.py ADDED
@@ -0,0 +1,317 @@
1
+ from django.test import TransactionTestCase, override_settings
2
+ from django.urls import reverse
3
+ from django.contrib.auth.models import User
4
+ from django.utils import timezone
5
+ from django.db import connection
6
+ import json
7
+ import time
9
+ from .models import LogEntry, LogLevel
10
+ from .async_logger import AsyncLogger, get_async_logger, stop_async_logger
11
+
12
+
13
+ class AsyncLoggerTestCase(TransactionTestCase):
14
+ def setUp(self):
15
+ super().setUp()
16
+ # Stop the global logger to avoid interference
17
+ stop_async_logger()
18
+
19
+ # Clear all existing logs using raw SQL to ensure complete cleanup
20
+ with connection.cursor() as cursor:
21
+ cursor.execute("DELETE FROM logq_logentry")
22
+
23
+ # Create a fresh logger instance for testing
24
+ self.logger = AsyncLogger(max_queue_size=100, flush_interval=0.1)
25
+ self.logger.start()
26
+ time.sleep(0.2) # Wait for thread to start
27
+
28
+ def tearDown(self):
29
+ self.logger.stop()
30
+ time.sleep(0.2) # Wait for thread to stop
31
+
32
+ # Clear logs after test using raw SQL
33
+ with connection.cursor() as cursor:
34
+ cursor.execute("DELETE FROM logq_logentry")
35
+
36
+ super().tearDown()
37
+
38
+ def test_basic_logging(self):
39
+ """Test basic logging functionality."""
40
+ # Verify we start with no logs
41
+ self.assertEqual(LogEntry.objects.count(), 0)
42
+
43
+ self.logger.info("Test message")
44
+ time.sleep(0.5) # Wait longer for flush
45
+
46
+ # Verify we have exactly one log entry
47
+ self.assertEqual(LogEntry.objects.count(), 1)
48
+
49
+ log_entry = LogEntry.objects.first()
50
+ self.assertEqual(log_entry.level, LogLevel.INFO)
51
+ self.assertEqual(log_entry.message, "Test message")
52
+
53
+ def test_all_log_levels(self):
54
+ """Test all log levels."""
55
+ # Verify we start with no logs
56
+ self.assertEqual(LogEntry.objects.count(), 0)
57
+
58
+ levels = [LogLevel.DEBUG, LogLevel.INFO, LogLevel.WARNING, LogLevel.ERROR, LogLevel.CRITICAL]
59
+
60
+ for level in levels:
61
+ self.logger.log(level, f"Test {level}")
62
+
63
+ time.sleep(0.5) # Wait longer for flush
64
+
65
+ entries = LogEntry.objects.all()
66
+ self.assertEqual(entries.count(), len(levels))
67
+
68
+ for entry in entries:
69
+ self.assertIn(entry.level, levels)
70
+
71
+ def test_extra_data(self):
72
+ """Test logging with extra data."""
73
+ # Verify we start with no logs
74
+ self.assertEqual(LogEntry.objects.count(), 0)
75
+
76
+ extra_data = {'user_id': 123, 'action': 'test'}
77
+ self.logger.info("Test with extra data", extra_data=extra_data)
78
+ time.sleep(0.5)
79
+
80
+ # Verify we have exactly one log entry
81
+ self.assertEqual(LogEntry.objects.count(), 1)
82
+
83
+ entry = LogEntry.objects.first()
84
+ self.assertEqual(entry.extra_data, extra_data)
85
+
86
+ def test_queue_full_handling(self):
87
+ """Test behavior when queue is full."""
88
+ # Verify we start with no logs
89
+ self.assertEqual(LogEntry.objects.count(), 0)
90
+
91
+ # Fill the queue
92
+ for i in range(150): # More than max_queue_size
93
+ self.logger.info(f"Message {i}")
94
+
95
+ time.sleep(0.5)
96
+
97
+ # Should have some entries but not all due to queue being full
98
+ entries = LogEntry.objects.count()
99
+ self.assertGreater(entries, 0)
100
+         self.assertLessEqual(entries, 150)  # the worker drains concurrently, so more than max_queue_size may persist
101
+
102
+
103
+ class LogEntryModelTestCase(TransactionTestCase):
104
+ def setUp(self):
105
+ super().setUp()
106
+ # Clear all existing logs
107
+ with connection.cursor() as cursor:
108
+ cursor.execute("DELETE FROM logq_logentry")
109
+
110
+ def tearDown(self):
111
+ # Clear logs after test
112
+ with connection.cursor() as cursor:
113
+ cursor.execute("DELETE FROM logq_logentry")
114
+ super().tearDown()
115
+
116
+ def test_log_entry_creation(self):
117
+ """Test LogEntry model creation."""
118
+ entry = LogEntry.objects.create(
119
+ level=LogLevel.INFO,
120
+ message="Test message",
121
+ module="test_module",
122
+ function="test_function",
123
+ line_number=42,
124
+ user_id=123,
125
+ request_id="test-request-id",
126
+ extra_data={'key': 'value'}
127
+ )
128
+
129
+ self.assertEqual(entry.level, LogLevel.INFO)
130
+ self.assertEqual(entry.message, "Test message")
131
+ self.assertEqual(entry.module, "test_module")
132
+ self.assertEqual(entry.function, "test_function")
133
+ self.assertEqual(entry.line_number, 42)
134
+ self.assertEqual(entry.user_id, 123)
135
+ self.assertEqual(entry.request_id, "test-request-id")
136
+ self.assertEqual(entry.extra_data, {'key': 'value'})
137
+
138
+ def test_log_entry_str_representation(self):
139
+ """Test string representation of LogEntry."""
140
+ entry = LogEntry.objects.create(
141
+ level=LogLevel.ERROR,
142
+ message="This is a very long message that should be truncated in the string representation",
143
+ timestamp=timezone.now()
144
+ )
145
+
146
+ str_repr = str(entry)
147
+ self.assertIn("[ERROR]", str_repr)
148
+ self.assertIn("This is a very long message that should be truncated", str_repr[:100])
149
+
150
+
151
+ class LoggingAPITestCase(TransactionTestCase):
152
+ def setUp(self):
153
+ super().setUp()
154
+ self.user = User.objects.create_user(username='testuser', password='testpass')
155
+ # Stop the global logger to avoid interference
156
+ stop_async_logger()
157
+ # Clear all existing logs
158
+ with connection.cursor() as cursor:
159
+ cursor.execute("DELETE FROM logq_logentry")
160
+
161
+ def tearDown(self):
162
+ # Clear logs after test
163
+ with connection.cursor() as cursor:
164
+ cursor.execute("DELETE FROM logq_logentry")
165
+ super().tearDown()
166
+
167
+ def test_log_endpoint(self):
168
+ """Test the log API endpoint."""
169
+ # Verify we start with no logs
170
+ self.assertEqual(LogEntry.objects.count(), 0)
171
+
172
+ data = {
173
+ 'level': 'INFO',
174
+ 'message': 'Test API log',
175
+ 'extra_data': {'source': 'api'}
176
+ }
177
+
178
+ response = self.client.post(
179
+ reverse('logq:log_endpoint'),
180
+ data=json.dumps(data),
181
+ content_type='application/json'
182
+ )
183
+
184
+ self.assertEqual(response.status_code, 200)
185
+ self.assertEqual(response.json()['status'], 'success')
186
+
187
+ # Wait for async processing
188
+ time.sleep(0.5)
189
+
190
+ # Verify we have exactly one log entry
191
+ self.assertEqual(LogEntry.objects.count(), 1)
192
+
193
+ entry = LogEntry.objects.first()
194
+ self.assertEqual(entry.message, 'Test API log')
195
+ self.assertEqual(entry.extra_data, {'source': 'api'})
196
+
197
+ def test_log_api_view(self):
198
+ """Test the class-based log API view."""
199
+ # Verify we start with no logs
200
+ self.assertEqual(LogEntry.objects.count(), 0)
201
+
202
+ data = {
203
+ 'level': 'WARNING',
204
+ 'message': 'Test warning',
205
+ 'user_id': self.user.id,
206
+ 'request_id': 'test-123'
207
+ }
208
+
209
+ response = self.client.post(
210
+ reverse('logq:log_api'),
211
+ data=json.dumps(data),
212
+ content_type='application/json'
213
+ )
214
+
215
+ self.assertEqual(response.status_code, 200)
216
+
217
+ time.sleep(0.5)
218
+
219
+ # Verify we have exactly one log entry
220
+ self.assertEqual(LogEntry.objects.count(), 1)
221
+
222
+ entry = LogEntry.objects.first()
223
+ self.assertEqual(entry.level, LogLevel.WARNING)
224
+ self.assertEqual(entry.user_id, self.user.id)
225
+ self.assertEqual(entry.request_id, 'test-123')
226
+
227
+ def test_get_logs_api(self):
228
+ """Test retrieving logs via API."""
229
+ # Verify we start with no logs
230
+ self.assertEqual(LogEntry.objects.count(), 0)
231
+
232
+ # Create some test logs directly
233
+ LogEntry.objects.create(level=LogLevel.INFO, message="Test 1")
234
+ LogEntry.objects.create(level=LogLevel.ERROR, message="Test 2")
235
+ LogEntry.objects.create(level=LogLevel.DEBUG, message="Test 3")
236
+
237
+ # Verify we have exactly 3 logs
238
+ self.assertEqual(LogEntry.objects.count(), 3)
239
+
240
+ response = self.client.get(reverse('logq:log_api'))
241
+ self.assertEqual(response.status_code, 200)
242
+
243
+ data = response.json()
244
+ self.assertEqual(len(data['logs']), 3)
245
+ self.assertEqual(data['logs'][0]['message'], "Test 1")
246
+
247
+ def test_invalid_log_level(self):
248
+ """Test API with invalid log level."""
249
+ data = {
250
+ 'level': 'INVALID',
251
+ 'message': 'Test message'
252
+ }
253
+
254
+ response = self.client.post(
255
+ reverse('logq:log_endpoint'),
256
+ data=json.dumps(data),
257
+ content_type='application/json'
258
+ )
259
+
260
+ self.assertEqual(response.status_code, 400)
261
+ self.assertIn('Invalid log level', response.json()['error'])
262
+
263
+
264
+ @override_settings(ASYNC_LOGGING_CONFIG={'MAX_QUEUE_SIZE': 500, 'FLUSH_INTERVAL': 0.5})
265
+ class ConfigurationTestCase(TransactionTestCase):
266
+ def setUp(self):
267
+ super().setUp()
268
+ # Clear all existing logs
269
+ with connection.cursor() as cursor:
270
+ cursor.execute("DELETE FROM logq_logentry")
271
+
272
+ def tearDown(self):
273
+ # Clear logs after test
274
+ with connection.cursor() as cursor:
275
+ cursor.execute("DELETE FROM logq_logentry")
276
+ super().tearDown()
277
+
278
+ def test_custom_configuration(self):
279
+ """Test that custom configuration is respected."""
280
+ logger = AsyncLogger()
281
+ self.assertEqual(logger.queue.maxsize, 500)
282
+ self.assertEqual(logger.flush_interval, 0.5)
283
+
284
+
285
+ class MiddlewareTestCase(TransactionTestCase):
286
+ def setUp(self):
287
+ super().setUp()
288
+ # Stop the global logger to avoid interference
289
+ stop_async_logger()
290
+ # Clear all existing logs
291
+ with connection.cursor() as cursor:
292
+ cursor.execute("DELETE FROM logq_logentry")
293
+
294
+ def tearDown(self):
295
+ # Clear logs after test
296
+ with connection.cursor() as cursor:
297
+ cursor.execute("DELETE FROM logq_logentry")
298
+ super().tearDown()
299
+
300
+ def test_middleware_request_logging(self):
301
+ """Test that middleware logs requests."""
302
+ # Verify we start with no logs
303
+ self.assertEqual(LogEntry.objects.count(), 0)
304
+
305
+ response = self.client.get('/admin/')
306
+
307
+ time.sleep(0.5)
308
+
309
+ entries = LogEntry.objects.all()
310
+ self.assertGreater(entries.count(), 0)
311
+
312
+ # Should have request start and completion logs
313
+ start_logs = entries.filter(message__contains="Request started")
314
+ complete_logs = entries.filter(message__contains="Request completed")
315
+
316
+ self.assertGreater(start_logs.count(), 0)
317
+ self.assertGreater(complete_logs.count(), 0)
logq/urls.py ADDED
@@ -0,0 +1,9 @@
1
+ from django.urls import path
2
+ from . import views
3
+
4
+ app_name = 'logq'
5
+
6
+ urlpatterns = [
7
+ path('api/log/', views.log_endpoint, name='log_endpoint'),
8
+ path('api/logs/', views.LogAPIView.as_view(), name='log_api'),
9
+ ]
logq/utils.py ADDED
@@ -0,0 +1,141 @@
1
+ from .async_logger import get_async_logger
2
+ from functools import wraps
3
+ import time
4
+
5
+
6
+ def log_function_call(func=None, *, level='INFO'):
7
+ """
8
+ Decorator to automatically log function calls.
9
+
10
+ Usage:
11
+ @log_function_call
12
+ def my_function():
13
+ pass
14
+
15
+ @log_function_call(level='DEBUG')
16
+ def my_debug_function():
17
+ pass
18
+ """
19
+ def decorator(func):
20
+ @wraps(func)
21
+ def wrapper(*args, **kwargs):
22
+ logger = get_async_logger()
23
+
24
+ # Log function entry
25
+ logger.log(
26
+ level,
27
+ f"Entering function: {func.__name__}",
28
+ extra_data={
29
+ 'args_count': len(args),
30
+ 'kwargs_keys': list(kwargs.keys()),
31
+ 'module': func.__module__,
32
+ }
33
+ )
34
+
35
+ start_time = time.time()
36
+ try:
37
+ result = func(*args, **kwargs)
38
+ execution_time = time.time() - start_time
39
+
40
+ # Log successful completion
41
+ logger.log(
42
+ level,
43
+ f"Function completed: {func.__name__} (took {execution_time:.3f}s)",
44
+ extra_data={'execution_time': execution_time}
45
+ )
46
+
47
+ return result
48
+
49
+ except Exception as e:
50
+ execution_time = time.time() - start_time
51
+
52
+ # Log exception
53
+ logger.exception(
54
+ f"Function failed: {func.__name__} (took {execution_time:.3f}s)",
55
+ exc_info=str(e),
56
+ extra_data={'execution_time': execution_time}
57
+ )
58
+ raise
59
+
60
+ return wrapper
61
+
62
+ if func is None:
63
+ return decorator
64
+ return decorator(func)
65
+
66
+
67
+ def log_performance(threshold_seconds=1.0):
68
+ """
69
+ Decorator to log slow function calls.
70
+
71
+ Usage:
72
+ @log_performance(threshold_seconds=0.5)
73
+ def my_slow_function():
74
+ pass
75
+ """
76
+ def decorator(func):
77
+ @wraps(func)
78
+ def wrapper(*args, **kwargs):
79
+ start_time = time.time()
80
+ result = func(*args, **kwargs)
81
+ execution_time = time.time() - start_time
82
+
83
+ if execution_time > threshold_seconds:
84
+ logger = get_async_logger()
85
+ logger.warning(
86
+ f"Slow function detected: {func.__name__} took {execution_time:.3f}s",
87
+ extra_data={
88
+ 'execution_time': execution_time,
89
+ 'threshold': threshold_seconds,
90
+ 'module': func.__module__,
91
+ }
92
+ )
93
+
94
+ return result
95
+ return wrapper
96
+ return decorator
97
+
98
+
99
+ class LogContext:
100
+ """
101
+ Context manager for logging operations with automatic timing.
102
+
103
+ Usage:
104
+ with LogContext("Processing data", level='INFO'):
105
+ # do some work
106
+ pass
107
+ """
108
+
109
+ def __init__(self, message, level='INFO', **kwargs):
110
+ self.message = message
111
+ self.level = level
112
+ self.kwargs = kwargs
113
+ self.logger = get_async_logger()
114
+ self.start_time = None
115
+
116
+ def __enter__(self):
117
+ self.start_time = time.time()
118
+ self.logger.log(
119
+ self.level,
120
+ f"Starting: {self.message}",
121
+ **self.kwargs
122
+ )
123
+ return self
124
+
125
+ def __exit__(self, exc_type, exc_val, exc_tb):
126
+ execution_time = time.time() - self.start_time
127
+
128
+ if exc_type is None:
129
+ self.logger.log(
130
+ self.level,
131
+ f"Completed: {self.message} (took {execution_time:.3f}s)",
132
+ extra_data={'execution_time': execution_time},
133
+ **self.kwargs
134
+ )
135
+ else:
136
+ self.logger.exception(
137
+ f"Failed: {self.message} (took {execution_time:.3f}s)",
138
+ exc_info=str(exc_val),
139
+ extra_data={'execution_time': execution_time},
140
+ **self.kwargs
141
+ )
logq/views.py ADDED
@@ -0,0 +1,103 @@
1
+ from django.http import JsonResponse
2
+ from django.views.decorators.csrf import csrf_exempt
3
+ from django.views.decorators.http import require_http_methods
4
+ from django.utils.decorators import method_decorator
5
+ from django.views import View
6
+ import json
7
+ from .async_logger import get_async_logger
8
+ from .models import LogEntry, LogLevel
9
+
10
+
11
+ @csrf_exempt
12
+ @require_http_methods(["POST"])
13
+ def log_endpoint(request):
14
+ """Simple API endpoint for external logging."""
15
+ try:
16
+ data = json.loads(request.body)
17
+ level = data.get('level', 'INFO')
18
+ message = data.get('message', '')
19
+
20
+ if not message:
21
+ return JsonResponse({'error': 'Message is required'}, status=400)
22
+
23
+ if level not in [choice[0] for choice in LogLevel.choices]:
24
+ return JsonResponse({'error': 'Invalid log level'}, status=400)
25
+
26
+ logger = get_async_logger()
27
+ logger.log(level, message, extra_data=data.get('extra_data', {}))
28
+
29
+ return JsonResponse({'status': 'success'})
30
+
31
+ except json.JSONDecodeError:
32
+ return JsonResponse({'error': 'Invalid JSON'}, status=400)
33
+ except Exception as e:
34
+ return JsonResponse({'error': str(e)}, status=500)
35
+
36
+
37
+ @method_decorator(csrf_exempt, name='dispatch')
38
+ class LogAPIView(View):
39
+ """Class-based view for more advanced logging operations."""
40
+
41
+ def post(self, request):
42
+ """Handle POST requests for logging."""
43
+ try:
44
+ data = json.loads(request.body)
45
+ level = data.get('level', 'INFO')
46
+ message = data.get('message', '')
47
+
48
+ if not message:
49
+                 return JsonResponse({'error': 'Message is required'}, status=400)
+ 
+             if level not in [choice[0] for choice in LogLevel.choices]:
+                 return JsonResponse({'error': 'Invalid log level'}, status=400)
50
+
51
+ logger = get_async_logger()
52
+ logger.log(
53
+ level,
54
+ message,
55
+ user_id=data.get('user_id'),
56
+ request_id=data.get('request_id'),
57
+ extra_data=data.get('extra_data', {})
58
+ )
59
+
60
+ return JsonResponse({'status': 'success'})
61
+
62
+ except json.JSONDecodeError:
63
+ return JsonResponse({'error': 'Invalid JSON'}, status=400)
64
+ except Exception as e:
65
+ return JsonResponse({'error': str(e)}, status=500)
66
+
67
+ def get(self, request):
68
+ """Handle GET requests for retrieving recent logs."""
69
+ try:
70
+ limit = int(request.GET.get('limit', 100))
71
+ level = request.GET.get('level')
72
+ module = request.GET.get('module')
73
+
74
+ query = LogEntry.objects.all()
75
+
76
+ if level:
77
+ query = query.filter(level=level)
78
+ if module:
79
+ query = query.filter(module__icontains=module)
80
+
81
+ logs = query.order_by('-timestamp')[:limit]
82
+
83
+ log_data = []
84
+ for log in logs:
85
+ log_data.append({
86
+ 'id': log.id,
87
+ 'timestamp': log.timestamp.isoformat(),
88
+ 'level': log.level,
89
+ 'message': log.message,
90
+ 'module': log.module,
91
+ 'function': log.function,
92
+ 'line_number': log.line_number,
93
+ 'user_id': log.user_id,
94
+ 'request_id': log.request_id,
95
+ 'extra_data': log.extra_data,
96
+ })
97
+
98
+ return JsonResponse({'logs': log_data})
99
+
100
+ except ValueError:
101
+ return JsonResponse({'error': 'Invalid limit parameter'}, status=400)
102
+ except Exception as e:
103
+ return JsonResponse({'error': str(e)}, status=500)