djlogq 1.0.8__tar.gz → 1.0.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. {djlogq-1.0.8/src/djlogq.egg-info → djlogq-1.0.9}/PKG-INFO +16 -10
  2. {djlogq-1.0.8 → djlogq-1.0.9}/README.md +13 -8
  3. {djlogq-1.0.8 → djlogq-1.0.9}/pyproject.toml +6 -3
  4. {djlogq-1.0.8 → djlogq-1.0.9/src/djlogq.egg-info}/PKG-INFO +16 -10
  5. {djlogq-1.0.8 → djlogq-1.0.9}/src/djlogq.egg-info/SOURCES.txt +2 -0
  6. djlogq-1.0.9/src/logq/apps.py +23 -0
  7. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/async_logger.py +4 -15
  8. djlogq-1.0.9/src/logq/cleanup_service.py +212 -0
  9. djlogq-1.0.9/src/logq/handlers.py +42 -0
  10. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/models.py +2 -1
  11. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/tests.py +68 -4
  12. djlogq-1.0.8/src/logq/apps.py +0 -12
  13. {djlogq-1.0.8 → djlogq-1.0.9}/MANIFEST.in +0 -0
  14. {djlogq-1.0.8 → djlogq-1.0.9}/setup.cfg +0 -0
  15. {djlogq-1.0.8 → djlogq-1.0.9}/src/djlogq.egg-info/dependency_links.txt +0 -0
  16. {djlogq-1.0.8 → djlogq-1.0.9}/src/djlogq.egg-info/requires.txt +0 -0
  17. {djlogq-1.0.8 → djlogq-1.0.9}/src/djlogq.egg-info/top_level.txt +0 -0
  18. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/__init__.py +0 -0
  19. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/admin.py +0 -0
  20. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/management/__init__.py +0 -0
  21. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/management/commands/__init__.py +0 -0
  22. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/management/commands/clean_logs.py +0 -0
  23. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/middleware.py +0 -0
  24. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/migrations/0001_initial.py +0 -0
  25. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/migrations/0002_alter_logentry_function_alter_logentry_line_number_and_more.py +0 -0
  26. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/migrations/__init__.py +0 -0
  27. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/urls.py +0 -0
  28. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/utils.py +0 -0
  29. {djlogq-1.0.8 → djlogq-1.0.9}/src/logq/views.py +0 -0
@@ -1,10 +1,11 @@
  Metadata-Version: 2.4
  Name: djlogq
- Version: 1.0.8
- Summary: A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support.
+ Version: 1.0.9
+ Summary: A robust and reusable Django app for asynchronous, thread-safe logging. Features include rich metadata capture, background log cleanup, configurable retention policies, admin interface, and REST API support for seamless log management.
  Author-email: mess <mesnavunawa@gmail.com>
  License: MIT
  Project-URL: Homepage, https://github.com/Mesake94/djlogq
+ Project-URL: Downloads, https://pepy.tech/projects/djlogq
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: Django
@@ -32,12 +33,7 @@ A reusable Django app that provides asynchronous logging functionality using a s
  - **Extendible**: Easily add your own custom handlers to process logs in different ways.

  **Useful built-in and example handlers include:**
- - **File Handler**: Write logs to a file.
- - **Email Handler**: Send error logs via email.
- - **Webhook Handler**: Forward logs to external services (e.g., Slack, Discord, custom endpoints).
- - **Database Handler**: Store logs in custom database tables.
- - **Console Handler**: Output logs to the console for development.
- - **Third-Party Integrations**: Integrate with services like Sentry or Logstash.
+ - **Console Handler**: Output logs to the console
  - You can implement your own handler by subclassing the provided base handler class.

  ## Installation
@@ -69,10 +65,20 @@ python manage.py migrate
  ASYNC_LOGGING_CONFIG = {
  'MAX_QUEUE_SIZE': 1000,
  'FLUSH_INTERVAL': 1.0, # seconds
- 'AUTO_CLEANUP_DAYS': 30,
+ 'AUTO_CLEANUP_INTERVAL': 3600, # seconds
  'ENABLE_REQUEST_LOGGING': True,
  'IGNORE_PATHS': ['/admin/'], # paths to ignore for request logging
- 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler']
+ 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler'],
+ # CLEANUP_POLICIES defines how long to keep logs of each level before automatic deletion.
+ # Each policy is a dictionary with:
+ # - "days": Number of days to retain logs at this level
+ # - "level": Log level to which this policy applies (e.g., "INFO", "WARNING", "ERROR")
+ # - "enabled": Whether this cleanup policy is active
+ 'CLEANUP_POLICIES': [
+ {"days": 10, "level": "INFO", "enabled": True}, # Keep INFO logs for 10 days
+ {"days": 10, "level": "WARNING", "enabled": True}, # Keep WARNING logs for 10 days
+ {"days": 15, "level": "ERROR", "enabled": True}, # Keep ERROR logs for 15 days
+ ]
  }
  ```

@@ -16,12 +16,7 @@ A reusable Django app that provides asynchronous logging functionality using a s
  - **Extendible**: Easily add your own custom handlers to process logs in different ways.

  **Useful built-in and example handlers include:**
- - **File Handler**: Write logs to a file.
- - **Email Handler**: Send error logs via email.
- - **Webhook Handler**: Forward logs to external services (e.g., Slack, Discord, custom endpoints).
- - **Database Handler**: Store logs in custom database tables.
- - **Console Handler**: Output logs to the console for development.
- - **Third-Party Integrations**: Integrate with services like Sentry or Logstash.
+ - **Console Handler**: Output logs to the console
  - You can implement your own handler by subclassing the provided base handler class.

  ## Installation
@@ -53,10 +48,20 @@ python manage.py migrate
  ASYNC_LOGGING_CONFIG = {
  'MAX_QUEUE_SIZE': 1000,
  'FLUSH_INTERVAL': 1.0, # seconds
- 'AUTO_CLEANUP_DAYS': 30,
+ 'AUTO_CLEANUP_INTERVAL': 3600, # seconds
  'ENABLE_REQUEST_LOGGING': True,
  'IGNORE_PATHS': ['/admin/'], # paths to ignore for request logging
- 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler']
+ 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler'],
+ # CLEANUP_POLICIES defines how long to keep logs of each level before automatic deletion.
+ # Each policy is a dictionary with:
+ # - "days": Number of days to retain logs at this level
+ # - "level": Log level to which this policy applies (e.g., "INFO", "WARNING", "ERROR")
+ # - "enabled": Whether this cleanup policy is active
+ 'CLEANUP_POLICIES': [
+ {"days": 10, "level": "INFO", "enabled": True}, # Keep INFO logs for 10 days
+ {"days": 10, "level": "WARNING", "enabled": True}, # Keep WARNING logs for 10 days
+ {"days": 15, "level": "ERROR", "enabled": True}, # Keep ERROR logs for 15 days
+ ]
  }
  ```

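The README hunks above keep only the Console Handler bullet but still point users at subclassing the provided base handler class and listing it in DEFAULT_HANDLERS. A minimal sketch of such a subclass, written against the LogHandler/LogEntry interfaces added later in this diff; the FileHandler name, the `myapp.handlers` module path, and the output file are illustrative assumptions, not part of the package:

```python
# Hypothetical custom handler for djlogq; a sketch only, assuming the
# LogHandler base class and LogEntry model shipped in logq 1.0.9.
from logq.handlers import LogHandler
from logq.models import LogEntry


class FileHandler(LogHandler):
    """Append buffered log entries to a text file (illustrative example)."""

    def __init__(self, path="djlogq.log"):  # the path default is an assumption
        self.path = path
        self.buffer = []

    def handle(self, log_entry: LogEntry) -> None:
        # Called for each entry; buffer it until flush() runs.
        self.buffer.append(
            f"[{log_entry.timestamp:%Y-%m-%d %H:%M:%S}] {log_entry.level}: {log_entry.message}"
        )

    def flush(self) -> None:
        # Write out and clear whatever has been buffered so far.
        if not self.buffer:
            return
        with open(self.path, "a") as f:
            f.write("\n".join(self.buffer) + "\n")
        self.buffer.clear()
```

Such a class would then be referenced by dotted path, e.g. 'DEFAULT_HANDLERS': ['myapp.handlers.FileHandler'], mirroring the 'logq.handlers.FileHandler' example in the config comment above.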
@@ -4,8 +4,8 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "djlogq"
- version = "1.0.8"
- description = "A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support."
+ version = "1.0.9"
+ description = "A robust and reusable Django app for asynchronous, thread-safe logging. Features include rich metadata capture, background log cleanup, configurable retention policies, admin interface, and REST API support for seamless log management."
  readme = "README.md"
  authors = [
  {name = "mess", email = "mesnavunawa@gmail.com"}
@@ -16,7 +16,10 @@ dependencies = [
  "Django",
  "requests",
  ]
- urls = {Homepage = "https://github.com/Mesake94/djlogq"}
+
+ # [![PyPI Downloads](https://static.pepy.tech/badge/djlogq)](https://pepy.tech/projects/djlogq)
+ urls = {Homepage = "https://github.com/Mesake94/djlogq", Downloads = "https://pepy.tech/projects/djlogq"}
+
  [project.optional-dependencies]
  dev = [
  "build==1.2.1",
@@ -1,10 +1,11 @@
  Metadata-Version: 2.4
  Name: djlogq
- Version: 1.0.8
- Summary: A reusable Django app for asynchronous, thread-safe logging with rich metadata, admin interface, and API support.
+ Version: 1.0.9
+ Summary: A robust and reusable Django app for asynchronous, thread-safe logging. Features include rich metadata capture, background log cleanup, configurable retention policies, admin interface, and REST API support for seamless log management.
  Author-email: mess <mesnavunawa@gmail.com>
  License: MIT
  Project-URL: Homepage, https://github.com/Mesake94/djlogq
+ Project-URL: Downloads, https://pepy.tech/projects/djlogq
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: Django
@@ -32,12 +33,7 @@ A reusable Django app that provides asynchronous logging functionality using a s
  - **Extendible**: Easily add your own custom handlers to process logs in different ways.

  **Useful built-in and example handlers include:**
- - **File Handler**: Write logs to a file.
- - **Email Handler**: Send error logs via email.
- - **Webhook Handler**: Forward logs to external services (e.g., Slack, Discord, custom endpoints).
- - **Database Handler**: Store logs in custom database tables.
- - **Console Handler**: Output logs to the console for development.
- - **Third-Party Integrations**: Integrate with services like Sentry or Logstash.
+ - **Console Handler**: Output logs to the console
  - You can implement your own handler by subclassing the provided base handler class.

  ## Installation
@@ -69,10 +65,20 @@ python manage.py migrate
  ASYNC_LOGGING_CONFIG = {
  'MAX_QUEUE_SIZE': 1000,
  'FLUSH_INTERVAL': 1.0, # seconds
- 'AUTO_CLEANUP_DAYS': 30,
+ 'AUTO_CLEANUP_INTERVAL': 3600, # seconds
  'ENABLE_REQUEST_LOGGING': True,
  'IGNORE_PATHS': ['/admin/'], # paths to ignore for request logging
- 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler']
+ 'DEFAULT_HANDLERS': [], # list of handler class paths, e.g. ['logq.handlers.FileHandler'],
+ # CLEANUP_POLICIES defines how long to keep logs of each level before automatic deletion.
+ # Each policy is a dictionary with:
+ # - "days": Number of days to retain logs at this level
+ # - "level": Log level to which this policy applies (e.g., "INFO", "WARNING", "ERROR")
+ # - "enabled": Whether this cleanup policy is active
+ 'CLEANUP_POLICIES': [
+ {"days": 10, "level": "INFO", "enabled": True}, # Keep INFO logs for 10 days
+ {"days": 10, "level": "WARNING", "enabled": True}, # Keep WARNING logs for 10 days
+ {"days": 15, "level": "ERROR", "enabled": True}, # Keep ERROR logs for 15 days
+ ]
  }
  ```

@@ -10,6 +10,8 @@ src/logq/__init__.py
  src/logq/admin.py
  src/logq/apps.py
  src/logq/async_logger.py
+ src/logq/cleanup_service.py
+ src/logq/handlers.py
  src/logq/middleware.py
  src/logq/models.py
  src/logq/tests.py
@@ -0,0 +1,23 @@
+ from django.apps import AppConfig
+ from django.conf import settings
+ import os
+
+
+ class LogqConfig(AppConfig):
+     default_auto_field = 'django.db.models.BigAutoField'
+     name = 'logq'
+     verbose_name = 'LogQ'
+
+     def ready(self):
+         """Initialize the async logger when the app is ready."""
+         # Prevent multiple initializations during development server reload
+         # RUN_MAIN is set to 'true' only in the child process that actually runs the app
+         if os.environ.get('RUN_MAIN') != 'true':
+             return
+
+         from .async_logger import get_async_logger
+         from .cleanup_service import start_cleanup_service
+
+         get_async_logger()
+         # dont start the cleanup service in test mode
+         start_cleanup_service()
@@ -10,18 +10,7 @@ from django.db import transaction
  from django.conf import settings
  from .models import LogEntry, LogLevel
  from typing import List
-
-
- class LogHandler:
- """Base class for custom log handlers"""
-
- def handle(self, log_entry:LogEntry) -> None:
- """Handle a log entry. Overide this method to implement custom logging behavior."""
- pass
-
- def flush(self) -> None:
- """Flush any buffered log entries. Override this method to implement custom flushing behavior."""
- pass
+ from .handlers import LogHandler, ConsoleHandler


  class AsyncLogger:
@@ -44,7 +33,7 @@ class AsyncLogger:
  self._dropped_lock = threading.Lock()

  # initialize custom handlers
- self.handlers = handlers or []
+ self.handlers = handlers or [ConsoleHandler()]
  self._add_default_handlers() # add default handlers to the logger

  def _add_default_handlers(self):
@@ -76,7 +65,8 @@

  def clear_handlers(self):
  """Remove all custom handlers from the logger."""
-
+ self.handlers = []
+
  def start(self):
  """Start the logging thread."""
  with self._lock:
@@ -115,7 +105,6 @@
  self._flush_batch(batch)
  batch = []
  last_flush = current_time
-
  except Exception as e:
  # Log the error to prevent infinite loops
  print(f"Error in async logger worker: {e}")
@@ -0,0 +1,212 @@
+ import threading
+ import time
+ from datetime import timedelta
+ from django.utils import timezone
+ from django.conf import settings
+ from django.core.management import call_command
+ from typing import List
+ from .models import LogEntry
+ from .async_logger import get_async_logger
+
+
+ class CleanupPolicy:
+     """Define cleanup policy for log entries."""
+
+     def __init__(self, days:int, level:str=None, enabled:bool=True):
+         self.days = days
+         self.level = level
+         self.enabled = enabled
+
+     def __str__(self):
+         level_str = f"level={self.level}" if self.level else "all levels"
+         return f"Delete logs older than {self.days} days {level_str}"
+
+
+ class PeriodicCleanupService:
+     """Service that periodically cleans up old log entries.
+     Runs in a separate thread to avoid blocking the main application.
+     """
+
+     def __init__(self, policies:List[CleanupPolicy], check_interval:int=None):
+         """Initialize the cleanup service:
+         Args:
+             policies: List of cleanup policies to apply.
+             check_interval: Interval in seconds between cleanup runs.
+         """
+         self.policies = policies
+         self.check_interval = check_interval or self._get_check_interval()
+
+         self.running = False
+         self.thread = None
+         self._lock = threading.Lock()
+         self.logger = get_async_logger()
+
+         # Track last cleanup times for each policy
+         self.last_cleanup = {}
+         for policy in policies:
+             self.last_cleanup[policy] = None
+
+     def _get_check_interval(self) -> int:
+         """Get the check interval from settings."""
+         config = getattr(settings, 'ASYNC_LOGGING_CONFIG', {})
+         return config.get('AUTO_CLEANUP_INTERVAL', 3600)
+
+     def start(self):
+         """Start the cleanup service."""
+         with self._lock:  # Ensure thread safety
+             if not self.running:  # Only start if not already running
+                 self.running = True
+                 self.thread = threading.Thread(target=self._worker, daemon=True)
+                 self.thread.start()
+                 self.logger.info(f"Cleanup service started...")
+             else:
+                 self.logger.info("Cleanup service already running")
+
+
+     def stop(self):
+         """Stop the cleanup service."""
+         with self._lock:
+             if self.running:
+                 self.running = False
+                 if self.thread:
+                     self.thread.join(timeout=10.0)  # Wait for thread to finish
+                     self.thread = None
+                 self.logger.info("Cleanup service stopped")
+
+     def _worker(self):
+         """Main worker thread that runs cleanup checks."""
+         while self.running:
+             try:
+                 self._check_cleanup()
+                 time.sleep(self.check_interval)
+             except Exception as e:
+                 self.logger.error(f"Cleanup service has stopped: {e}")
+                 time.sleep(60)  # Wait for 1 minute before retrying
+
+     def _check_cleanup(self):
+         """Check if any cleanup policies should be applied."""
+         with self._lock:
+             now = timezone.now()
+             for policy in self.policies:
+                 if not policy.enabled:
+                     self.logger.info(f"Cleanup policy {policy} is disabled")
+                     continue
+
+                 # Check if it's time to run this policy
+                 if self._should_run_policy(policy):
+                     self._run_cleanup_policy(policy)
+
+     def _should_run_policy(self, policy:CleanupPolicy) -> bool:
+         """Check if it's time to run this policy."""
+         last_cleanup = self.last_cleanup.get(policy)
+         if last_cleanup is None:
+             return True
+
+         # Check if the policy has been run in the last check interval
+         return timezone.now() - last_cleanup > timedelta(seconds=self.check_interval)
+
+     def _run_cleanup_policy(self, policy:CleanupPolicy):
+         """Run the cleanup policy."""
+         try:
+             self.logger.info(f"Running cleanup policy: {policy}")
+             args = [
+                 '--days', str(policy.days),
+             ]
+             if policy.level:
+                 args.extend(['--level', policy.level])
+
+             # Run the cleanup command
+             call_command('clean_logs', *args, verbosity=0)
+             # Update the last cleanup time for this policy
+             self.last_cleanup[policy] = timezone.now()
+         except Exception as e:
+             self.logger.error(f"Cleanup policy {policy} has failed: {e}")
+
+ _cleanup_service = None
+ _cleanup_service_lock = threading.Lock()
+
+
+ def get_cleanup_service() -> PeriodicCleanupService:
+     """
+     Retrieve the singleton instance of the PeriodicCleanupService.
+
+     This function ensures that only one instance of the PeriodicCleanupService exists
+     throughout the application's lifecycle. If the service has not yet been created,
+     it reads the cleanup policies from the Django settings (specifically from
+     ASYNC_LOGGING_CONFIG['CLEANUP_POLICIES']), constructs CleanupPolicy objects for each
+     policy, and initializes the PeriodicCleanupService with these policies and a default
+     check interval of 10 seconds.
+
+     Returns:
+         PeriodicCleanupService: The singleton instance of the cleanup service, which
+         periodically checks and applies log cleanup policies as configured.
+
+     Notes:
+         - The cleanup policies should be defined in Django settings under
+           ASYNC_LOGGING_CONFIG['CLEANUP_POLICIES'] as a list of dictionaries, each
+           representing a policy's parameters.
+         - The service is thread-safe and intended to be started and stopped via
+           start_cleanup_service() and stop_cleanup_service().
+
+     Example:
+         service = get_cleanup_service()
+         service.start()
+     """
+     global _cleanup_service
+     with _cleanup_service_lock:
+         if _cleanup_service is None:
+             # read the policies from the config
+             policies = getattr(settings, 'ASYNC_LOGGING_CONFIG', {}).get('CLEANUP_POLICIES', [])
+             policies = [CleanupPolicy(**policy) for policy in policies]
+             _cleanup_service = PeriodicCleanupService(
+                 policies=policies,
+             )
+     return _cleanup_service
+
+
+ def start_cleanup_service():
+     """
+     Start the periodic log cleanup service.
+
+     This function retrieves the singleton instance of the PeriodicCleanupService
+     (which is responsible for periodically applying log cleanup policies as defined
+     in the Django settings) and starts its background thread. If the service is
+     already running, calling this function has no effect.
+
+     Usage:
+         start_cleanup_service()
+
+     Notes:
+         - The cleanup service will run in the background, periodically checking and
+           applying the configured cleanup policies.
+         - To stop the service, use stop_cleanup_service().
+         - This function is thread-safe and can be called multiple times safely.
+
+     See Also:
+         - get_cleanup_service(): Retrieves the singleton cleanup service instance.
+         - stop_cleanup_service(): Stops the running cleanup service.
+     """
+     service = get_cleanup_service()
+     service.start()
+
+
+ def stop_cleanup_service():
+     """
+     Stop the periodic log cleanup service.
+
+     This function retrieves the singleton instance of the PeriodicCleanupService
+     (which is responsible for periodically applying log cleanup policies as defined
+     in the Django settings) and stops its background thread. If the service is
+     not running, calling this function has no effect.
+
+     Usage:
+         stop_cleanup_service()
+
+     Notes:
+         - This function is thread-safe and can be called multiple times safely.
+         - The service will stop running after the current cleanup cycle completes.
+     """
+     global _cleanup_service
+     with _cleanup_service_lock:
+         if _cleanup_service:
+             _cleanup_service.stop()
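As the docstrings above note, get_cleanup_service() expands each ASYNC_LOGGING_CONFIG['CLEANUP_POLICIES'] dict into CleanupPolicy(**policy) and wraps them in a singleton PeriodicCleanupService. A hedged sketch of driving the same classes by hand, for example from a Django shell; the policy values and the 3600-second interval are illustrative, and a configured Django environment is assumed:

```python
# Sketch only: exercising the cleanup service directly instead of relying
# on LogqConfig.ready() to start it. Assumes Django settings are loaded.
from logq.cleanup_service import (
    CleanupPolicy,
    PeriodicCleanupService,
    start_cleanup_service,
    stop_cleanup_service,
)

# Equivalent to CLEANUP_POLICIES entries expanded via CleanupPolicy(**policy).
policies = [
    CleanupPolicy(days=10, level="INFO", enabled=True),
    CleanupPolicy(days=15, level="ERROR", enabled=True),
    CleanupPolicy(days=30),  # level=None applies to all levels
]

# Build a service directly with an explicit check interval (seconds)...
service = PeriodicCleanupService(policies=policies, check_interval=3600)
service.start()  # spawns the daemon worker thread
service.stop()   # joins the worker and marks the service stopped

# ...or use the module-level singleton helpers, which read the policies
# from ASYNC_LOGGING_CONFIG instead of the list above.
start_cleanup_service()
stop_cleanup_service()
```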
@@ -0,0 +1,42 @@
+ from .models import LogEntry
+ import sys
+
+ class LogHandler:
+     """Base class for custom log handlers"""
+
+     def handle(self, log_entry:LogEntry) -> None:
+         """Handle a log entry. Overide this method to implement custom logging behavior."""
+         pass
+
+     def flush(self) -> None:
+         """Flush any buffered log entries. Override this method to implement custom flushing behavior."""
+         pass
+
+
+ class ConsoleHandler(LogHandler):
+     """Log handler that prints to the console"""
+
+     def __init__(self):
+         self.buffer = []
+
+     def handle(self, log_entry:LogEntry):
+         self.buffer.append({
+             "message": log_entry.message,
+             "level": log_entry.level,
+             "timestamp": log_entry.timestamp.strftime("%Y-%m-%d %H:%M:%S"),
+         })
+
+     def flush(self):
+         if not self.buffer:
+             return
+         lines = []
+         for entry in self.buffer:
+             line = (
+                 f"[{entry['timestamp']}] "
+                 f"{entry['level']}: "
+                 f"{entry['message']} "
+             )
+             lines.append(line)
+         sys.stdout.write('\n'.join(lines) + '\n')
+         sys.stdout.flush()
+         self.buffer.clear()
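The new ConsoleHandler buffers entries in handle() and writes them to stdout in flush(). Two ways it could be enabled, sketched from the pieces shown elsewhere in this diff; the handlers= keyword on AsyncLogger is inferred from `self.handlers = handlers or [ConsoleHandler()]` and is an assumption, not documented API:

```python
# Sketch only; both routes assume a Django project with djlogq installed.

# 1) In settings.py: DEFAULT_HANDLERS takes dotted class paths, per the
#    README snippet earlier in this diff.
ASYNC_LOGGING_CONFIG = {
    "DEFAULT_HANDLERS": ["logq.handlers.ConsoleHandler"],
}

# 2) Directly on a logger instance (keyword name inferred, see note above).
from logq.async_logger import AsyncLogger
from logq.handlers import ConsoleHandler

logger = AsyncLogger(flush_interval=0.5, handlers=[ConsoleHandler()])
logger.start()
logger.info("hello from djlogq")  # buffered by ConsoleHandler, printed on flush
logger.stop()
```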
@@ -41,4 +41,5 @@ class LogEntry(models.Model):
  def __str__(self):
  """Return a string representation of the log entry."""
  return f"[{self.level}] {self.timestamp.strftime('%Y-%m-%d %H:%M:%S')} - {self.message[:100]}"
-
+
+
@@ -3,12 +3,14 @@ from django.urls import reverse
  from django.contrib.auth.models import User
  from django.utils import timezone
  from django.db import connection
+ from datetime import timedelta
  import json
  import time
  import threading
  from .models import LogEntry, LogLevel
  from .async_logger import AsyncLogger, get_async_logger, stop_async_logger, LogHandler
  from .utils import log_performance, log_function_call
+ from .cleanup_service import get_cleanup_service, start_cleanup_service, stop_cleanup_service


  class AsyncLoggerTestCase(TransactionTestCase):
@@ -106,7 +108,6 @@
  self.assertEqual(dropped_entry.level, LogLevel.WARNING)


-
  class LogEntryModelTestCase(TransactionTestCase):
  def setUp(self):
  super().setUp()
@@ -282,11 +283,25 @@ class LogHandlerTestCase(TransactionTestCase):

  # Create a test handler
  class TestHandler(LogHandler):
+
+ def __init__(self):
+ self.buffer = []
+
  def handle(self, log_entry:LogEntry) -> None:
- pass
+ self.buffer.append({
+ "message": log_entry.message,
+ "level": log_entry.level,
+ "timestamp": log_entry.timestamp.strftime("%Y-%m-%d %H:%M:%S"),
+ "module": log_entry.module,
+ "function": log_entry.function,
+ "line_number": log_entry.line_number,
+ "user_id": log_entry.user_id,
+ })

  def flush(self) -> None:
- pass
+ with open("test_log.log", "a") as f:
+ f.write(json.dumps(self.buffer) + "\n")
+ self.buffer.clear() # Clear the buffer after writing to file

  # Create a logger with the test handler
  logger = get_async_logger()
@@ -306,4 +321,53 @@
  # Stop the logger
  logger.stop()
  time.sleep(0.2) # Wait for thread to stop
-
+
+
+ # class CleanupServiceTestCase(TransactionTestCase):
+ # def setUp(self):
+ # super().setUp()
+ # # Stop the global logger to avoid interference
+ # stop_async_logger()
+
+ # # Clear all existing logs
+ # with connection.cursor() as cursor:
+ # cursor.execute("DELETE FROM logq_logentry")
+
+ # # Create a properly configured global logger
+ # from .async_logger import _async_logger
+ # from . import async_logger as async_logger_module
+ # from .cleanup_service import get_cleanup_service, start_cleanup_service, stop_cleanup_service
+
+ # # Create a test logger with fast flush interval
+ # test_logger = AsyncLogger(max_queue_size=100, flush_interval=0.1)
+ # test_logger.start()
+
+ # # Replace the global logger
+ # async_logger_module._async_logger = test_logger
+ # # create cleanup service
+ # cleanup_service = get_cleanup_service()
+ # cleanup_service.start()
+ # time.sleep(0.2) # Wait for thre
+
+ # def tearDown(self):
+ # # Stop the global logger
+ # stop_async_logger()
+ # time.sleep(0.2) # Wait for thread to stop
+
+ # # Clear logs after test
+ # with connection.cursor() as cursor:
+ # cursor.execute("DELETE FROM logq_logentry")
+
+ # # stop cleanup service
+ # stop_cleanup_service()
+ # super().tearDown()
+
+ # def test_cleanup_service(self):
+ # # create a log entry
+ # logger = get_async_logger()
+ # logger.info("Test message")
+ # time.sleep(0.5)
+
+ # # check that the log entry is created
+ # self.assertEqual(LogEntry.objects.count(), 1)
+
@@ -1,12 +0,0 @@
- from django.apps import AppConfig
-
-
- class LogqConfig(AppConfig):
-     default_auto_field = 'django.db.models.BigAutoField'
-     name = 'logq'
-     verbose_name = 'LogQ'
-
-     def ready(self):
-         """Initialize the async logger when the app is ready."""
-         from .async_logger import get_async_logger
-         get_async_logger()
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes