django-cfg 1.5.1__py3-none-any.whl → 1.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of django-cfg might be problematic. Click here for more details.

Files changed (121) hide show
  1. django_cfg/__init__.py +1 -1
  2. django_cfg/apps/dashboard/TRANSACTION_FIX.md +73 -0
  3. django_cfg/apps/dashboard/serializers/__init__.py +0 -12
  4. django_cfg/apps/dashboard/serializers/activity.py +1 -1
  5. django_cfg/apps/dashboard/services/__init__.py +0 -2
  6. django_cfg/apps/dashboard/services/charts_service.py +4 -3
  7. django_cfg/apps/dashboard/services/statistics_service.py +11 -2
  8. django_cfg/apps/dashboard/services/system_health_service.py +64 -106
  9. django_cfg/apps/dashboard/urls.py +0 -2
  10. django_cfg/apps/dashboard/views/__init__.py +0 -2
  11. django_cfg/apps/dashboard/views/commands_views.py +3 -6
  12. django_cfg/apps/dashboard/views/overview_views.py +14 -13
  13. django_cfg/apps/knowbase/apps.py +2 -2
  14. django_cfg/apps/maintenance/admin/api_key_admin.py +2 -3
  15. django_cfg/apps/newsletter/admin/newsletter_admin.py +12 -11
  16. django_cfg/apps/rq/__init__.py +9 -0
  17. django_cfg/apps/rq/apps.py +80 -0
  18. django_cfg/apps/rq/management/__init__.py +1 -0
  19. django_cfg/apps/rq/management/commands/__init__.py +1 -0
  20. django_cfg/apps/rq/management/commands/rqscheduler.py +31 -0
  21. django_cfg/apps/rq/management/commands/rqstats.py +33 -0
  22. django_cfg/apps/rq/management/commands/rqworker.py +31 -0
  23. django_cfg/apps/rq/management/commands/rqworker_pool.py +27 -0
  24. django_cfg/apps/rq/serializers/__init__.py +40 -0
  25. django_cfg/apps/rq/serializers/health.py +60 -0
  26. django_cfg/apps/rq/serializers/job.py +100 -0
  27. django_cfg/apps/rq/serializers/queue.py +80 -0
  28. django_cfg/apps/rq/serializers/schedule.py +178 -0
  29. django_cfg/apps/rq/serializers/testing.py +139 -0
  30. django_cfg/apps/rq/serializers/worker.py +58 -0
  31. django_cfg/apps/rq/services/__init__.py +25 -0
  32. django_cfg/apps/rq/services/config_helper.py +233 -0
  33. django_cfg/apps/rq/services/models/README.md +417 -0
  34. django_cfg/apps/rq/services/models/__init__.py +30 -0
  35. django_cfg/apps/rq/services/models/event.py +123 -0
  36. django_cfg/apps/rq/services/models/job.py +99 -0
  37. django_cfg/apps/rq/services/models/queue.py +92 -0
  38. django_cfg/apps/rq/services/models/worker.py +104 -0
  39. django_cfg/apps/rq/services/rq_converters.py +183 -0
  40. django_cfg/apps/rq/tasks/__init__.py +23 -0
  41. django_cfg/apps/rq/tasks/demo_tasks.py +284 -0
  42. django_cfg/apps/rq/urls.py +54 -0
  43. django_cfg/apps/rq/views/__init__.py +19 -0
  44. django_cfg/apps/rq/views/jobs.py +882 -0
  45. django_cfg/apps/rq/views/monitoring.py +248 -0
  46. django_cfg/apps/rq/views/queues.py +261 -0
  47. django_cfg/apps/rq/views/schedule.py +400 -0
  48. django_cfg/apps/rq/views/testing.py +761 -0
  49. django_cfg/apps/rq/views/workers.py +195 -0
  50. django_cfg/apps/urls.py +6 -7
  51. django_cfg/core/base/config_model.py +10 -26
  52. django_cfg/core/builders/apps_builder.py +4 -11
  53. django_cfg/core/generation/integration_generators/__init__.py +3 -6
  54. django_cfg/core/generation/integration_generators/django_rq.py +80 -0
  55. django_cfg/core/generation/orchestrator.py +9 -19
  56. django_cfg/core/integration/display/startup.py +6 -20
  57. django_cfg/mixins/__init__.py +2 -0
  58. django_cfg/mixins/superadmin_api.py +59 -0
  59. django_cfg/models/__init__.py +3 -3
  60. django_cfg/models/django/__init__.py +3 -3
  61. django_cfg/models/django/django_rq.py +621 -0
  62. django_cfg/models/django/revolution_legacy.py +1 -1
  63. django_cfg/modules/base.py +4 -6
  64. django_cfg/modules/django_admin/config/background_task_config.py +4 -4
  65. django_cfg/modules/django_admin/utils/html/composition.py +9 -2
  66. django_cfg/modules/django_unfold/navigation.py +1 -26
  67. django_cfg/pyproject.toml +4 -4
  68. django_cfg/registry/core.py +4 -7
  69. django_cfg/static/frontend/admin.zip +0 -0
  70. django_cfg/templates/admin/constance/includes/results_list.html +73 -0
  71. django_cfg/templates/admin/index.html +187 -62
  72. django_cfg/templatetags/django_cfg.py +61 -1
  73. {django_cfg-1.5.1.dist-info → django_cfg-1.5.3.dist-info}/METADATA +5 -6
  74. {django_cfg-1.5.1.dist-info → django_cfg-1.5.3.dist-info}/RECORD +77 -82
  75. django_cfg/apps/dashboard/permissions.py +0 -48
  76. django_cfg/apps/dashboard/serializers/django_q2.py +0 -50
  77. django_cfg/apps/dashboard/services/django_q2_service.py +0 -159
  78. django_cfg/apps/dashboard/views/django_q2_views.py +0 -79
  79. django_cfg/apps/tasks/__init__.py +0 -64
  80. django_cfg/apps/tasks/admin/__init__.py +0 -4
  81. django_cfg/apps/tasks/admin/config.py +0 -98
  82. django_cfg/apps/tasks/admin/task_log.py +0 -238
  83. django_cfg/apps/tasks/apps.py +0 -15
  84. django_cfg/apps/tasks/filters/__init__.py +0 -10
  85. django_cfg/apps/tasks/filters/task_log.py +0 -121
  86. django_cfg/apps/tasks/migrations/0001_initial.py +0 -196
  87. django_cfg/apps/tasks/migrations/0002_delete_tasklog.py +0 -16
  88. django_cfg/apps/tasks/migrations/__init__.py +0 -0
  89. django_cfg/apps/tasks/models/__init__.py +0 -4
  90. django_cfg/apps/tasks/models/task_log.py +0 -246
  91. django_cfg/apps/tasks/serializers/__init__.py +0 -28
  92. django_cfg/apps/tasks/serializers/task_log.py +0 -249
  93. django_cfg/apps/tasks/services/__init__.py +0 -10
  94. django_cfg/apps/tasks/services/client/__init__.py +0 -7
  95. django_cfg/apps/tasks/services/client/client.py +0 -234
  96. django_cfg/apps/tasks/services/config_helper.py +0 -63
  97. django_cfg/apps/tasks/services/sync.py +0 -204
  98. django_cfg/apps/tasks/urls.py +0 -16
  99. django_cfg/apps/tasks/views/__init__.py +0 -10
  100. django_cfg/apps/tasks/views/task_log.py +0 -41
  101. django_cfg/apps/tasks/views/task_log_base.py +0 -41
  102. django_cfg/apps/tasks/views/task_log_overview.py +0 -100
  103. django_cfg/apps/tasks/views/task_log_related.py +0 -41
  104. django_cfg/apps/tasks/views/task_log_stats.py +0 -91
  105. django_cfg/apps/tasks/views/task_log_timeline.py +0 -81
  106. django_cfg/core/generation/integration_generators/django_q2.py +0 -133
  107. django_cfg/core/generation/integration_generators/tasks.py +0 -88
  108. django_cfg/models/django/django_q2.py +0 -514
  109. django_cfg/models/tasks/__init__.py +0 -49
  110. django_cfg/models/tasks/backends.py +0 -122
  111. django_cfg/models/tasks/config.py +0 -209
  112. django_cfg/models/tasks/utils.py +0 -162
  113. django_cfg/modules/django_q2/README.md +0 -140
  114. django_cfg/modules/django_q2/__init__.py +0 -8
  115. django_cfg/modules/django_q2/apps.py +0 -107
  116. django_cfg/modules/django_q2/management/__init__.py +0 -0
  117. django_cfg/modules/django_q2/management/commands/__init__.py +0 -0
  118. django_cfg/modules/django_q2/management/commands/sync_django_q_schedules.py +0 -74
  119. {django_cfg-1.5.1.dist-info → django_cfg-1.5.3.dist-info}/WHEEL +0 -0
  120. {django_cfg-1.5.1.dist-info → django_cfg-1.5.3.dist-info}/entry_points.txt +0 -0
  121. {django_cfg-1.5.1.dist-info → django_cfg-1.5.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,92 @@
1
+ """
2
+ Pydantic models for RQ Queues.
3
+
4
+ Internal models for queue validation and business logic.
5
+ NO NESTED JSON - all fields are flat!
6
+ """
7
+
8
+ from typing import Optional
9
+
10
+ from pydantic import BaseModel, Field
11
+
12
+
13
class RQQueueModel(BaseModel):
    """
    Type-safe snapshot of a single RQ queue's statistics.

    Deliberately flat: every field is a primitive so the model can be
    serialized without any nested JSON.
    """

    # Identity
    name: str = Field(..., description="Queue name")
    is_async: bool = Field(default=True, description="Queue is in async mode")

    # Per-status job counters
    count: int = Field(default=0, ge=0, description="Total jobs in queue")
    queued_jobs: int = Field(default=0, ge=0, description="Jobs waiting to be processed")
    started_jobs: int = Field(default=0, ge=0, description="Jobs currently being processed")
    finished_jobs: int = Field(default=0, ge=0, description="Completed jobs")
    failed_jobs: int = Field(default=0, ge=0, description="Failed jobs")
    deferred_jobs: int = Field(default=0, ge=0, description="Deferred jobs")
    scheduled_jobs: int = Field(default=0, ge=0, description="Scheduled jobs")

    # Workers serving this queue
    workers: int = Field(default=0, ge=0, description="Number of workers for this queue")

    # Metadata
    oldest_job_timestamp: Optional[str] = Field(
        None, description="Timestamp of oldest job in queue (ISO 8601)"
    )

    # Redis connection details, kept flat
    connection_host: Optional[str] = Field(None, description="Redis host")
    connection_port: Optional[int] = Field(None, description="Redis port")
    connection_db: Optional[int] = Field(None, description="Redis DB number")

    @property
    def total_jobs(self) -> int:
        """Sum of jobs across every tracked status."""
        return sum(
            (
                self.queued_jobs,
                self.started_jobs,
                self.finished_jobs,
                self.failed_jobs,
                self.deferred_jobs,
                self.scheduled_jobs,
            )
        )

    @property
    def completed_jobs(self) -> int:
        """Jobs that reached a terminal state (finished + failed)."""
        return self.finished_jobs + self.failed_jobs

    @property
    def failure_rate(self) -> float:
        """Percentage of completed jobs that failed (0-100%)."""
        done = self.completed_jobs
        return 0.0 if done == 0 else (self.failed_jobs / done) * 100

    @property
    def is_empty(self) -> bool:
        """True when no jobs are currently in the queue."""
        return self.count == 0

    @property
    def has_workers(self) -> bool:
        """True when at least one worker serves this queue."""
        return self.workers > 0

    @property
    def is_healthy(self) -> bool:
        """
        Heuristic health check.

        A queue counts as healthy when it has workers, its failure rate
        is below 50%, and fewer than 1000 jobs are waiting.
        """
        if not self.has_workers:
            return False
        return self.failure_rate < 50 and self.queued_jobs < 1000
@@ -0,0 +1,104 @@
1
+ """
2
+ Pydantic models for RQ Workers.
3
+
4
+ Internal models for worker validation and business logic.
5
+ NO NESTED JSON - all fields are flat!
6
+ """
7
+
8
+ from datetime import datetime
9
+ from enum import Enum
10
+ from typing import Optional
11
+
12
+ from pydantic import BaseModel, Field
13
+
14
+
15
class WorkerState(str, Enum):
    """Worker state enumeration.

    Inherits from ``str`` so members compare equal to their raw string
    values (e.g. ``WorkerState.IDLE == "idle"``), which keeps serialized
    output flat.
    """

    IDLE = "idle"  # worker is waiting for jobs
    BUSY = "busy"  # worker is currently executing a job
    SUSPENDED = "suspended"  # worker is paused and not taking jobs
21
+
22
+
23
class RQWorkerModel(BaseModel):
    """
    Type-safe snapshot of a single RQ worker.

    Deliberately flat: queue names are a comma-separated string and
    timestamps are ISO 8601 strings, so nothing nests.
    """

    # Identity
    name: str = Field(..., description="Worker name/ID")
    state: WorkerState = Field(..., description="Worker state (idle/busy/suspended)")

    # Served queues, comma-separated to stay flat
    queues: str = Field(..., description="Comma-separated queue names (e.g., 'default,high,low')")

    # Job currently being executed, if any
    current_job_id: Optional[str] = Field(None, description="Current job ID if busy")

    # Lifecycle timestamps as ISO strings
    birth: str = Field(..., description="Worker start time (ISO 8601)")
    last_heartbeat: str = Field(..., description="Last heartbeat timestamp (ISO 8601)")

    # Lifetime counters
    successful_job_count: int = Field(default=0, ge=0, description="Total successful jobs")
    failed_job_count: int = Field(default=0, ge=0, description="Total failed jobs")
    total_working_time: float = Field(default=0.0, ge=0.0, description="Total working time in seconds")

    @staticmethod
    def _seconds_since(iso_timestamp: str) -> Optional[float]:
        """Seconds elapsed since *iso_timestamp*, or None if unparseable."""
        try:
            then = datetime.fromisoformat(iso_timestamp)
        except (ValueError, TypeError):
            return None
        # Match the timestamp's awareness so naive/aware values never mix.
        now = datetime.now(then.tzinfo) if then.tzinfo else datetime.now()
        return (now - then).total_seconds()

    @property
    def is_alive(self) -> bool:
        """
        Whether the worker looks alive.

        A worker counts as alive when its last heartbeat was under 60
        seconds ago; an unparseable heartbeat counts as dead.
        """
        age = self._seconds_since(self.last_heartbeat)
        return age is not None and age < 60

    @property
    def is_busy(self) -> bool:
        """True when the worker is executing a job."""
        return self.state == WorkerState.BUSY

    @property
    def is_idle(self) -> bool:
        """True when the worker is waiting for work."""
        return self.state == WorkerState.IDLE

    def get_uptime_seconds(self) -> Optional[float]:
        """Seconds since the worker started, or None if birth is unparseable."""
        return self._seconds_since(self.birth)

    def get_queue_list(self) -> list[str]:
        """Split the flat ``queues`` string back into a list of names."""
        return [part.strip() for part in self.queues.split(",") if part.strip()]

    @property
    def total_job_count(self) -> int:
        """Jobs processed over the worker's lifetime (successful + failed)."""
        return self.successful_job_count + self.failed_job_count

    @property
    def success_rate(self) -> float:
        """Percentage of processed jobs that succeeded (0-100%)."""
        processed = self.total_job_count
        return 0.0 if processed == 0 else (self.successful_job_count / processed) * 100

    class Config:
        """Pydantic config."""

        use_enum_values = True
@@ -0,0 +1,183 @@
1
+ """
2
+ RQ Object to Pydantic Model Converters.
3
+
4
+ Converts RQ objects (Job, Queue, Worker) to type-safe Pydantic models
5
+ for internal business logic.
6
+ """
7
+
8
+ import json
9
+ from typing import Optional
10
+
11
+ from rq import Queue, Worker
12
+ from rq.job import Job
13
+
14
+ from .models import RQJobModel, RQQueueModel, RQWorkerModel, JobStatus, WorkerState
15
+
16
+
17
def job_to_model(job: Job, queue_name: Optional[str] = None) -> RQJobModel:
    """
    Convert RQ Job to Pydantic RQJobModel.

    All nested data (args, kwargs, meta, result) is serialized to JSON
    strings so the resulting model stays flat.

    Args:
        job: RQ Job instance
        queue_name: Queue name (optional, will try to get from job.origin)

    Returns:
        Validated RQJobModel instance
    """
    # Get queue name; fall back to job.origin (the enqueueing queue) or 'unknown'
    if not queue_name:
        queue_name = getattr(job, 'origin', 'unknown')

    # Map RQ status to JobStatus enum; unrecognized statuses fall back to QUEUED
    rq_status = job.get_status()
    status_map = {
        'queued': JobStatus.QUEUED,
        'started': JobStatus.STARTED,
        'finished': JobStatus.FINISHED,
        'failed': JobStatus.FAILED,
        'deferred': JobStatus.DEFERRED,
        'scheduled': JobStatus.SCHEDULED,
        'canceled': JobStatus.CANCELED,
    }
    status = status_map.get(rq_status, JobStatus.QUEUED)

    # Serialize args/kwargs/meta to JSON strings (flat!)
    args_json = json.dumps(list(job.args or []))
    kwargs_json = json.dumps(job.kwargs or {})
    meta_json = json.dumps(job.meta or {})

    # Serialize result to JSON string if available
    result_json = None
    if job.result is not None:
        try:
            result_json = json.dumps(job.result)
        except (TypeError, ValueError):
            # If result is not JSON serializable, convert to string
            result_json = json.dumps(str(job.result))

    # Get dependency IDs as comma-separated string.
    # NOTE(review): _dependency_ids is a private RQ attribute, hence the
    # hasattr guard — confirm it still exists on the pinned RQ version.
    dependency_ids = ""
    if hasattr(job, '_dependency_ids') and job._dependency_ids:
        dependency_ids = ",".join(job._dependency_ids)

    # Timestamps become ISO strings; a missing created_at becomes "".
    return RQJobModel(
        id=job.id,
        func_name=job.func_name or "unknown",
        queue=queue_name,
        status=status,
        created_at=job.created_at.isoformat() if job.created_at else "",
        enqueued_at=job.enqueued_at.isoformat() if job.enqueued_at else None,
        started_at=job.started_at.isoformat() if job.started_at else None,
        ended_at=job.ended_at.isoformat() if job.ended_at else None,
        worker_name=job.worker_name,
        timeout=job.timeout,
        result_ttl=job.result_ttl,
        failure_ttl=job.failure_ttl,
        result_json=result_json,
        exc_info=job.exc_info,
        args_json=args_json,
        kwargs_json=kwargs_json,
        meta_json=meta_json,
        dependency_ids=dependency_ids,
    )
84
+
85
+
86
def worker_to_model(worker: Worker) -> RQWorkerModel:
    """
    Build a validated RQWorkerModel from a live RQ Worker.

    Args:
        worker: RQ Worker instance

    Returns:
        Validated RQWorkerModel instance
    """
    # Translate the raw RQ state string into our enum; anything
    # unrecognized defaults to IDLE.
    state_lookup = {
        'idle': WorkerState.IDLE,
        'busy': WorkerState.BUSY,
        'suspended': WorkerState.SUSPENDED,
    }
    worker_state = state_lookup.get(worker.get_state(), WorkerState.IDLE)

    # Flatten the served queues into a single comma-separated string.
    queues_str = ",".join(queue.name for queue in worker.queues)

    # Timestamps become ISO strings; missing values become "".
    return RQWorkerModel(
        name=worker.name,
        state=worker_state,
        queues=queues_str,
        current_job_id=worker.get_current_job_id(),
        birth=worker.birth_date.isoformat() if worker.birth_date else "",
        last_heartbeat=worker.last_heartbeat.isoformat() if worker.last_heartbeat else "",
        successful_job_count=worker.successful_job_count,
        failed_job_count=worker.failed_job_count,
        total_working_time=worker.total_working_time,
    )
123
+
124
+
125
def queue_to_model(queue: Queue, queue_name: str) -> RQQueueModel:
    """
    Build a validated RQQueueModel from a live RQ Queue.

    Args:
        queue: RQ Queue instance
        queue_name: Queue name

    Returns:
        Validated RQQueueModel instance
    """
    # Timestamp of the oldest pending job, when the queue has anything in it.
    oldest_ts = None
    if queue.count > 0:
        try:
            first_job = queue.get_jobs(0, 1)[0]
            if first_job.created_at:
                oldest_ts = first_job.created_at.isoformat()
        except (IndexError, AttributeError):
            pass

    # Flat Redis connection details, when the client exposes a pool.
    host = port = db = None
    if hasattr(queue.connection, 'connection_pool'):
        conn_kwargs = queue.connection.connection_pool.connection_kwargs
        host = conn_kwargs.get('host', 'unknown')
        port = conn_kwargs.get('port', 6379)
        db = conn_kwargs.get('db', 0)

    # Per-status counts come straight from the queue's job registries.
    return RQQueueModel(
        name=queue_name,
        is_async=queue.is_async,
        count=queue.count,
        queued_jobs=len(queue.get_job_ids()),
        started_jobs=len(queue.started_job_registry),
        finished_jobs=len(queue.finished_job_registry),
        failed_jobs=len(queue.failed_job_registry),
        deferred_jobs=len(queue.deferred_job_registry),
        scheduled_jobs=len(queue.scheduled_job_registry),
        workers=len(Worker.all(queue=queue)),
        oldest_job_timestamp=oldest_ts,
        connection_host=host,
        connection_port=port,
        connection_db=db,
    )
@@ -0,0 +1,23 @@
1
+ """
2
+ Demo and test tasks for RQ testing and simulation.
3
+
4
+ These tasks are used for testing RQ functionality from the frontend.
5
+ """
6
+
7
+ from .demo_tasks import (
8
+ demo_success_task,
9
+ demo_failure_task,
10
+ demo_slow_task,
11
+ demo_progress_task,
12
+ demo_crash_task,
13
+ demo_retry_task,
14
+ )
15
+
16
+ __all__ = [
17
+ 'demo_success_task',
18
+ 'demo_failure_task',
19
+ 'demo_slow_task',
20
+ 'demo_progress_task',
21
+ 'demo_crash_task',
22
+ 'demo_retry_task',
23
+ ]
@@ -0,0 +1,284 @@
1
+ """
2
+ Demo tasks for RQ testing and simulation.
3
+
4
+ These tasks provide various scenarios for testing RQ functionality:
5
+ - Successful completion
6
+ - Failure scenarios
7
+ - Long-running tasks
8
+ - Progress tracking
9
+ - Crash simulation
10
+ - Retry logic
11
+ """
12
+
13
+ import time
14
+ import random
15
+ from django_cfg.modules.django_logging import get_logger
16
+
17
+ logger = get_logger("rq.demo_tasks")
18
+
19
+
20
def demo_success_task(duration: int = 2, message: str = "Demo task completed"):
    """
    Demo task that always finishes successfully.

    Args:
        duration: How long to run (seconds)
        message: Custom message to return

    Returns:
        dict: Success result with message and metadata
    """
    logger.info(f"Starting demo_success_task (duration={duration}s)")

    # Simulate work for the requested duration.
    time.sleep(duration)

    outcome = {
        "status": "success",
        "message": message,
        "duration": duration,
        "timestamp": time.time(),
    }
    logger.info(f"Completed demo_success_task: {outcome}")
    return outcome
43
+
44
+
45
def demo_failure_task(error_message: str = "Simulated failure"):
    """
    Demo task that always raises.

    Args:
        error_message: Custom error message

    Raises:
        ValueError: Always raises this exception
    """
    logger.warning(f"Starting demo_failure_task (will fail with: {error_message})")

    # Brief pause so the job is observably started before failing.
    time.sleep(1)
    failure = ValueError(error_message)
    raise failure
58
+
59
+
60
def demo_slow_task(duration: int = 30, step_interval: int = 5):
    """
    Long-running task for testing worker timeouts and monitoring.

    Sleeps for the full ``duration``, logging progress roughly every
    ``step_interval`` seconds.

    Args:
        duration: Total duration (seconds)
        step_interval: How often to log progress (seconds)

    Returns:
        dict: Success result with timing info
    """
    logger.info(f"Starting demo_slow_task (duration={duration}s)")

    # Guard against a zero/negative interval, which previously raised
    # ZeroDivisionError on the // below.
    if step_interval <= 0:
        step_interval = duration if duration > 0 else 1

    steps = duration // step_interval
    for i in range(steps):
        time.sleep(step_interval)
        progress = ((i + 1) / steps) * 100
        logger.info(f"Progress: {progress:.1f}%")

    # Sleep the remainder so total runtime matches `duration` exactly;
    # the step loop alone drops duration % step_interval seconds.
    remainder = duration - steps * step_interval
    if remainder > 0:
        time.sleep(remainder)

    result = {
        "status": "success",
        "message": "Slow task completed",
        "duration": duration,
        "steps": steps,
    }

    logger.info(f"Completed demo_slow_task: {result}")
    return result
88
+
89
+
90
def demo_progress_task(total_items: int = 100):
    """
    Task that updates progress in job meta.

    Writes a ``progress`` dict (current/total/percentage) into the RQ
    job's meta after each item so the frontend can poll completion.
    Runs fine outside a worker too (job is None, meta updates skipped).

    Args:
        total_items: Number of items to process

    Returns:
        dict: Success result with processed items count
    """
    # Local import: rq is only needed when this task actually runs
    from rq import get_current_job

    logger.info(f"Starting demo_progress_task (items={total_items})")

    # None when called outside an RQ worker context
    job = get_current_job()

    for i in range(total_items):
        # Simulate work
        time.sleep(0.1)

        # Update progress in job meta
        if job:
            job.meta['progress'] = {
                'current': i + 1,
                'total': total_items,
                'percentage': ((i + 1) / total_items) * 100,
            }
            # Persist meta to Redis so observers see the update
            job.save_meta()

        if (i + 1) % 10 == 0:
            logger.info(f"Processed {i + 1}/{total_items} items")

    result = {
        "status": "success",
        "message": "Progress task completed",
        "processed_items": total_items,
    }

    logger.info(f"Completed demo_progress_task: {result}")
    return result
130
+
131
+
132
def demo_crash_task():
    """
    Task that simulates a worker crash (raises SystemExit).

    WARNING: This will actually crash the worker! Use with caution.

    Raises:
        SystemExit: Simulates worker crash
    """
    logger.warning("Starting demo_crash_task (will crash worker!)")
    # Brief pause so the job is observably "started" before the crash.
    time.sleep(1)
    raise SystemExit("Simulated worker crash")
144
+
145
+
146
def demo_retry_task(max_attempts: int = 3, fail_until_attempt: int = 2):
    """
    Task that fails N times before succeeding.

    Useful for testing retry logic. The attempt counter is persisted in
    job meta so it survives across RQ retries; outside a worker every
    call counts as attempt 1.

    Args:
        max_attempts: Maximum retry attempts
        fail_until_attempt: Succeed on this attempt

    Returns:
        dict: Success result if attempt succeeds

    Raises:
        ValueError: If attempt should fail
    """
    # Local import: rq is only needed when this task actually runs
    from rq import get_current_job

    job = get_current_job()

    # Track attempt count in job meta
    if job:
        attempts = job.meta.get('attempts', 0) + 1
        job.meta['attempts'] = attempts
        job.save_meta()
    else:
        attempts = 1

    # NOTE(review): max_attempts only appears in this log line; actual
    # retry enforcement presumably comes from RQ's Retry config — confirm.
    logger.info(f"demo_retry_task attempt {attempts}/{max_attempts}")

    if attempts < fail_until_attempt:
        error_msg = f"Retry attempt {attempts} - failing until attempt {fail_until_attempt}"
        logger.warning(error_msg)
        raise ValueError(error_msg)

    result = {
        "status": "success",
        "message": f"Succeeded on attempt {attempts}",
        "attempts": attempts,
    }

    logger.info(f"Completed demo_retry_task: {result}")
    return result
189
+
190
+
191
def demo_random_task(success_rate: float = 0.7):
    """
    Demo task whose outcome is decided by a single random draw.

    Args:
        success_rate: Probability of success (0.0 to 1.0)

    Returns:
        dict: Success result if task succeeds

    Raises:
        ValueError: If task randomly fails
    """
    logger.info(f"Starting demo_random_task (success_rate={success_rate})")
    time.sleep(2)

    # One draw decides the outcome.
    succeeded = random.random() < success_rate
    if not succeeded:
        error_msg = f"Random task failed (success_rate={success_rate})"
        logger.warning(error_msg)
        raise ValueError(error_msg)

    result = {
        "status": "success",
        "message": "Random task succeeded",
        "success_rate": success_rate,
    }
    logger.info(f"demo_random_task succeeded: {result}")
    return result
219
+
220
+
221
def demo_memory_intensive_task(mb_to_allocate: int = 100):
    """
    Allocate memory in 1MB steps to exercise worker memory limits.

    Args:
        mb_to_allocate: Megabytes of memory to allocate

    Returns:
        dict: Success result with memory info
    """
    logger.info(f"Starting demo_memory_intensive_task (allocating {mb_to_allocate}MB)")

    megabyte = 1024 * 1024  # 1MB chunks
    allocated = []
    for allocated_mb in range(1, mb_to_allocate + 1):
        # Each chunk is a list of ~1MB of pointers (8 bytes per integer).
        allocated.append([0] * (megabyte // 8))
        if allocated_mb % 10 == 0:
            logger.info(f"Allocated {allocated_mb}MB")

    # Hold the allocation briefly so monitoring can observe it.
    time.sleep(2)

    result = {
        "status": "success",
        "message": "Memory intensive task completed",
        "mb_allocated": mb_to_allocate,
    }

    logger.info(f"Completed demo_memory_intensive_task: {result}")
    return result
254
+
255
+
256
def demo_cpu_intensive_task(iterations: int = 1000000):
    """
    Burn CPU with a sum-of-squares loop to exercise worker CPU limits.

    Args:
        iterations: Number of iterations to compute

    Returns:
        dict: Success result with computation info
    """
    logger.info(f"Starting demo_cpu_intensive_task (iterations={iterations})")

    # Accumulate squares of 0..iterations-1; `step` is 1-based so the
    # periodic log line reports how many iterations have completed.
    total = 0
    for step in range(1, iterations + 1):
        total += (step - 1) ** 2
        if step % 100000 == 0:
            logger.info(f"Processed {step}/{iterations} iterations")

    result = {
        "status": "success",
        "message": "CPU intensive task completed",
        "iterations": iterations,
        "result": total,
    }

    logger.info(f"Completed demo_cpu_intensive_task: {result}")
    return result