pybgworker 0.2.1__tar.gz → 0.2.2__tar.gz
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- {pybgworker-0.2.1 → pybgworker-0.2.2}/PKG-INFO +35 -25
- {pybgworker-0.2.1 → pybgworker-0.2.2}/README.md +194 -184
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/__init__.py +1 -1
- pybgworker-0.2.2/pybgworker/ratelimit.py +43 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/sqlite_queue.py +24 -3
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/task.py +12 -2
- pybgworker-0.2.2/pybgworker/worker.py +189 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker.egg-info/PKG-INFO +35 -25
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pyproject.toml +1 -1
- pybgworker-0.2.1/pybgworker/ratelimit.py +0 -26
- pybgworker-0.2.1/pybgworker/worker.py +0 -122
- {pybgworker-0.2.1 → pybgworker-0.2.2}/LICENSE +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/backends.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/cancel.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/cli.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/config.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/failed.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/inspect.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/logger.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/purge.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/queue.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/result.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/retry.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/scheduler.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/state.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/stats.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/utils.py +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker.egg-info/SOURCES.txt +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker.egg-info/dependency_links.txt +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker.egg-info/entry_points.txt +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker.egg-info/requires.txt +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker.egg-info/top_level.txt +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/setup.cfg +0 -0
- {pybgworker-0.2.1 → pybgworker-0.2.2}/tests/test_retry.py +0 -0
{pybgworker-0.2.1 → pybgworker-0.2.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pybgworker
-Version: 0.2.1
+Version: 0.2.2
 Summary: Lightweight production-ready background task worker with cron, rate limiting and JSON observability
 Author: Prabhat Verma
 License: MIT
@@ -25,33 +25,38 @@ Dynamic: license-file
 
 # PyBgWorker
 
-A lightweight, production-ready background task
+A lightweight, production-ready background task library for Python.
 
-PyBgWorker provides a durable SQLite-backed task queue, cron
-rate limiting, retries, and structured observability
-infrastructure.
+PyBgWorker provides a durable SQLite-backed task queue, scheduling (cron and
+countdown/ETA), rate limiting, retries, and structured observability without
+external infrastructure.
 
 It is designed to be simple, reliable, and easy to deploy.
 
 ---
 
-##
+## Features
 
 - Persistent SQLite task queue
 - Multi-worker safe execution
+- Task scheduling: cron + countdown/ETA
 - Retry + failure handling
+- Task cancellation support
 - Crash isolation via subprocess
--
+- Task priority execution
+- Task status tracking
+- Result storage and retrieval
+- Worker statistics and monitoring
 - JSON structured logging
 - Task duration tracking
 - Rate limiting (overload protection)
 - Heartbeat monitoring
-- CLI inspect
+- CLI tools: inspect, retry, failed, purge, cancel, stats
 - Production-safe worker loop
 
 ---
 
-##
+## Installation
 
 ```bash
 pip install pybgworker
@@ -59,7 +64,7 @@ pip install pybgworker
 
 ---
 
-##
+## Basic Usage
 
 ### Define a task
 
@@ -79,7 +84,7 @@ add.delay(1, 2)
 
 ---
 
-##
+## Run worker
 
 ```bash
 python -m pybgworker.cli run --app example
@@ -87,7 +92,7 @@ python -m pybgworker.cli run --app example
 
 ---
 
-##
+## Cron Scheduler
 
 Run recurring tasks:
 
@@ -105,7 +110,7 @@ Cron runs automatically inside the worker.
 
 ---
 
-##
+## JSON Logging
 
 All worker events are structured JSON:
 
@@ -123,7 +128,7 @@ This enables:
 
 ---
 
-##
+## Rate Limiting
 
 Protect infrastructure from overload:
 
@@ -135,7 +140,7 @@ Ensures predictable execution under heavy load.
 
 ---
 
-##
+## CLI Commands
 
 Inspect queue:
 
@@ -163,7 +168,7 @@ python -m pybgworker.cli purge
 
 ---
 
-##
+## Observability
 
 PyBgWorker logs:
 
@@ -181,7 +186,7 @@ All machine-readable.
 
 ---
 
-##
+## Design Goals
 
 - zero external dependencies
 - SQLite durability
@@ -192,18 +197,23 @@ All machine-readable.
 
 ---
 
-##
+## Roadmap
 
-
+Planned but not yet included:
 
--
--
--
--
--
+- Single-worker concurrency (process pool)
+- Retry backoff + jitter policies
+- Dead-letter queue for exhausted retries
+- Task/result TTL and automatic DB cleanup
+- Multiple named queues + routing
+- Pluggable backends (Redis first)
+- Cluster coordination / leader election for scheduler
+- Metrics endpoint and health checks
+- Dashboard API + web UI
+- Workflow pipelines / DAGs
 
 ---
 
-##
+## License
 
 MIT License
{pybgworker-0.2.1 → pybgworker-0.2.2}/README.md

@@ -1,184 +1,194 @@
-# PyBgWorker
-
-A lightweight, production-ready background task
-
-PyBgWorker provides a durable SQLite-backed task queue, cron
-rate limiting, retries, and structured observability
-infrastructure.
-
-It is designed to be simple, reliable, and easy to deploy.
-
----
-
-##
-
-- Persistent SQLite task queue
-- Multi-worker safe execution
--
--
--
--
-- Task
--
--
--
--
[old README lines 26-184 survive only as blank lines and stray "-" fragments in this extraction and are not recoverable]
+# PyBgWorker
+
+A lightweight, production-ready background task library for Python.
+
+PyBgWorker provides a durable SQLite-backed task queue, scheduling (cron and
+countdown/ETA), rate limiting, retries, and structured observability without
+external infrastructure.
+
+It is designed to be simple, reliable, and easy to deploy.
+
+---
+
+## Features
+
+- Persistent SQLite task queue
+- Multi-worker safe execution
+- Task scheduling: cron + countdown/ETA
+- Retry + failure handling
+- Task cancellation support
+- Crash isolation via subprocess
+- Task priority execution
+- Task status tracking
+- Result storage and retrieval
+- Worker statistics and monitoring
+- JSON structured logging
+- Task duration tracking
+- Rate limiting (overload protection)
+- Heartbeat monitoring
+- CLI tools: inspect, retry, failed, purge, cancel, stats
+- Production-safe worker loop
+
+---
+
+## Installation
+
+```bash
+pip install pybgworker
+```
+
+---
+
+## Basic Usage
+
+### Define a task
+
+```python
+from pybgworker.task import task
+
+@task(name="add")
+def add(a, b):
+    return a + b
+```
+
+### Enqueue a task
+
+```python
+add.delay(1, 2)
+```
+
+---
+
+## Run worker
+
+```bash
+python -m pybgworker.cli run --app example
+```
+
+---
+
+## Cron Scheduler
+
+Run recurring tasks:
+
+```python
+from pybgworker.scheduler import cron
+from pybgworker.task import task
+
+@task(name="heartbeat_task")
+@cron("*/1 * * * *")
+def heartbeat():
+    print("alive")
+```
+
+Cron runs automatically inside the worker.
+
+---
+
+## JSON Logging
+
+All worker events are structured JSON:
+
+```json
+{"event":"task_start","task_id":"..."}
+{"event":"task_success","duration":0.12}
+```
+
+This enables:
+
+- monitoring
+- analytics
+- alerting
+- observability pipelines
+
+---
+
+## Rate Limiting
+
+Protect infrastructure from overload:
+
+```python
+RATE_LIMIT = 5  # tasks per second
+```
+
+Ensures predictable execution under heavy load.
+
+---
+
+## CLI Commands
+
+Inspect queue:
+
+```bash
+python -m pybgworker.cli inspect
+```
+
+Retry failed task:
+
+```bash
+python -m pybgworker.cli retry <task_id>
+```
+
+Cancel task:
+
+```bash
+python -m pybgworker.cli cancel <task_id>
+```
+
+Purge queued tasks:
+
+```bash
+python -m pybgworker.cli purge
+```
+
+---
+
+## Observability
+
+PyBgWorker logs:
+
+- worker start
+- cron events
+- task start
+- success
+- retry
+- failure
+- timeout
+- crash
+- heartbeat errors
+
+All machine-readable.
+
+---
+
+## Design Goals
+
+- zero external dependencies
+- SQLite durability
+- safe multiprocessing
+- operator-friendly CLI
+- production observability
+- infrastructure protection
+
+---
+
+## Roadmap
+
+Planned but not yet included:
+
+- Single-worker concurrency (process pool)
+- Retry backoff + jitter policies
+- Dead-letter queue for exhausted retries
+- Task/result TTL and automatic DB cleanup
+- Multiple named queues + routing
+- Pluggable backends (Redis first)
+- Cluster coordination / leader election for scheduler
+- Metrics endpoint and health checks
+- Dashboard API + web UI
+- Workflow pipelines / DAGs
+
+---
+
+## License
+
+MIT License
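Because every worker event above is a single JSON object per line, basic monitoring needs nothing beyond the standard library. A hedged sketch: the `task_success` event name and `duration` field are the ones shown in the README, while how you capture the worker's stdout is an assumption of your deployment.

```python
# Minimal log consumer: pipe the worker's output into this script.
import json
import sys

durations = []
for line in sys.stdin:
    try:
        event = json.loads(line)
    except ValueError:
        continue  # tolerate any non-JSON output mixed into the stream
    if event.get("event") == "task_success":
        durations.append(event.get("duration", 0.0))

if durations:
    print(f"{len(durations)} tasks, avg {sum(durations) / len(durations):.3f}s")
```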
pybgworker-0.2.2/pybgworker/ratelimit.py (new file)

@@ -0,0 +1,43 @@
+import time
+import threading
+
+
+class RateLimiter:
+    def __init__(self, rate_per_sec):
+        # default/global rate
+        self.default_rate = rate_per_sec
+        self.lock = threading.Lock()
+        self.timestamps = []
+
+    def acquire(self, rate=None):
+        """
+        rate: optional per-task rate limit
+        """
+        limit = rate or self.default_rate
+
+        # No limit configured
+        if not limit or limit <= 0:
+            return
+
+        with self.lock:
+            now = time.time()
+
+            # Remove timestamps older than 1 second
+            self.timestamps = [
+                t for t in self.timestamps
+                if now - t < 1
+            ]
+
+            # Wait if limit reached
+            if len(self.timestamps) >= limit:
+                sleep_time = 1 - (now - self.timestamps[0])
+                if sleep_time > 0:
+                    time.sleep(sleep_time)
+
+                now = time.time()
+                self.timestamps = [
+                    t for t in self.timestamps
+                    if now - t < 1
+                ]
+
+            self.timestamps.append(time.time())
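The rewritten limiter keeps 0.2.1's sliding one-second window but adds a per-call override: `acquire(rate)` prefers the passed rate over the global default, and a missing or non-positive limit is a no-op. A minimal usage sketch, assuming a throwaway script; the specific rates are illustrative:

```python
import time

from pybgworker.ratelimit import RateLimiter

limiter = RateLimiter(rate_per_sec=5)   # global default: 5 acquisitions/second

start = time.time()
for i in range(9):
    limiter.acquire(rate=3)  # per-call override: at most ~3 per sliding second
    print(f"acquired {i} at {time.time() - start:.2f}s")
# The printed offsets should advance by roughly one second per 3 acquisitions.
```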
{pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/sqlite_queue.py

@@ -105,7 +105,9 @@ class SQLiteQueue(BaseQueue):
                 UPDATE tasks
                 SET status='success',
                     finished_at=?,
-                    updated_at=?
+                    updated_at=?,
+                    locked_by=NULL,
+                    locked_at=NULL
                 WHERE id=?
             """, (now().isoformat(), now().isoformat(), task_id))
             conn.commit()
@@ -119,7 +121,9 @@ class SQLiteQueue(BaseQueue):
                 SET status='failed',
                     last_error=?,
                     finished_at=?,
-                    updated_at=?
+                    updated_at=?,
+                    locked_by=NULL,
+                    locked_at=NULL
                 WHERE id=?
             """, (error, now().isoformat(), now().isoformat(), task_id))
             conn.commit()
@@ -134,7 +138,24 @@ class SQLiteQueue(BaseQueue):
                 SET status='retrying',
                     attempt=attempt+1,
                     run_at=?,
-                    updated_at=?
+                    updated_at=?,
+                    locked_by=NULL,
+                    locked_at=NULL
                 WHERE id=?
             """, (run_at.isoformat(), now().isoformat(), task_id))
             conn.commit()
+
+    # ---------------- cancel ----------------
+
+    def cancel(self, task_id):
+        with get_conn() as conn:
+            conn.execute("""
+                UPDATE tasks
+                SET status='cancelled',
+                    finished_at=?,
+                    updated_at=?,
+                    locked_by=NULL,
+                    locked_at=NULL
+                WHERE id=?
+            """, (now().isoformat(), now().isoformat(), task_id))
+            conn.commit()
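The common thread in these hunks is that every terminal transition (success, failure, retry, and the new `cancel()`) now also clears `locked_by`/`locked_at`, so a finished or cancelled row is never left looking claimed by a worker. A hedged sketch of the new method in use; the task id is a placeholder:

```python
from pybgworker.sqlite_queue import SQLiteQueue

queue = SQLiteQueue()
# Sets status='cancelled', stamps finished_at/updated_at, and NULLs the
# lock columns so no worker still considers the row claimed.
queue.cancel("<task_id>")  # placeholder; pass a real task id
```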
{pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker/task.py

@@ -10,17 +10,27 @@ queue = SQLiteQueue()
 backend = SQLiteBackend()
 
 
-def task(name=None, retries=0, retry_delay=0, retry_for=(Exception,)):
+def task(
+    name=None,
+    retries=0,
+    retry_delay=0,
+    retry_for=(Exception,),
+    timeout=None,
+    rate_limit=None,
+):
     if name is None:
         raise ValueError("Task name is required to avoid __main__ issues")
 
     def decorator(func):
         task_name = name or f"{func.__module__}.{func.__name__}"
 
+        # Store task metadata
         TASK_REGISTRY[task_name] = {
             "func": func,
             "retry_delay": retry_delay,
             "retry_for": retry_for,
+            "timeout": timeout,
+            "rate_limit": rate_limit,
         }
 
         @wraps(func)
@@ -42,7 +52,7 @@ def task(name=None, retries=0, retry_delay=0, retry_for=(Exception,))
             "attempt": 0,
             "max_retries": retries,
             "run_at": run_at.isoformat(),
-            "priority": priority,
+            "priority": priority,
             "locked_by": None,
             "locked_at": None,
             "last_error": None,
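Taken together, the widened decorator means per-task execution policy now lives at the definition site. An illustrative registration using the new knobs; the task name and values are invented, but the parameter names are exactly those in the hunk above:

```python
from pybgworker.task import task

# Hypothetical task: name and values are made up for illustration.
@task(
    name="send_report",    # required; avoids __main__ registration issues
    retries=3,             # worker compares task["attempt"] < task["max_retries"]
    retry_delay=10,        # seconds passed to queue.reschedule()
    retry_for=(IOError,),  # stored in the task's registry metadata
    timeout=30,            # per-task override of the worker's TASK_TIMEOUT (150s)
    rate_limit=2,          # per-task override passed to limiter.acquire()
)
def send_report(user_id):
    ...
```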
pybgworker-0.2.2/pybgworker/worker.py (new file)

@@ -0,0 +1,189 @@
+import time
+import traceback
+import threading
+import signal
+import os
+from multiprocessing import Process, Queue as MPQueue
+
+from .logger import log
+from .sqlite_queue import SQLiteQueue
+from .task import TASK_REGISTRY
+from .config import WORKER_NAME, POLL_INTERVAL, RATE_LIMIT
+from .utils import loads, get_conn, now
+from .backends import SQLiteBackend
+from .scheduler import run_scheduler
+from .ratelimit import RateLimiter
+
+
+queue = SQLiteQueue()
+backend = SQLiteBackend()
+limiter = RateLimiter(RATE_LIMIT)
+
+TASK_TIMEOUT = 150  # default timeout
+
+
+shutdown_requested = False
+last_shutdown_signal = 0
+current_task_id = None
+current_process = None
+
+
+def handle_shutdown(signum, frame):
+    global shutdown_requested, last_shutdown_signal
+    global current_task_id, current_process
+
+    now_ts = time.time()
+
+    # Ignore duplicate signals (Windows issue)
+    if now_ts - last_shutdown_signal < 1:
+        return
+
+    last_shutdown_signal = now_ts
+
+    # Second Ctrl+C → force exit
+    if shutdown_requested:
+        log("worker_force_exit", worker=WORKER_NAME)
+
+        if current_task_id:
+            queue.cancel(current_task_id)
+            log("task_cancelled", task_id=current_task_id)
+
+        if current_process and current_process.is_alive():
+            current_process.terminate()
+
+        os._exit(1)
+
+    shutdown_requested = True
+    log("worker_shutdown_requested", worker=WORKER_NAME)
+
+
+def heartbeat():
+    while True:
+        try:
+            with get_conn() as conn:
+                conn.execute("""
+                    INSERT INTO workers(name, last_seen)
+                    VALUES (?, ?)
+                    ON CONFLICT(name)
+                    DO UPDATE SET last_seen=excluded.last_seen
+                """, (WORKER_NAME, now().isoformat()))
+                conn.commit()
+        except Exception as e:
+            log("heartbeat_error", error=str(e))
+
+        time.sleep(5)
+
+
+def run_task(func, args, kwargs, result_queue):
+    # Child process ignores Ctrl+C
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+    try:
+        result = func(*args, **kwargs)
+        result_queue.put(("success", result))
+    except Exception:
+        result_queue.put(("error", traceback.format_exc()))
+
+
+def run_worker():
+    global shutdown_requested, current_task_id, current_process
+
+    signal.signal(signal.SIGINT, handle_shutdown)
+    signal.signal(signal.SIGTERM, handle_shutdown)
+
+    log("worker_start", worker=WORKER_NAME)
+
+    threading.Thread(target=heartbeat, daemon=True).start()
+    threading.Thread(target=run_scheduler, daemon=True).start()
+
+    while not shutdown_requested:
+        task = queue.fetch_next(WORKER_NAME)
+
+        if not task:
+            if shutdown_requested:
+                break
+            time.sleep(POLL_INTERVAL)
+            continue
+
+        meta = TASK_REGISTRY.get(task["name"])
+        if not meta:
+            queue.fail(task["id"], "Task not registered")
+            log("task_invalid", task_id=task["id"])
+            continue
+
+        # -------- Rate limit per task --------
+        limiter.acquire(meta.get("rate_limit"))
+
+        func = meta["func"]
+        retry_delay = meta["retry_delay"]
+
+        args = loads(task["args"])
+        kwargs = loads(task["kwargs"])
+
+        start_time = now()
+        current_task_id = task["id"]
+
+        log("task_start", task_id=current_task_id, worker=WORKER_NAME)
+
+        result_queue = MPQueue()
+        process = Process(target=run_task, args=(func, args, kwargs, result_queue))
+        current_process = process
+
+        process.start()
+
+        # -------- Timeout per task --------
+        timeout = meta.get("timeout") or TASK_TIMEOUT
+
+        start_join = time.time()
+
+        while process.is_alive():
+            if time.time() - start_join > timeout:
+                break
+            time.sleep(0.2)
+
+        if process.is_alive():
+            process.terminate()
+
+            info = backend.get_task(current_task_id)
+            if info["status"] == "cancelled":
+                log("task_cancelled", task_id=current_task_id)
+                current_task_id = None
+                current_process = None
+                continue
+
+            queue.fail(current_task_id, "Task timeout")
+            log("task_timeout", task_id=current_task_id)
+            log("task_failed", task_id=current_task_id)
+            current_task_id = None
+            current_process = None
+            continue
+
+        if result_queue.empty():
+            queue.fail(current_task_id, "Task crashed")
+            log("task_crash", task_id=current_task_id)
+            current_task_id = None
+            current_process = None
+            continue
+
+        status, payload = result_queue.get()
+        duration = (now() - start_time).total_seconds()
+
+        if status == "success":
+            backend.store_result(current_task_id, payload)
+            queue.ack(current_task_id)
+            log(
+                "task_success",
+                task_id=current_task_id,
+                duration=duration,
+                worker=WORKER_NAME,
+            )
+        else:
+            if task["attempt"] < task["max_retries"]:
+                queue.reschedule(current_task_id, retry_delay)
+            else:
+                queue.fail(current_task_id, payload)
+
+        current_task_id = None
+        current_process = None
+
+    log("worker_stopped", worker=WORKER_NAME)
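The rewritten loop replaces 0.2.1's single blocking `process.join(TASK_TIMEOUT)` with short `is_alive()` polls against a deadline, which is what lets the main thread keep servicing SIGINT/SIGTERM and notice cancellation while a task runs. The same pattern in isolation, as a self-contained sketch:

```python
import time
from multiprocessing import Process


def wait_with_deadline(process: Process, timeout: float, poll: float = 0.2) -> bool:
    """Return True if the process exited before the deadline, False on timeout.

    Same shape as the loop in worker.py: short sleeps instead of one blocking
    join(), so the calling thread stays responsive between polls.
    """
    deadline = time.time() + timeout
    while process.is_alive():
        if time.time() > deadline:
            return False
        time.sleep(poll)
    return True
```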
{pybgworker-0.2.1 → pybgworker-0.2.2}/pybgworker.egg-info/PKG-INFO

(identical to the PKG-INFO diff at the top: the version bump plus the regenerated README body)
{pybgworker-0.2.1 → pybgworker-0.2.2}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "pybgworker"
-version = "0.2.1"
+version = "0.2.2"
 description = "Lightweight production-ready background task worker with cron, rate limiting and JSON observability"
 readme = "README.md"
 license = { text = "MIT" }
pybgworker-0.2.1/pybgworker/ratelimit.py (removed; replaced by the 0.2.2 version above)

@@ -1,26 +0,0 @@
-import time
-import threading
-
-
-class RateLimiter:
-    def __init__(self, rate_per_sec):
-        self.rate = rate_per_sec
-        self.lock = threading.Lock()
-        self.timestamps = []
-
-    def acquire(self):
-        with self.lock:
-            now = time.time()
-
-            # remove old timestamps
-            self.timestamps = [
-                t for t in self.timestamps
-                if now - t < 1
-            ]
-
-            if len(self.timestamps) >= self.rate:
-                sleep_time = 1 - (now - self.timestamps[0])
-                if sleep_time > 0:
-                    time.sleep(sleep_time)
-
-            self.timestamps.append(time.time())
pybgworker-0.2.1/pybgworker/worker.py (removed; replaced by the 0.2.2 version above)

@@ -1,122 +0,0 @@
-import time
-import traceback
-import threading
-from multiprocessing import Process, Queue as MPQueue
-
-from .logger import log
-from .sqlite_queue import SQLiteQueue
-from .task import TASK_REGISTRY
-from .config import WORKER_NAME, POLL_INTERVAL, RATE_LIMIT
-from .utils import loads, get_conn, now
-from .backends import SQLiteBackend
-from .scheduler import run_scheduler
-from .ratelimit import RateLimiter
-
-
-queue = SQLiteQueue()
-backend = SQLiteBackend()
-limiter = RateLimiter(RATE_LIMIT)
-
-TASK_TIMEOUT = 150  # seconds
-
-
-def heartbeat():
-    while True:
-        try:
-            with get_conn() as conn:
-                conn.execute("""
-                    INSERT INTO workers(name, last_seen)
-                    VALUES (?, ?)
-                    ON CONFLICT(name)
-                    DO UPDATE SET last_seen=excluded.last_seen
-                """, (WORKER_NAME, now().isoformat()))
-                conn.commit()
-        except Exception as e:
-            log("heartbeat_error", error=str(e))
-
-        time.sleep(5)
-
-
-def run_task(func, args, kwargs, result_queue):
-    try:
-        result = func(*args, **kwargs)
-        result_queue.put(("success", result))
-    except Exception:
-        result_queue.put(("error", traceback.format_exc()))
-
-
-def run_worker():
-    log("worker_start", worker=WORKER_NAME)
-
-    threading.Thread(target=heartbeat, daemon=True).start()
-    threading.Thread(target=run_scheduler, daemon=True).start()
-
-    while True:
-        task = queue.fetch_next(WORKER_NAME)
-
-        if not task:
-            time.sleep(POLL_INTERVAL)
-            continue
-
-        # ⭐ rate limiting happens here
-        limiter.acquire()
-
-        meta = TASK_REGISTRY.get(task["name"])
-        if not meta:
-            queue.fail(task["id"], "Task not registered")
-            log("task_invalid", task_id=task["id"])
-            continue
-
-        func = meta["func"]
-        retry_delay = meta["retry_delay"]
-
-        args = loads(task["args"])
-        kwargs = loads(task["kwargs"])
-
-        start_time = now()
-        log("task_start", task_id=task["id"], worker=WORKER_NAME)
-
-        result_queue = MPQueue()
-        process = Process(target=run_task, args=(func, args, kwargs, result_queue))
-
-        process.start()
-        process.join(TASK_TIMEOUT)
-
-        if process.is_alive():
-            process.terminate()
-
-            info = backend.get_task(task["id"])
-            if info["status"] == "cancelled":
-                log("task_cancelled", task_id=task["id"])
-                continue
-
-            queue.fail(task["id"], "Task timeout")
-            log("task_timeout", task_id=task["id"])
-            continue
-
-        if result_queue.empty():
-            queue.fail(task["id"], "Task crashed without result")
-            log("task_crash", task_id=task["id"])
-            continue
-
-        status, payload = result_queue.get()
-        duration = (now() - start_time).total_seconds()
-
-        if status == "success":
-            backend.store_result(task["id"], payload)
-            queue.ack(task["id"])
-            log("task_success",
-                task_id=task["id"],
-                duration=duration,
-                worker=WORKER_NAME)
-
-        else:
-            if task["attempt"] < task["max_retries"]:
-                queue.reschedule(task["id"], retry_delay)
-                log("task_retry",
-                    task_id=task["id"],
-                    attempt=task["attempt"] + 1,
-                    max=task["max_retries"])
-            else:
-                queue.fail(task["id"], payload)
-                log("task_failed", task_id=task["id"])
The remaining 23 files listed above with +0 -0 are unchanged between 0.2.1 and 0.2.2.