pybgworker 0.2.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pybgworker-0.2.1/LICENSE +21 -0
- pybgworker-0.2.1/PKG-INFO +209 -0
- pybgworker-0.2.1/README.md +184 -0
- pybgworker-0.2.1/pybgworker/__init__.py +6 -0
- pybgworker-0.2.1/pybgworker/backends.py +47 -0
- pybgworker-0.2.1/pybgworker/cancel.py +29 -0
- pybgworker-0.2.1/pybgworker/cli.py +69 -0
- pybgworker-0.2.1/pybgworker/config.py +7 -0
- pybgworker-0.2.1/pybgworker/failed.py +24 -0
- pybgworker-0.2.1/pybgworker/inspect.py +42 -0
- pybgworker-0.2.1/pybgworker/logger.py +14 -0
- pybgworker-0.2.1/pybgworker/purge.py +14 -0
- pybgworker-0.2.1/pybgworker/queue.py +23 -0
- pybgworker-0.2.1/pybgworker/ratelimit.py +26 -0
- pybgworker-0.2.1/pybgworker/result.py +64 -0
- pybgworker-0.2.1/pybgworker/retry.py +30 -0
- pybgworker-0.2.1/pybgworker/scheduler.py +56 -0
- pybgworker-0.2.1/pybgworker/sqlite_queue.py +140 -0
- pybgworker-0.2.1/pybgworker/state.py +27 -0
- pybgworker-0.2.1/pybgworker/stats.py +27 -0
- pybgworker-0.2.1/pybgworker/task.py +63 -0
- pybgworker-0.2.1/pybgworker/utils.py +31 -0
- pybgworker-0.2.1/pybgworker/worker.py +122 -0
- pybgworker-0.2.1/pybgworker.egg-info/PKG-INFO +209 -0
- pybgworker-0.2.1/pybgworker.egg-info/SOURCES.txt +30 -0
- pybgworker-0.2.1/pybgworker.egg-info/dependency_links.txt +1 -0
- pybgworker-0.2.1/pybgworker.egg-info/entry_points.txt +2 -0
- pybgworker-0.2.1/pybgworker.egg-info/requires.txt +1 -0
- pybgworker-0.2.1/pybgworker.egg-info/top_level.txt +1 -0
- pybgworker-0.2.1/pyproject.toml +49 -0
- pybgworker-0.2.1/setup.cfg +4 -0
- pybgworker-0.2.1/tests/test_retry.py +85 -0
pybgworker-0.2.1/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Prabhat Verma
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pybgworker
|
|
3
|
+
Version: 0.2.1
|
|
4
|
+
Summary: Lightweight production-ready background task worker with cron, rate limiting and JSON observability
|
|
5
|
+
Author: Prabhat Verma
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/prabhat708/pybgworker
|
|
8
|
+
Project-URL: Repository, https://github.com/prabhat708/pybgworker
|
|
9
|
+
Project-URL: Issues, https://github.com/prabhat708/pybgworker/issues
|
|
10
|
+
Keywords: background-jobs,task-queue,sqlite,cron,worker,job-queue
|
|
11
|
+
Classifier: Development Status :: 5 - Production/Stable
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
19
|
+
Classifier: Operating System :: OS Independent
|
|
20
|
+
Requires-Python: >=3.8
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
License-File: LICENSE
|
|
23
|
+
Requires-Dist: croniter>=1.4.0
|
|
24
|
+
Dynamic: license-file
|
|
25
|
+
|
|
26
|
+
# PyBgWorker
|
|
27
|
+
|
|
28
|
+
A lightweight, production-ready background task framework for Python.
|
|
29
|
+
|
|
30
|
+
PyBgWorker provides a durable SQLite-backed task queue, cron scheduling,
|
|
31
|
+
rate limiting, retries, and structured observability — all without external
|
|
32
|
+
infrastructure.
|
|
33
|
+
|
|
34
|
+
It is designed to be simple, reliable, and easy to deploy.
|
|
35
|
+
|
|
36
|
+
---
|
|
37
|
+
|
|
38
|
+
## โจ Features
|
|
39
|
+
|
|
40
|
+
- Persistent SQLite task queue
|
|
41
|
+
- Multi-worker safe execution
|
|
42
|
+
- Retry + failure handling
|
|
43
|
+
- Crash isolation via subprocess
|
|
44
|
+
- Cron scheduler for recurring jobs
|
|
45
|
+
- JSON structured logging
|
|
46
|
+
- Task duration tracking
|
|
47
|
+
- Rate limiting (overload protection)
|
|
48
|
+
- Heartbeat monitoring
|
|
49
|
+
- CLI inspect / retry / purge / cancel
|
|
50
|
+
- Production-safe worker loop
|
|
51
|
+
|
|
52
|
+
---
|
|
53
|
+
|
|
54
|
+
## ๐ Installation
|
|
55
|
+
|
|
56
|
+
```bash
|
|
57
|
+
pip install pybgworker
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
---
|
|
61
|
+
|
|
62
|
+
## ๐ง Basic Usage
|
|
63
|
+
|
|
64
|
+
### Define a task
|
|
65
|
+
|
|
66
|
+
```python
|
|
67
|
+
from pybgworker.task import task
|
|
68
|
+
|
|
69
|
+
@task(name="add")
|
|
70
|
+
def add(a, b):
|
|
71
|
+
return a + b
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
### Enqueue a task
|
|
75
|
+
|
|
76
|
+
```python
|
|
77
|
+
add.delay(1, 2)
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
---
|
|
81
|
+
|
|
82
|
+
## โถ Run worker
|
|
83
|
+
|
|
84
|
+
```bash
|
|
85
|
+
python -m pybgworker.cli run --app example
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
---
|
|
89
|
+
|
|
90
|
+
## โฐ Cron Scheduler
|
|
91
|
+
|
|
92
|
+
Run recurring tasks:
|
|
93
|
+
|
|
94
|
+
```python
|
|
95
|
+
from pybgworker.scheduler import cron
|
|
96
|
+
from pybgworker.task import task
|
|
97
|
+
|
|
98
|
+
@task(name="heartbeat_task")
|
|
99
|
+
@cron("*/1 * * * *")
|
|
100
|
+
def heartbeat():
|
|
101
|
+
print("alive")
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
Cron runs automatically inside the worker.
|
|
105
|
+
|
|
106
|
+
---
|
|
107
|
+
|
|
108
|
+
## ๐ JSON Logging
|
|
109
|
+
|
|
110
|
+
All worker events are structured JSON:
|
|
111
|
+
|
|
112
|
+
```json
|
|
113
|
+
{"event":"task_start","task_id":"..."}
|
|
114
|
+
{"event":"task_success","duration":0.12}
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
This enables:
|
|
118
|
+
|
|
119
|
+
- monitoring
|
|
120
|
+
- analytics
|
|
121
|
+
- alerting
|
|
122
|
+
- observability pipelines
|
|
123
|
+
|
|
124
|
+
---
|
|
125
|
+
|
|
126
|
+
## ๐ฆ Rate Limiting
|
|
127
|
+
|
|
128
|
+
Protect infrastructure from overload:
|
|
129
|
+
|
|
130
|
+
```python
|
|
131
|
+
RATE_LIMIT = 5 # tasks per second
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
Ensures predictable execution under heavy load.
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
## ๐ CLI Commands
|
|
139
|
+
|
|
140
|
+
Inspect queue:
|
|
141
|
+
|
|
142
|
+
```bash
|
|
143
|
+
python -m pybgworker.cli inspect
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
Retry failed task:
|
|
147
|
+
|
|
148
|
+
```bash
|
|
149
|
+
python -m pybgworker.cli retry <task_id>
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
Cancel task:
|
|
153
|
+
|
|
154
|
+
```bash
|
|
155
|
+
python -m pybgworker.cli cancel <task_id>
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
Purge queued tasks:
|
|
159
|
+
|
|
160
|
+
```bash
|
|
161
|
+
python -m pybgworker.cli purge
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
---
|
|
165
|
+
|
|
166
|
+
## ๐งช Observability
|
|
167
|
+
|
|
168
|
+
PyBgWorker logs:
|
|
169
|
+
|
|
170
|
+
- worker start
|
|
171
|
+
- cron events
|
|
172
|
+
- task start
|
|
173
|
+
- success
|
|
174
|
+
- retry
|
|
175
|
+
- failure
|
|
176
|
+
- timeout
|
|
177
|
+
- crash
|
|
178
|
+
- heartbeat errors
|
|
179
|
+
|
|
180
|
+
All machine-readable.
|
|
181
|
+
|
|
182
|
+
---
|
|
183
|
+
|
|
184
|
+
## ๐ฏ Design Goals
|
|
185
|
+
|
|
186
|
+
- minimal external dependencies (only `croniter`)
|
|
187
|
+
- SQLite durability
|
|
188
|
+
- safe multiprocessing
|
|
189
|
+
- operator-friendly CLI
|
|
190
|
+
- production observability
|
|
191
|
+
- infrastructure protection
|
|
192
|
+
|
|
193
|
+
---
|
|
194
|
+
|
|
195
|
+
## ๐ Roadmap
|
|
196
|
+
|
|
197
|
+
Future upgrades may include:
|
|
198
|
+
|
|
199
|
+
- dashboard web UI
|
|
200
|
+
- metrics endpoint
|
|
201
|
+
- Redis backend
|
|
202
|
+
- workflow pipelines
|
|
203
|
+
- cluster coordination
|
|
204
|
+
|
|
205
|
+
---
|
|
206
|
+
|
|
207
|
+
## ๐ License
|
|
208
|
+
|
|
209
|
+
MIT License
|
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
# PyBgWorker
|
|
2
|
+
|
|
3
|
+
A lightweight, production-ready background task framework for Python.
|
|
4
|
+
|
|
5
|
+
PyBgWorker provides a durable SQLite-backed task queue, cron scheduling,
|
|
6
|
+
rate limiting, retries, and structured observability — all without external
|
|
7
|
+
infrastructure.
|
|
8
|
+
|
|
9
|
+
It is designed to be simple, reliable, and easy to deploy.
|
|
10
|
+
|
|
11
|
+
---
|
|
12
|
+
|
|
13
|
+
## โจ Features
|
|
14
|
+
|
|
15
|
+
- Persistent SQLite task queue
|
|
16
|
+
- Multi-worker safe execution
|
|
17
|
+
- Retry + failure handling
|
|
18
|
+
- Crash isolation via subprocess
|
|
19
|
+
- Cron scheduler for recurring jobs
|
|
20
|
+
- JSON structured logging
|
|
21
|
+
- Task duration tracking
|
|
22
|
+
- Rate limiting (overload protection)
|
|
23
|
+
- Heartbeat monitoring
|
|
24
|
+
- CLI inspect / retry / purge / cancel
|
|
25
|
+
- Production-safe worker loop
|
|
26
|
+
|
|
27
|
+
---
|
|
28
|
+
|
|
29
|
+
## ๐ Installation
|
|
30
|
+
|
|
31
|
+
```bash
|
|
32
|
+
pip install pybgworker
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## ๐ง Basic Usage
|
|
38
|
+
|
|
39
|
+
### Define a task
|
|
40
|
+
|
|
41
|
+
```python
|
|
42
|
+
from pybgworker.task import task
|
|
43
|
+
|
|
44
|
+
@task(name="add")
|
|
45
|
+
def add(a, b):
|
|
46
|
+
return a + b
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
### Enqueue a task
|
|
50
|
+
|
|
51
|
+
```python
|
|
52
|
+
add.delay(1, 2)
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
---
|
|
56
|
+
|
|
57
|
+
## โถ Run worker
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
python -m pybgworker.cli run --app example
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
---
|
|
64
|
+
|
|
65
|
+
## โฐ Cron Scheduler
|
|
66
|
+
|
|
67
|
+
Run recurring tasks:
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
from pybgworker.scheduler import cron
|
|
71
|
+
from pybgworker.task import task
|
|
72
|
+
|
|
73
|
+
@task(name="heartbeat_task")
|
|
74
|
+
@cron("*/1 * * * *")
|
|
75
|
+
def heartbeat():
|
|
76
|
+
print("alive")
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
Cron runs automatically inside the worker.
|
|
80
|
+
|
|
81
|
+
---
|
|
82
|
+
|
|
83
|
+
## ๐ JSON Logging
|
|
84
|
+
|
|
85
|
+
All worker events are structured JSON:
|
|
86
|
+
|
|
87
|
+
```json
|
|
88
|
+
{"event":"task_start","task_id":"..."}
|
|
89
|
+
{"event":"task_success","duration":0.12}
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
This enables:
|
|
93
|
+
|
|
94
|
+
- monitoring
|
|
95
|
+
- analytics
|
|
96
|
+
- alerting
|
|
97
|
+
- observability pipelines
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## ๐ฆ Rate Limiting
|
|
102
|
+
|
|
103
|
+
Protect infrastructure from overload:
|
|
104
|
+
|
|
105
|
+
```python
|
|
106
|
+
RATE_LIMIT = 5 # tasks per second
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
Ensures predictable execution under heavy load.
|
|
110
|
+
|
|
111
|
+
---
|
|
112
|
+
|
|
113
|
+
## ๐ CLI Commands
|
|
114
|
+
|
|
115
|
+
Inspect queue:
|
|
116
|
+
|
|
117
|
+
```bash
|
|
118
|
+
python -m pybgworker.cli inspect
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
Retry failed task:
|
|
122
|
+
|
|
123
|
+
```bash
|
|
124
|
+
python -m pybgworker.cli retry <task_id>
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
Cancel task:
|
|
128
|
+
|
|
129
|
+
```bash
|
|
130
|
+
python -m pybgworker.cli cancel <task_id>
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
Purge queued tasks:
|
|
134
|
+
|
|
135
|
+
```bash
|
|
136
|
+
python -m pybgworker.cli purge
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
---
|
|
140
|
+
|
|
141
|
+
## ๐งช Observability
|
|
142
|
+
|
|
143
|
+
PyBgWorker logs:
|
|
144
|
+
|
|
145
|
+
- worker start
|
|
146
|
+
- cron events
|
|
147
|
+
- task start
|
|
148
|
+
- success
|
|
149
|
+
- retry
|
|
150
|
+
- failure
|
|
151
|
+
- timeout
|
|
152
|
+
- crash
|
|
153
|
+
- heartbeat errors
|
|
154
|
+
|
|
155
|
+
All machine-readable.
|
|
156
|
+
|
|
157
|
+
---
|
|
158
|
+
|
|
159
|
+
## ๐ฏ Design Goals
|
|
160
|
+
|
|
161
|
+
- minimal external dependencies (only `croniter`)
|
|
162
|
+
- SQLite durability
|
|
163
|
+
- safe multiprocessing
|
|
164
|
+
- operator-friendly CLI
|
|
165
|
+
- production observability
|
|
166
|
+
- infrastructure protection
|
|
167
|
+
|
|
168
|
+
---
|
|
169
|
+
|
|
170
|
+
## ๐ Roadmap
|
|
171
|
+
|
|
172
|
+
Future upgrades may include:
|
|
173
|
+
|
|
174
|
+
- dashboard web UI
|
|
175
|
+
- metrics endpoint
|
|
176
|
+
- Redis backend
|
|
177
|
+
- workflow pipelines
|
|
178
|
+
- cluster coordination
|
|
179
|
+
|
|
180
|
+
---
|
|
181
|
+
|
|
182
|
+
## ๐ License
|
|
183
|
+
|
|
184
|
+
MIT License
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
import sqlite3
|
|
3
|
+
import json
|
|
4
|
+
from .config import DB_PATH
|
|
5
|
+
|
|
6
|
+
class BaseBackend(ABC):
    """Abstract storage backend for task records.

    Concrete backends persist task rows, their results, and support
    removing a task entirely.
    """

    @abstractmethod
    def get_task(self, task_id):
        """Return the stored task row for *task_id*, or None if unknown."""

    @abstractmethod
    def store_result(self, task_id, result):
        """Persist *result* for the task identified by *task_id*."""

    @abstractmethod
    def forget(self, task_id):
        """Remove the task identified by *task_id* from storage."""
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class SQLiteBackend(BaseBackend):
    """Result backend persisting task rows in the shared SQLite database.

    NOTE(review): ``db_path`` is stored on the instance but never passed
    to ``get_conn()``, which presumably opens ``config.DB_PATH`` itself —
    confirm whether per-instance database paths were intended to work.
    """

    def __init__(self, db_path=DB_PATH):
        self.db_path = db_path

    def get_task(self, task_id):
        """Fetch one task row as a plain dict, or None when absent."""
        from .utils import get_conn
        with get_conn() as conn:
            conn.row_factory = sqlite3.Row
            cursor = conn.execute("SELECT * FROM tasks WHERE id=?", (task_id,))
            record = cursor.fetchone()
            if record is None:
                return None
            return dict(record)

    def store_result(self, task_id, result):
        """JSON-encode *result* and attach it to the task row."""
        from .utils import get_conn
        payload = json.dumps(result)
        with get_conn() as conn:
            conn.execute(
                "UPDATE tasks SET result=? WHERE id=?",
                (payload, task_id),
            )
            conn.commit()

    def forget(self, task_id):
        """Delete the task row entirely."""
        from .utils import get_conn
        with get_conn() as conn:
            conn.execute("DELETE FROM tasks WHERE id=?", (task_id,))
            conn.commit()
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
from .utils import get_conn, now
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def cancel(task_id):
    """Mark a running task as cancelled.

    Only tasks currently in the ``running`` state are cancelled; an
    unknown id or any other state just prints a message and returns.
    """
    with get_conn() as conn:
        status_row = conn.execute(
            "SELECT status FROM tasks WHERE id=?",
            (task_id,),
        ).fetchone()

        if status_row is None:
            print("โ Task not found")
            return

        if status_row[0] != "running":
            print("โ Task is not running")
            return

        conn.execute(
            """
            UPDATE tasks
            SET status='cancelled',
                finished_at=?,
                updated_at=?
            WHERE id=?
            """,
            (now().isoformat(), now().isoformat(), task_id),
        )
        conn.commit()

    print("๐ Task cancelled")
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import sys
|
|
3
|
+
import os
|
|
4
|
+
import importlib
|
|
5
|
+
|
|
6
|
+
from .worker import run_worker
|
|
7
|
+
from .inspect import inspect
|
|
8
|
+
from .retry import retry
|
|
9
|
+
from .purge import purge
|
|
10
|
+
from .cancel import cancel
|
|
11
|
+
from .failed import list_failed
|
|
12
|
+
from .stats import stats
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def main():
    """Command-line entry point for pybgworker.

    Parses one positional command (plus an optional task id) and
    dispatches to the matching worker/management function.
    """
    parser = argparse.ArgumentParser("pybgworker")

    parser.add_argument(
        "command",
        choices=["run", "inspect", "retry", "purge", "cancel", "failed", "stats"],
        help="worker control commands",
    )
    parser.add_argument(
        "task_id",
        nargs="?",
        help="task id for retry/cancel",
    )
    parser.add_argument(
        "--app",
        help="module containing task definitions (required for run)",
    )

    args = parser.parse_args()
    command = args.command

    if command == "run":
        if not args.app:
            parser.error("--app is required for 'run'")
        # Make the working directory importable so --app resolves.
        sys.path.insert(0, os.getcwd())
        importlib.import_module(args.app)
        run_worker()
        return

    # Commands that operate on a single task need its id up front.
    if command == "retry":
        if not args.task_id:
            parser.error("retry requires task_id")
        retry(args.task_id)
    elif command == "cancel":
        if not args.task_id:
            parser.error("cancel requires task_id")
        cancel(args.task_id)
    elif command == "inspect":
        inspect()
    elif command == "purge":
        purge()
    elif command == "failed":
        list_failed()
    elif command == "stats":
        stats()
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
# Allow direct invocation as a script (`python -m pybgworker.cli ...`).
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import os
|
|
2
|
+
# Seconds of heartbeat silence before a worker is considered dead.
# NOTE(review): assumed meaning — confirm against the worker loop's usage.
WORKER_TIMEOUT = 15
RATE_LIMIT = 5  # tasks per second
# SQLite database file; override with the PYBGWORKER_DB env var.
DB_PATH = os.getenv("PYBGWORKER_DB", "pybgworker.db")
# Identity this worker reports; override with PYBGWORKER_WORKER_NAME.
WORKER_NAME = os.getenv("PYBGWORKER_WORKER_NAME", "worker-1")
# Seconds between queue polls (PYBGWORKER_POLL_INTERVAL).
POLL_INTERVAL = float(os.getenv("PYBGWORKER_POLL_INTERVAL", 1.0))
# Seconds after which a held task lock may be reclaimed (PYBGWORKER_LOCK_TIMEOUT).
LOCK_TIMEOUT = int(os.getenv("PYBGWORKER_LOCK_TIMEOUT", 60))
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from .utils import get_conn
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def list_failed():
    """Print every failed task with its attempt count and last error."""
    with get_conn() as conn:
        failed_rows = conn.execute(
            """
            SELECT id, name, attempt, last_error
            FROM tasks
            WHERE status='failed'
            ORDER BY updated_at DESC
            """
        ).fetchall()

    if not failed_rows:
        print("โ No failed tasks")
        return

    print("\nโ Failed Tasks\n")

    for task_id, task_name, attempts, error in failed_rows:
        print(f"ID: {task_id}")
        print(f"Task: {task_name}")
        print(f"Attempts: {attempts}")
        # Truncate long tracebacks to keep the listing readable.
        print(f"Error: {error[:120] if error else 'None'}")
        print("-" * 40)
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from .utils import get_conn
|
|
2
|
+
from datetime import datetime, timezone
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def inspect():
    """Print queue statistics and worker liveness to stdout.

    Shows a per-status task count (plus a total) and, for each
    registered worker, whether it is alive based on its last heartbeat.
    """
    # Use the shared config threshold instead of a hard-coded 15 so the
    # liveness cutoff stays consistent with the rest of the package.
    from .config import WORKER_TIMEOUT

    with get_conn() as conn:
        conn.row_factory = dict_factory

        print("\n๐ฆ Task Stats")

        status_counts = conn.execute("""
            SELECT status, COUNT(*) as count
            FROM tasks
            GROUP BY status
        """).fetchall()

        total = 0
        for row in status_counts:
            print(f"{row['status']:10} {row['count']}")
            total += row["count"]

        print(f"{'total':10} {total}")

        print("\n๐ท Workers")

        workers = conn.execute("""
            SELECT name, last_seen
            FROM workers
        """).fetchall()

        now = datetime.now(timezone.utc)

        for w in workers:
            # NOTE(review): assumes last_seen is an aware UTC ISO
            # timestamp; a naive value would make the subtraction raise.
            last_seen = datetime.fromisoformat(w["last_seen"])
            delta = (now - last_seen).total_seconds()

            status = "alive" if delta < WORKER_TIMEOUT else "dead"
            print(f"{w['name']:10} {status:5} ({int(delta)}s ago)")

        print()
|
|
41
|
+
def dict_factory(cursor, row):
    """sqlite3 row factory mapping each row to a column-name -> value dict."""
    columns = [description[0] for description in cursor.description]
    return dict(zip(columns, row))
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import sys
|
|
3
|
+
from datetime import datetime, timezone
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def log(event, **fields):
    """Emit one structured JSON log line to stdout.

    Every line carries a UTC timestamp and the event name; extra
    keyword fields are merged in (and may override either key).
    """
    record = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "event": event,
    }
    record.update(fields)

    sys.stdout.write(json.dumps(record) + "\n")
    sys.stdout.flush()
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from .utils import get_conn
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def purge():
    """Delete every task still waiting to run and report how many."""
    with get_conn() as conn:
        cursor = conn.execute("""
            DELETE FROM tasks
            WHERE status IN ('queued', 'retrying')
        """)
        removed = cursor.rowcount
        conn.commit()

    print(f"๐งน Purged {removed} queued tasks")
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
|
|
3
|
+
class BaseQueue(ABC):
    """Abstract task-queue interface.

    Implementations provide enqueue/fetch/ack semantics plus failure
    recording and rescheduling hooks.
    """

    @abstractmethod
    def enqueue(self, task: dict):
        """Add *task* to the queue."""

    @abstractmethod
    def fetch_next(self, worker_name: str):
        """Claim and return the next runnable task for *worker_name*."""

    @abstractmethod
    def ack(self, task_id: str):
        """Mark the task as successfully completed."""

    @abstractmethod
    def fail(self, task_id: str, error: str):
        """Record *error* against the task and mark it failed."""

    @abstractmethod
    def reschedule(self, task_id: str, run_at):
        """Requeue the task to run again at *run_at*."""
|