rundown-workers 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,68 @@
1
+ Metadata-Version: 2.4
2
+ Name: rundown-workers
3
+ Version: 0.1.0
4
+ Summary: Lightweight workflow executor SDK for Python
5
+ Author-email: Ernest <ernestdior5050@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/its-ernest/rundown-workers
8
+ Project-URL: Bug Tracker, https://github.com/its-ernest/rundown-workers/issues
9
+ Project-URL: Documentation, https://github.com/its-ernest/rundown-workers/blob/main/sdk/python/README.md
10
+ Requires-Python: >=3.7
11
+ Description-Content-Type: text/markdown
12
+ Requires-Dist: requests
13
+
14
+ # Rundown-Workers Python SDK (v0.1.0)
15
+
16
+ [![Coverage](https://img.shields.io/badge/coverage-80%25-brightgreen)](https://github.com/its-ernest/rundown-workers)
17
+
18
+ A highly developer-friendly, lightweight Python SDK for implementing and running workflow workers.
19
+
20
+ ## Installation
21
+
22
+ To install the SDK locally in editable mode (during development):
23
+
24
+ ```bash
25
+ # From the project root
26
+ pip install -e sdk/python
27
+ ```
28
+
29
+ ## Basic Usage
30
+
31
+ The SDK uses decorators to register worker functions.
32
+
33
+ ```python
34
+ import rundown_workers as rw
35
+
36
+ # Register a simple task
37
+ @rw.queue(name="greetings", max_retries=3)
38
+ def hello_task(payload):
39
+ print(f"[*] Received: {payload}")
40
+ return True
41
+
42
+ if __name__ == "__main__":
43
+ # Start all registered workers in separate polling threads
44
+ rw.run()
45
+ ```
46
+
47
+ ### Advanced Features
48
+
49
+ #### 1. Retries with Exponential Backoff
50
+ When enqueuing a job or defining a queue, you can specify `max_retries`. If your function raises an exception, the engine will automatically reschedule the job with an increasing delay (e.g., 5s, 20s, 45s).
51
+
52
+ #### 2. Local Timeout Enforcement
53
+ You can set a `timeout` (in seconds) for each task. If your function hangs beyond this period, the worker will automatically:
54
+ 1. Detect the timeout.
55
+ 2. Report the failure to the engine.
56
+ 3. Move on to the next available job.
57
+
58
+ ```python
59
+ # This task will fail if it takes longer than 2 seconds
60
+ rw.enqueue(queue="greetings", payload="Hello!", timeout=2)
61
+ ```
62
+
63
+ ## Internal Architecture
64
+
65
+ The Python SDK uses `threading` to handle local polling for multiple queues simultaneously and to monitor task execution times.
66
+
67
+ - **Polling Loop**: Each queue has its own daemon thread continuously hitting the `/poll` endpoint.
68
+ - **Task Execution**: Handlers are executed in a temporary thread with a `join(timeout=...)` call to ensure the worker process doesn't hang.
@@ -0,0 +1,55 @@
1
+ # Rundown-Workers Python SDK (v0.1.0)
2
+
3
+ [![Coverage](https://img.shields.io/badge/coverage-80%25-brightgreen)](https://github.com/its-ernest/rundown-workers)
4
+
5
+ A highly developer-friendly, lightweight Python SDK for implementing and running workflow workers.
6
+
7
+ ## Installation
8
+
9
+ To install the SDK locally in editable mode (during development):
10
+
11
+ ```bash
12
+ # From the project root
13
+ pip install -e sdk/python
14
+ ```
15
+
16
+ ## Basic Usage
17
+
18
+ The SDK uses decorators to register worker functions.
19
+
20
+ ```python
21
+ import rundown_workers as rw
22
+
23
+ # Register a simple task
24
+ @rw.queue(name="greetings", max_retries=3)
25
+ def hello_task(payload):
26
+ print(f"[*] Received: {payload}")
27
+ return True
28
+
29
+ if __name__ == "__main__":
30
+ # Start all registered workers in separate polling threads
31
+ rw.run()
32
+ ```
33
+
34
+ ### Advanced Features
35
+
36
+ #### 1. Retries with Exponential Backoff
37
+ When enqueuing a job or defining a queue, you can specify `max_retries`. If your function raises an exception, the engine will automatically reschedule the job with an increasing delay (e.g., 5s, 20s, 45s).
38
+
39
+ #### 2. Local Timeout Enforcement
40
+ You can set a `timeout` (in seconds) for each task. If your function hangs beyond this period, the worker will automatically:
41
+ 1. Detect the timeout.
42
+ 2. Report the failure to the engine.
43
+ 3. Move on to the next available job.
44
+
45
+ ```python
46
+ # This task will fail if it takes longer than 2 seconds
47
+ rw.enqueue(queue="greetings", payload="Hello!", timeout=2)
48
+ ```
49
+
50
+ ## Internal Architecture
51
+
52
+ The Python SDK uses `threading` to handle local polling for multiple queues simultaneously and to monitor task execution times.
53
+
54
+ - **Polling Loop**: Each queue has its own daemon thread continuously hitting the `/poll` endpoint.
55
+ - **Task Execution**: Handlers are executed in a temporary thread with a `join(timeout=...)` call to ensure the worker process doesn't hang.
@@ -0,0 +1,26 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "rundown-workers"
7
+ version = "0.1.0"
8
+ description = "Lightweight workflow executor SDK for Python"
9
+ readme = "README.md"
10
+ license = { text = "MIT" }
11
+ authors = [
12
+ { name = "Ernest", email = "ernestdior5050@gmail.com" }
13
+ ]
14
+ dependencies = [
15
+ "requests",
16
+ ]
17
+ requires-python = ">=3.7"
18
+
19
+ [project.urls]
20
+ "Homepage" = "https://github.com/its-ernest/rundown-workers"
21
+ "Bug Tracker" = "https://github.com/its-ernest/rundown-workers/issues"
22
+ "Documentation" = "https://github.com/its-ernest/rundown-workers/blob/main/sdk/python/README.md"
23
+
24
+ [tool.setuptools.packages.find]
25
+ where = ["."]
26
+ include = ["rundown_workers*"]
@@ -0,0 +1,54 @@
1
+ from .client import Client
2
+ import functools
3
+ import sys
4
+ import signal
5
+ import threading
6
+ import time
7
+
8
+ # Global list of workers to start
9
+ _workers = []
10
+
11
def queue(name, host="http://localhost:8181", poll_interval=1.0, max_retries=3):
    """Decorator that registers a function as a worker for queue *name*.

    Registration appends the handler (plus its connection/polling settings)
    to the module-level registry consumed by run(). The returned wrapper
    simply forwards to the original handler, so decorated functions can
    still be called directly in tests.
    """
    def register(func):
        # Stash everything run() needs to spin up a polling thread later.
        _workers.append((name, host, func, poll_interval, max_retries))

        @functools.wraps(func)
        def invoke(payload):
            return func(payload)

        return invoke

    return register
20
+
21
def enqueue(queue, payload, host="http://localhost:8181", timeout=None, max_retries=None):
    """Submit a single job to the named queue on the engine at *host*.

    timeout and max_retries are optional per-job overrides forwarded to
    the engine; the HTTP call itself is delegated to a throwaway Client.
    """
    return Client(host=host).enqueue(
        queue, payload, timeout=timeout, max_retries=max_retries
    )
25
+
26
def run():
    """Start a daemon polling thread for every worker registered via @queue.

    Installs a SIGINT handler so Ctrl+C exits cleanly, then parks the main
    thread in a sleep loop; the daemon threads die with the process.
    """
    if not _workers:
        print("[!] No workers registered. Use @rw.queue to register functions.")
        return

    # Graceful Ctrl+C: announce shutdown and exit immediately.
    def _on_sigint(signum, stack):
        print("\n[*] Rundown-Workers worker stopped.")
        sys.exit(0)

    signal.signal(signal.SIGINT, _on_sigint)

    # One polling thread per registered queue.
    # NOTE(review): the max_retries value captured at registration is not
    # forwarded to Client.start_worker here -- confirm whether retry policy
    # is applied engine-side or this is a dropped setting.
    threads = []
    for queue_name, host, handler, interval, _max_retries in _workers:
        worker_thread = threading.Thread(
            target=Client(host=host).start_worker,
            args=(queue_name, handler, interval),
            daemon=True,
        )
        worker_thread.start()
        threads.append(worker_thread)

    # Keep the main thread alive so daemon threads can keep polling.
    while True:
        time.sleep(1)
@@ -0,0 +1,105 @@
1
+ import requests
2
+ import time
3
+ import threading
4
+
5
+ class Client:
6
+ def __init__(self, host="http://localhost:8181"):
7
+ self.host = host
8
+
9
+ def enqueue(self, queue, payload, timeout=None, max_retries=None):
10
+ """
11
+ Submits a new task to the Rundown-Workers engine.
12
+
13
+ :param queue: The name of the queue to join.
14
+ :param payload: The data content of the job.
15
+ :param timeout: Optional override for the execution time limit (in seconds).
16
+ :param max_retries: Optional override for the maximum retry count.
17
+ """
18
+ url = f"{self.host}/enqueue"
19
+ data = {"queue": queue, "payload": payload}
20
+ if timeout:
21
+ data["timeout"] = timeout
22
+ if max_retries is not None:
23
+ data["max_retries"] = max_retries
24
+ resp = requests.post(url, json=data)
25
+ resp.raise_for_status()
26
+ return resp.json()
27
+
28
+ def poll(self, queue):
29
+ url = f"{self.host}/poll"
30
+ data = {"queue": queue}
31
+ resp = requests.post(url, json=data)
32
+ if resp.status_code == 204:
33
+ return None
34
+ resp.raise_for_status()
35
+ return resp.json()
36
+
37
+ def complete(self, job_id):
38
+ url = f"{self.host}/complete"
39
+ data = {"id": job_id}
40
+ resp = requests.post(url, json=data)
41
+ resp.raise_for_status()
42
+
43
+ def fail(self, job_id):
44
+ url = f"{self.host}/fail"
45
+ data = {"id": job_id}
46
+ resp = requests.post(url, json=data)
47
+ resp.raise_for_status()
48
+
49
+ def start_worker(self, queue_name, handler, poll_interval=1.0):
50
+ """
51
+ Starts a continuous polling loop for a specific queue.
52
+
53
+ This method supports thread-safe local execution and will automatically
54
+ fail jobs if they exceed their timeout limit.
55
+ """
56
+ print(f"[*] Starting worker for queue '{queue_name}'...")
57
+ while True:
58
+ try:
59
+ job = self.poll(queue_name)
60
+ if not job:
61
+ time.sleep(poll_interval)
62
+ continue
63
+
64
+ print(f"[*] Job {job['id']} assigned. Executing...")
65
+
66
+ # Get timeout from job (default 300s)
67
+ job_timeout = job.get('timeout', 300)
68
+
69
+ # Container for thread results
70
+ # [success, error_message]
71
+ state = [False, ""]
72
+
73
+ def run_handler():
74
+ try:
75
+ handler(job['payload'])
76
+ state[0] = True
77
+ except Exception as e:
78
+ state[1] = str(e)
79
+
80
+ task_thread = threading.Thread(target=run_handler, daemon=True)
81
+ task_thread.start()
82
+ task_thread.join(timeout=job_timeout)
83
+
84
+ if task_thread.is_alive():
85
+ print(f"[!] Job {job['id']} timed out after {job_timeout}s.")
86
+ try:
87
+ self.fail(job['id'])
88
+ except Exception as fe:
89
+ print(f"[!] Error reporting timeout: {fe}")
90
+ elif state[0]:
91
+ try:
92
+ self.complete(job['id'])
93
+ print(f"[*] Job {job['id']} completed successfully.")
94
+ except Exception as ce:
95
+ print(f"[!] Error reporting completion: {ce}")
96
+ else:
97
+ print(f"[!] Job {job['id']} failed: {state[1]}")
98
+ try:
99
+ self.fail(job['id'])
100
+ except Exception as fe:
101
+ print(f"[!] Error reporting failure: {fe}")
102
+
103
+ except Exception as e:
104
+ print(f"[!] Worker polling error: {e}")
105
+ time.sleep(5)
@@ -0,0 +1,68 @@
1
+ Metadata-Version: 2.4
2
+ Name: rundown-workers
3
+ Version: 0.1.0
4
+ Summary: Lightweight workflow executor SDK for Python
5
+ Author-email: Ernest <ernestdior5050@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/its-ernest/rundown-workers
8
+ Project-URL: Bug Tracker, https://github.com/its-ernest/rundown-workers/issues
9
+ Project-URL: Documentation, https://github.com/its-ernest/rundown-workers/blob/main/sdk/python/README.md
10
+ Requires-Python: >=3.7
11
+ Description-Content-Type: text/markdown
12
+ Requires-Dist: requests
13
+
14
+ # Rundown-Workers Python SDK (v0.1.0)
15
+
16
+ [![Coverage](https://img.shields.io/badge/coverage-80%25-brightgreen)](https://github.com/its-ernest/rundown-workers)
17
+
18
+ A highly developer-friendly, lightweight Python SDK for implementing and running workflow workers.
19
+
20
+ ## Installation
21
+
22
+ To install the SDK locally in editable mode (during development):
23
+
24
+ ```bash
25
+ # From the project root
26
+ pip install -e sdk/python
27
+ ```
28
+
29
+ ## Basic Usage
30
+
31
+ The SDK uses decorators to register worker functions.
32
+
33
+ ```python
34
+ import rundown_workers as rw
35
+
36
+ # Register a simple task
37
+ @rw.queue(name="greetings", max_retries=3)
38
+ def hello_task(payload):
39
+ print(f"[*] Received: {payload}")
40
+ return True
41
+
42
+ if __name__ == "__main__":
43
+ # Start all registered workers in separate polling threads
44
+ rw.run()
45
+ ```
46
+
47
+ ### Advanced Features
48
+
49
+ #### 1. Retries with Exponential Backoff
50
+ When enqueuing a job or defining a queue, you can specify `max_retries`. If your function raises an exception, the engine will automatically reschedule the job with an increasing delay (e.g., 5s, 20s, 45s).
51
+
52
+ #### 2. Local Timeout Enforcement
53
+ You can set a `timeout` (in seconds) for each task. If your function hangs beyond this period, the worker will automatically:
54
+ 1. Detect the timeout.
55
+ 2. Report the failure to the engine.
56
+ 3. Move on to the next available job.
57
+
58
+ ```python
59
+ # This task will fail if it takes longer than 2 seconds
60
+ rw.enqueue(queue="greetings", payload="Hello!", timeout=2)
61
+ ```
62
+
63
+ ## Internal Architecture
64
+
65
+ The Python SDK uses `threading` to handle local polling for multiple queues simultaneously and to monitor task execution times.
66
+
67
+ - **Polling Loop**: Each queue has its own daemon thread continuously hitting the `/poll` endpoint.
68
+ - **Task Execution**: Handlers are executed in a temporary thread with a `join(timeout=...)` call to ensure the worker process doesn't hang.
@@ -0,0 +1,9 @@
1
+ README.md
2
+ pyproject.toml
3
+ rundown_workers/__init__.py
4
+ rundown_workers/client.py
5
+ rundown_workers.egg-info/PKG-INFO
6
+ rundown_workers.egg-info/SOURCES.txt
7
+ rundown_workers.egg-info/dependency_links.txt
8
+ rundown_workers.egg-info/requires.txt
9
+ rundown_workers.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ rundown_workers
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+