python-durable 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,22 @@
+ name: Publish to PyPI
+
+ on:
+   release:
+     types: [published]
+
+ jobs:
+   publish:
+     runs-on: ubuntu-latest
+     permissions:
+       id-token: write
+     steps:
+       - uses: actions/checkout@v4
+
+       - uses: astral-sh/setup-uv@v6
+
+       - run: uv build
+
+       - uses: astral-sh/setup-uv@v6
+         with:
+           enable-cache: false
+       - run: uv publish --trusted-publishing always
@@ -0,0 +1,10 @@
+ # Python-generated files
+ __pycache__/
+ *.py[oc]
+ build/
+ dist/
+ wheels/
+ *.egg-info
+
+ # Virtual environments
+ .venv
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 python-durable contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,133 @@
+ Metadata-Version: 2.4
+ Name: python-durable
+ Version: 0.1.0
+ Summary: Lightweight workflow durability for Python — make any async workflow resumable after crashes with just a decorator.
+ Project-URL: Repository, https://github.com/WillemDeGroef/python-durable
+ Author: Willem
+ License-Expression: MIT
+ License-File: LICENSE
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Framework :: AsyncIO
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Typing :: Typed
+ Requires-Python: >=3.12
+ Requires-Dist: aiosqlite>=0.20
+ Provides-Extra: dev
+ Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
+ Requires-Dist: pytest>=8.0; extra == 'dev'
+ Requires-Dist: ruff>=0.9; extra == 'dev'
+ Requires-Dist: ty>=0.0.1a7; extra == 'dev'
+ Provides-Extra: examples
+ Requires-Dist: pydantic-ai>=0.1; extra == 'examples'
+ Requires-Dist: pydantic>=2.0; extra == 'examples'
+ Description-Content-Type: text/markdown
+
+ # durable
+
+ Lightweight workflow durability for Python. Make any async workflow resumable after crashes with just a decorator.
+
+ Backed by SQLite out of the box; swap in any `Store` subclass for production.
+
+ ## Install
+
+ ```bash
+ pip install python-durable
+ ```
+
+ ## Quick start
+
+ ```python
+ import httpx
+
+ from durable import Workflow
+ from durable.backoff import exponential
+
+ wf = Workflow("my-app")
+
+ @wf.task(retries=3, backoff=exponential(base=2, max=60))
+ async def fetch_data(url: str) -> dict:
+     async with httpx.AsyncClient() as client:
+         return (await client.get(url)).json()
+
+ @wf.task
+ async def save_result(data: dict) -> None:
+     await db.insert(data)  # `db` stands in for your own async database client
+
+ @wf.workflow(id="pipeline-{source}")
+ async def run_pipeline(source: str) -> None:
+     data = await fetch_data(f"https://api.example.com/{source}")
+     await save_result(data)
+
+ # First call: runs all steps and checkpoints each one.
+ # If it crashes and you call it again with the same args,
+ # completed steps are replayed from SQLite instantly.
+ await run_pipeline(source="users")
+ ```
+
+ ## How it works
+
+ 1. **`@wf.task`** wraps an async function with checkpoint + retry logic. When called inside a workflow, results are persisted to the store. On re-run, completed steps return their cached result without re-executing.
+
+ 2. **`@wf.workflow`** marks the entry point of a durable run. It manages a `RunContext` (via `ContextVar`) so tasks automatically know which run they belong to. The `id` parameter is a template string resolved from function arguments at call time.
+
+ 3. **`Store`** is the persistence backend. `SQLiteStore` is the default (zero config, backed by aiosqlite). Subclass `Store` to use Postgres, Redis, or anything else.
+
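+ As a rough illustration of point 3: the abstract interface of `Store` isn't spelled out in this README, so the method names below are hypothetical placeholders rather than the library's actual API; check `durable.store` for the real signatures. A minimal sketch, assuming the store persists one JSON payload per (run, step) pair:
+
+ ```python
+ from durable.store import Store
+
+ class RedisStore(Store):
+     """Sketch only — `load`/`save` are illustrative names, not the real interface."""
+
+     def __init__(self, redis):
+         self.redis = redis  # e.g. a redis.asyncio client
+
+     async def load(self, run_id: str, step_id: str) -> str | None:
+         # Return the checkpointed JSON payload, or None if the step never completed.
+         return await self.redis.get(f"durable:{run_id}:{step_id}")
+
+     async def save(self, run_id: str, step_id: str, payload: str) -> None:
+         # Persist the JSON-encoded result of a completed step.
+         await self.redis.set(f"durable:{run_id}:{step_id}", payload)
+ ```
+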
+ ## Features
+
+ - **Crash recovery** — completed steps are never re-executed after a restart
+ - **Automatic retries** — configurable per-task with `exponential`, `linear`, or `constant` backoff
+ - **Loop support** — use `step_id` to checkpoint each iteration independently
+ - **Zero magic outside workflows** — tasks work as plain async functions when called without a workflow context (see the snippet after this list)
+ - **Pluggable storage** — SQLite by default, bring your own `Store` for production
+
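+ To illustrate the "zero magic" point above: awaiting a task outside any workflow run simply executes the underlying function, with no checkpointing involved. Reusing `fetch_data` from the quick start:
+
+ ```python
+ # No surrounding @wf.workflow call, so this behaves like a plain async function:
+ data = await fetch_data("https://api.example.com/users")
+ ```
+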
+ ## Backoff strategies
+
+ ```python
+ from durable.backoff import exponential, linear, constant
+
+ @wf.task(retries=5, backoff=exponential(base=2, max=60))  # 2s, 4s, 8s, 16s, 32s
+ async def exp_task(): ...
+
+ @wf.task(retries=3, backoff=linear(start=2, step=3))  # 2s, 5s, 8s
+ async def linear_task(): ...
+
+ @wf.task(retries=3, backoff=constant(5))  # 5s, 5s, 5s
+ async def const_task(): ...
+ ```
+
+ ## Loops with step_id
+
+ When calling the same task in a loop, pass `step_id` so each iteration is checkpointed independently:
+
+ ```python
+ @wf.workflow(id="batch-{batch_id}")
+ async def process_batch(batch_id: str) -> None:
+     for i, item in enumerate(items):
+         await process_item(item, step_id=f"item-{i}")
+ ```
+
+ If the workflow crashes mid-loop, only the remaining items are processed on restart.
+
+ ## Important: JSON serialization
+
+ Task return values must be JSON-serializable (dicts, lists, strings, numbers, booleans, `None`). The store uses `json.dumps` internally.
+
+ For Pydantic models, return `.model_dump()` from tasks and reconstruct with `.model_validate()` downstream:
+
+ ```python
+ @wf.task
+ async def validate_invoice(draft: InvoiceDraft) -> dict:
+     validated = ValidatedInvoice(...)
+     return validated.model_dump()
+
+ @wf.task
+ async def book_invoice(data: dict) -> dict:
+     invoice = ValidatedInvoice.model_validate(data)
+     ...
+ ```
+
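+ The same constraint applies to anything else `json.dumps` rejects; `datetime`, for instance, raises `TypeError`. Convert such values to primitives before returning them. A small sketch (plain stdlib, in the style of the tasks above):
+
+ ```python
+ from datetime import datetime, timezone
+
+ @wf.task
+ async def stamp_event(name: str) -> dict:
+     # datetime isn't JSON-serializable, so checkpoint an ISO-8601 string instead
+     return {"name": name, "at": datetime.now(timezone.utc).isoformat()}
+ ```
+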
+ ## License
+
+ MIT
@@ -0,0 +1,267 @@
+ """
+ examples.py — durable library usage examples.
+
+ Run with: python examples.py
+ """
+
+ import asyncio
+ from dataclasses import dataclass
+ from enum import StrEnum
+
+ from pydantic import BaseModel
+
+ from durable import Workflow
+ from durable.backoff import constant, exponential, linear
+
+ # ---------------------------------------------------------------------------
+ # Setup
+ # ---------------------------------------------------------------------------
+
+ wf = Workflow("examples", db="sqlite:///examples.db")
+
+
+ # ---------------------------------------------------------------------------
+ # Example 1 — Basic pipeline
+ # ---------------------------------------------------------------------------
+
+
+ # Domain models shown for illustration — the tasks below pass plain dicts,
+ # since checkpointed values must be JSON-serializable.
+ @dataclass
+ class User:
+     id: str
+     email: str
+     name: str
+
+
+ @dataclass
+ class Invoice:
+     user_id: str
+     amount: float
+     items: list[str]
+
+
+ @wf.task
+ async def fetch_user(user_id: str) -> dict:
+     print(f" [fetch_user] fetching user {user_id}...")
+     await asyncio.sleep(0.1)  # simulate I/O
+     return {"id": user_id, "email": f"{user_id}@example.com", "name": "Alice"}
+
+
+ @wf.task
+ async def build_invoice(user: dict) -> dict:
+     print(f" [build_invoice] building for {user['name']}...")
+     await asyncio.sleep(0.1)
+     return {"user_id": user["id"], "amount": 49.99, "items": ["Pro Plan"]}
+
+
+ @wf.task
+ async def send_email(user: dict, invoice: dict) -> None:
+     print(f" [send_email] sending to {user['email']} for ${invoice['amount']}")
+     await asyncio.sleep(0.1)
+
+
+ @wf.workflow(id="process-order-{order_id}")
+ async def process_order(order_id: str) -> None:
+     user = await fetch_user(order_id)
+     invoice = await build_invoice(user)
+     await send_email(user, invoice)
+
+
+ # ---------------------------------------------------------------------------
+ # Example 2 — Flaky API with aggressive retries
+ # ---------------------------------------------------------------------------
+
+ _call_count = 0
+
+
+ @wf.task(retries=5, backoff=exponential(base=2, max=30))
+ async def call_flaky_api(endpoint: str) -> dict:
+     global _call_count
+     _call_count += 1
+     if _call_count < 3:
+         raise ConnectionError(f"API down (attempt {_call_count})")
+     print(f" [call_flaky_api] succeeded on attempt {_call_count}")
+     return {"status": "ok", "data": [1, 2, 3]}
+
+
+ @wf.workflow(id="flaky-{job_id}")
+ async def flaky_pipeline(job_id: str) -> dict:
+     return await call_flaky_api("/data")
+
+
+ # ---------------------------------------------------------------------------
+ # Example 3 — Loop with explicit step_id
+ # ---------------------------------------------------------------------------
+
+
+ @wf.task(retries=2, backoff=constant(1))
+ async def push_record(record: dict) -> bool:
+     print(f" [push_record] pushing {record['id']}...")
+     await asyncio.sleep(0.05)
+     return True
+
+
+ @wf.task
+ async def post_summary(count: int) -> None:
+     print(f" [post_summary] all done — pushed {count} records")
+
+
+ @wf.workflow(id="crm-sync-{batch_id}")
+ async def sync_to_crm(batch_id: str) -> None:
+     # Records loaded with plain Python — not every function needs to be a task
+     records = [{"id": f"rec-{i}", "value": i * 10} for i in range(5)]
+
+     for i, record in enumerate(records):
+         # step_id disambiguates repeated calls to the same task in a loop.
+         # If the workflow crashes mid-loop, only the remaining records are pushed.
+         await push_record(record, step_id=f"push-record-{i}")
+
+     await post_summary(len(records))
+
+
+ # ---------------------------------------------------------------------------
+ # Example 4 — Multiple backoff strategies side by side
+ # ---------------------------------------------------------------------------
+
+
+ @wf.task(retries=3, backoff=exponential(base=2, max=60))  # 2s, 4s, 8s
+ async def sync_to_warehouse(row: dict) -> None:
+     print(f" [sync_to_warehouse] writing row {row['id']}")
+
+
+ @wf.task(retries=3, backoff=linear(start=2, step=3))  # 2s, 5s, 8s
+ async def notify_slack(message: str) -> None:
+     print(f" [notify_slack] {message}")
+
+
+ @wf.task(retries=5, backoff=constant(5))  # always 5s
+ async def send_sms(number: str, body: str) -> None:
+     print(f" [send_sms] → {number}: {body}")
+
+
+ # ---------------------------------------------------------------------------
+ # Example 5 — Explicit run ID via .run()
+ # ---------------------------------------------------------------------------
+
+
+ @wf.workflow(id="report-{date}")
+ async def generate_report(date: str) -> dict:
+     data = await fetch_user("analyst-1")  # reusing tasks across workflows!
+     return {"generated_for": date, "by": data["name"]}
+
+
+ # ---------------------------------------------------------------------------
+ # Example 6 — Pydantic models as task parameters
+ #
+ # The checkpoint store serializes results with json.dumps / json.loads, so
+ # task return values must be JSON-serializable (dicts, lists, primitives).
+ #
+ # Pattern: tasks return model.model_dump(), downstream tasks reconstruct
+ # with Model.model_validate(). This keeps checkpoints portable and lets
+ # workflows resume cleanly after a crash.
+ # ---------------------------------------------------------------------------
+
+
+ class LineItem(BaseModel):
+     description: str
+     quantity: int
+     unit_price: float
+
+
+ class Currency(StrEnum):
+     EUR = "EUR"
+     USD = "USD"
+
+
+ class InvoiceDraft(BaseModel):
+     customer_name: str
+     currency: Currency
+     lines: list[LineItem]
+
+
+ class ValidatedInvoice(BaseModel):
+     customer_name: str
+     currency: Currency
+     lines: list[LineItem]
+     total: float
+     reference: str
+
+
+ @wf.task
+ async def validate_invoice(draft: InvoiceDraft) -> dict:
+     """Accept a Pydantic model, return a dict for checkpoint storage."""
+     print(
+         f" [validate_invoice] validating {len(draft.lines)} line(s) for {draft.customer_name}"
+     )
+     total = sum(line.quantity * line.unit_price for line in draft.lines)
+     validated = ValidatedInvoice(
+         customer_name=draft.customer_name,
+         currency=draft.currency,
+         lines=draft.lines,
+         total=round(total, 2),
+         # NB: str hashing is randomized per process; the checkpoint pins this
+         # value for a given run, so replays stay consistent.
+         reference=f"INV-{hash(draft.customer_name) % 10000:04d}",
+     )
+     # .model_dump() → plain dict that json.dumps can serialize
+     return validated.model_dump()
+
+
+ @wf.task
+ async def book_invoice(invoice_data: dict) -> dict:
+     """Reconstruct the Pydantic model from the checkpointed dict."""
+     invoice = ValidatedInvoice.model_validate(invoice_data)
+     print(
+         f" [book_invoice] booking {invoice.reference}: "
+         f"{invoice.currency} {invoice.total:.2f} for {invoice.customer_name}"
+     )
+     return {"reference": invoice.reference, "status": "booked"}
+
+
+ @wf.workflow(id="invoice-{customer}")
+ async def process_invoice(customer: str, draft: InvoiceDraft) -> dict:
+     validated = await validate_invoice(draft)
+     return await book_invoice(validated)
+
+
+ # ---------------------------------------------------------------------------
+ # Demo runner
+ # ---------------------------------------------------------------------------
+
+
+ async def main():
+     print("\n── Example 1: Basic order pipeline ──")
+     await process_order(order_id="ord-42")
+     print(" (run again → all steps replayed from cache)")
+     await process_order(order_id="ord-42")
+
+     print("\n── Example 2: Flaky API with retries ──")
+     result = await flaky_pipeline(job_id="job-1")
+     print(f" result: {result}")
+
+     print("\n── Example 3: Loop with step_id ──")
+     await sync_to_crm(batch_id="batch-2024-01")
+     print(" (run again → all records skipped, already pushed)")
+     await sync_to_crm(batch_id="batch-2024-01")
+
+     print("\n── Example 5: Explicit run ID ──")
+     report = await generate_report.run("weekly-report-custom-id", date="2024-01-15")
+     print(f" report: {report}")
+
+     print("\n── Example 6: Pydantic models as task params ──")
+     draft = InvoiceDraft(
+         customer_name="Acme Corp",
+         currency=Currency.EUR,
+         lines=[
+             LineItem(description="Consulting", quantity=10, unit_price=150.0),
+             LineItem(description="Travel expenses", quantity=1, unit_price=340.50),
+         ],
+     )
+     booking = await process_invoice(customer="acme", draft=draft)
+     print(f" result: {booking}")
+     print(" (run again → all steps replayed from cache)")
+     booking = await process_invoice(customer="acme", draft=draft)
+     print(f" result: {booking}")
+
+     print("\n✓ All examples complete. Check examples.db to see the checkpoint store.")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
@@ -0,0 +1,59 @@
+ """
+ in_memory_example.py — Using InMemoryStore for testing and ephemeral workflows.
+
+ No files, no SQLite — just a dict. Checkpoints live for the duration of the
+ process, so retries and deduplication work within a single run, but nothing
+ survives a restart.
+
+ Run with:
+     uv run python examples/in_memory_example.py
+ """
+
+ import asyncio
+
+ from durable import Workflow
+ from durable.backoff import constant
+ from durable.store import InMemoryStore
+
+ wf = Workflow("demo", db=InMemoryStore())
+
+ call_count = 0
+
+
+ @wf.task(retries=3, backoff=constant(0))
+ async def flaky_fetch(url: str) -> dict:
+     """Fails twice, then succeeds — retries are handled automatically."""
+     global call_count
+     call_count += 1
+     if call_count < 3:
+         raise ConnectionError(f"attempt {call_count}: connection refused")
+     print(f" [flaky_fetch] succeeded on attempt {call_count}")
+     return {"url": url, "status": "ok"}
+
+
+ @wf.task
+ async def transform(data: dict) -> str:
+     print(f" [transform] processing {data['url']}")
+     return f"transformed-{data['status']}"
+
+
+ @wf.workflow(id="pipeline-{job_id}")
+ async def pipeline(job_id: str) -> str:
+     data = await flaky_fetch("https://api.example.com/data")
+     return await transform(data)
+
+
+ async def main():
+     print("── First run: flaky_fetch retries, then both steps execute ──")
+     result = await pipeline(job_id="job-1")
+     print(f" result: {result}")
+
+     print("\n── Second run (same id): both steps replayed from memory ──")
+     result = await pipeline(job_id="job-1")
+     print(f" result: {result}")
+
+     print("\n✓ Done. Nothing written to disk.")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())