avtomatika 1.0b2.tar.gz → 1.0b4.tar.gz
- {avtomatika-1.0b2 → avtomatika-1.0b4}/PKG-INFO +57 -11
- avtomatika-1.0b2/src/avtomatika.egg-info/PKG-INFO → avtomatika-1.0b4/README.md +47 -47
- {avtomatika-1.0b2 → avtomatika-1.0b4}/pyproject.toml +10 -10
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/__init__.py +2 -3
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/api.html +0 -11
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/blueprint.py +5 -7
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/client_config_loader.py +18 -6
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/dispatcher.py +13 -19
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/engine.py +52 -16
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/executor.py +6 -3
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/ratelimit.py +3 -10
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/reputation.py +11 -2
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/storage/__init__.py +3 -3
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/storage/base.py +23 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/storage/memory.py +34 -8
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/storage/redis.py +37 -20
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/telemetry.py +3 -3
- avtomatika-1.0b4/src/avtomatika/watcher.py +82 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/worker_config_loader.py +7 -2
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/ws_manager.py +2 -1
- avtomatika-1.0b2/README.md → avtomatika-1.0b4/src/avtomatika.egg-info/PKG-INFO +93 -1
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika.egg-info/SOURCES.txt +4 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika.egg-info/requires.txt +8 -8
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_client_config_loader.py +7 -6
- avtomatika-1.0b4/tests/test_config_validation.py +60 -0
- avtomatika-1.0b4/tests/test_dispatcher_extended.py +95 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_engine.py +22 -0
- avtomatika-1.0b4/tests/test_memory_locking.py +44 -0
- avtomatika-1.0b4/tests/test_redis_locking.py +45 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_watcher.py +3 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_worker_config_loader.py +7 -4
- avtomatika-1.0b2/src/avtomatika/watcher.py +0 -68
- {avtomatika-1.0b2 → avtomatika-1.0b4}/LICENSE +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/setup.cfg +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/compression.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/config.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/context.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/data_types.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/datastore.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/health_checker.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/history/base.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/history/noop.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/history/postgres.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/history/sqlite.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/logging_config.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/metrics.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/py.typed +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/quota.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/security.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika.egg-info/dependency_links.txt +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika.egg-info/top_level.txt +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_blueprint_conditions.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_blueprints.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_compression.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_context.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_dispatcher.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_error_handling.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_executor.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_health_checker.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_history.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_integration.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_logging_config.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_memory_storage.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_metrics.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_noop_history.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_postgres_history.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_ratelimit.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_redis_storage.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_reputation.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_telemetry.py +0 -0
- {avtomatika-1.0b2 → avtomatika-1.0b4}/tests/test_ws_manager.py +0 -0
{avtomatika-1.0b2 → avtomatika-1.0b4}/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: avtomatika
-Version: 1.0b2
-Summary: A state-machine based orchestrator for long-running jobs.
+Version: 1.0b4
+Summary: A state-machine based orchestrator for long-running AI and other jobs.
 Project-URL: Homepage, https://github.com/avtomatika-ai/avtomatika
 Project-URL: Bug Tracker, https://github.com/avtomatika-ai/avtomatika/issues
 Classifier: Development Status :: 4 - Beta
@@ -18,25 +18,25 @@ Requires-Dist: graphviz~=0.21
 Requires-Dist: zstandard~=0.24
 Requires-Dist: aioprometheus~=23.12
 Provides-Extra: redis
-Requires-Dist: redis~=6.4; extra == "redis"
+Requires-Dist: redis~=7.1; extra == "redis"
 Requires-Dist: orjson~=3.11; extra == "redis"
 Provides-Extra: history
-Requires-Dist: aiosqlite~=0.21; extra == "history"
+Requires-Dist: aiosqlite~=0.22; extra == "history"
 Requires-Dist: asyncpg~=0.30; extra == "history"
 Requires-Dist: orjson~=3.11; extra == "history"
 Provides-Extra: telemetry
-Requires-Dist: opentelemetry-api~=1.38; extra == "telemetry"
-Requires-Dist: opentelemetry-sdk~=1.38; extra == "telemetry"
-Requires-Dist: opentelemetry-exporter-otlp~=1.36; extra == "telemetry"
+Requires-Dist: opentelemetry-api~=1.39; extra == "telemetry"
+Requires-Dist: opentelemetry-sdk~=1.39; extra == "telemetry"
+Requires-Dist: opentelemetry-exporter-otlp~=1.39; extra == "telemetry"
 Requires-Dist: opentelemetry-instrumentation-aiohttp-client~=0.59b0; extra == "telemetry"
 Provides-Extra: test
-Requires-Dist: pytest~=8.4; extra == "test"
+Requires-Dist: pytest~=9.0; extra == "test"
 Requires-Dist: pytest-asyncio~=1.1; extra == "test"
-Requires-Dist: fakeredis~=2.31; extra == "test"
+Requires-Dist: fakeredis~=2.33; extra == "test"
 Requires-Dist: pytest-aiohttp~=1.1; extra == "test"
 Requires-Dist: pytest-mock~=3.14; extra == "test"
 Requires-Dist: aioresponses~=0.7; extra == "test"
-Requires-Dist: backports.zstd; extra == "test"
+Requires-Dist: backports.zstd~=1.2; extra == "test"
 Requires-Dist: opentelemetry-instrumentation-aiohttp-client; extra == "test"
 Provides-Extra: all
 Requires-Dist: avtomatika[redis]; extra == "all"
@@ -285,7 +285,7 @@ Run multiple tasks simultaneously and gather their results.
 @my_blueprint.handler_for("process_files")
 async def fan_out_handler(initial_data, actions):
     tasks_to_dispatch = [
-        {"task_type": "file_analysis", "params": {"file": file}}
+        {"task_type": "file_analysis", "params": {"file": file}})
         for file in initial_data.get("files", [])
     ]
     # Use dispatch_parallel to send all tasks at once.
@@ -332,6 +332,26 @@ async def cache_handler(data_stores):
 
 The orchestrator's behavior can be configured through environment variables. Additionally, any configuration parameter loaded from environment variables can be programmatically overridden in your application code after the `Config` object has been initialized. This provides flexibility for different deployment and testing scenarios.
 
+**Important:** The system employs **strict validation** for configuration files (`clients.toml`, `workers.toml`) at startup. If a configuration file is invalid (e.g., malformed TOML, missing required fields), the application will **fail fast** and exit with an error, rather than starting in a partially broken state. This ensures the security and integrity of the deployment.
+
+### Configuration Files
+
+To manage access and worker settings securely, Avtomatika uses TOML configuration files.
+
+- **`clients.toml`**: Defines API clients, their tokens, plans, and quotas.
+```toml
+[client_premium]
+token = "secret-token-123"
+plan = "premium"
+```
+- **`workers.toml`**: Defines individual tokens for workers to enhance security.
+```toml
+[gpu-worker-01]
+token = "worker-secret-456"
+```
+
+For detailed specifications and examples, please refer to the [**Configuration Guide**](docs/configuration.md).
+
 ### Fault Tolerance
 
 The orchestrator has built-in mechanisms for handling failures based on the `error.code` field in a worker's response.
@@ -340,6 +360,13 @@ The orchestrator has built-in mechanisms for handling failures based on the `err
 * **PERMANENT_ERROR**: A permanent error (e.g., a corrupted file). The task will be immediately sent to quarantine for manual investigation.
 * **INVALID_INPUT_ERROR**: An error in the input data. The entire pipeline (Job) will be immediately moved to the failed state.
 
+### High Availability & Distributed Locking
+
+The architecture supports horizontal scaling. Multiple Orchestrator instances can run behind a load balancer.
+
+* **Stateless API:** The API is stateless; all state is persisted in Redis.
+* **Distributed Locking:** Background processes (`Watcher`, `ReputationCalculator`) use distributed locks (via Redis `SET NX`) to coordinate and prevent race conditions when multiple instances are active.
+
 ### Storage Backend
 
 By default, the engine uses in-memory storage. For production, you must configure persistent storage via environment variables.
@@ -408,3 +435,22 @@ To run the `avtomatika` test suite:
 ```bash
 pytest avtomatika/tests/
 ```
+
+### Interactive API Documentation
+
+Avtomatika provides a built-in interactive API documentation page (similar to Swagger UI) that is automatically generated based on your registered blueprints.
+
+* **Endpoint:** `/_public/docs`
+* **Features:**
+    * **List of all system endpoints:** Detailed documentation for Public, Protected, and Worker API groups.
+    * **Dynamic Blueprint Documentation:** Automatically generates and lists documentation for all blueprints registered in the engine, including their specific API endpoints.
+    * **Interactive Testing:** Allows you to test API calls directly from the browser. You can provide authentication tokens, parameters, and request bodies to see real server responses.
+
+## Detailed Documentation
+
+For a deeper dive into the system, please refer to the following documents:
+
+- [**Architecture Guide**](https://github.com/avtomatika-ai/avtomatika/blob/main/docs/architecture.md): A detailed overview of the system components and their interactions.
+- [**API Reference**](https://github.com/avtomatika-ai/avtomatika/blob/main/docs/api_reference.md): Full specification of the HTTP API.
+- [**Deployment Guide**](https://github.com/avtomatika-ai/avtomatika/blob/main/docs/deployment.md): Instructions for deploying with Gunicorn/Uvicorn and NGINX.
+- [**Cookbook**](https://github.com/avtomatika-ai/avtomatika/blob/main/docs/cookbook/README.md): Examples and best practices for creating blueprints.
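The "High Availability & Distributed Locking" section added to the README above names Redis `SET NX` as the coordination primitive but shows no code. Below is a minimal sketch of that locking pattern, assuming `redis.asyncio` from redis-py; the key name, TTL, and function are illustrative, not avtomatika's actual `Watcher` internals:

```python
import uuid

from redis.asyncio import Redis

LOCK_KEY = "avtomatika:lock:watcher"  # illustrative key, not the real one
LOCK_TTL = 30  # seconds; the lock auto-expires if an instance crashes mid-cycle


async def run_exclusive_cycle(redis: Redis, do_work) -> bool:
    """Run do_work() on at most one orchestrator instance at a time."""
    token = str(uuid.uuid4())
    # SET key value NX EX ttl: succeeds only if the key does not exist yet.
    if not await redis.set(LOCK_KEY, token, nx=True, ex=LOCK_TTL):
        return False  # another instance holds the lock; skip this cycle
    try:
        await do_work()
    finally:
        # Best-effort release: delete only while we still own the lock, so a
        # lock that expired and was re-acquired elsewhere is never removed.
        if await redis.get(LOCK_KEY) == token.encode():
            await redis.delete(LOCK_KEY)
    return True
```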
avtomatika-1.0b2/src/avtomatika.egg-info/PKG-INFO → avtomatika-1.0b4/README.md
@@ -1,49 +1,3 @@
-Metadata-Version: 2.4
-Name: avtomatika
-Version: 1.0b2
-Summary: A state-machine based orchestrator for long-running jobs.
-Project-URL: Homepage, https://github.com/avtomatika-ai/avtomatika
-Project-URL: Bug Tracker, https://github.com/avtomatika-ai/avtomatika/issues
-Classifier: Development Status :: 4 - Beta
-Classifier: Programming Language :: Python :: 3
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Requires-Python: >=3.11
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: aiohttp~=3.12
-Requires-Dist: aiocache~=0.12
-Requires-Dist: python-json-logger~=4.0
-Requires-Dist: graphviz~=0.21
-Requires-Dist: zstandard~=0.24
-Requires-Dist: aioprometheus~=23.12
-Provides-Extra: redis
-Requires-Dist: redis~=6.4; extra == "redis"
-Requires-Dist: orjson~=3.11; extra == "redis"
-Provides-Extra: history
-Requires-Dist: aiosqlite~=0.21; extra == "history"
-Requires-Dist: asyncpg~=0.30; extra == "history"
-Requires-Dist: orjson~=3.11; extra == "history"
-Provides-Extra: telemetry
-Requires-Dist: opentelemetry-api~=1.38; extra == "telemetry"
-Requires-Dist: opentelemetry-sdk~=1.38; extra == "telemetry"
-Requires-Dist: opentelemetry-exporter-otlp~=1.36; extra == "telemetry"
-Requires-Dist: opentelemetry-instrumentation-aiohttp-client~=0.59b0; extra == "telemetry"
-Provides-Extra: test
-Requires-Dist: pytest~=8.4; extra == "test"
-Requires-Dist: pytest-asyncio~=1.1; extra == "test"
-Requires-Dist: fakeredis~=2.31; extra == "test"
-Requires-Dist: pytest-aiohttp~=1.1; extra == "test"
-Requires-Dist: pytest-mock~=3.14; extra == "test"
-Requires-Dist: aioresponses~=0.7; extra == "test"
-Requires-Dist: backports.zstd; extra == "test"
-Requires-Dist: opentelemetry-instrumentation-aiohttp-client; extra == "test"
-Provides-Extra: all
-Requires-Dist: avtomatika[redis]; extra == "all"
-Requires-Dist: avtomatika[history]; extra == "all"
-Requires-Dist: avtomatika[telemetry]; extra == "all"
-Dynamic: license-file
-
 # Avtomatika Orchestrator
 
 Avtomatika is a powerful, state-driven engine for managing complex asynchronous workflows in Python. It provides a robust framework for building scalable and resilient applications by separating process logic from execution logic.
@@ -285,7 +239,7 @@ Run multiple tasks simultaneously and gather their results.
 @my_blueprint.handler_for("process_files")
 async def fan_out_handler(initial_data, actions):
     tasks_to_dispatch = [
-        {"task_type": "file_analysis", "params": {"file": file}}
+        {"task_type": "file_analysis", "params": {"file": file}})
         for file in initial_data.get("files", [])
     ]
     # Use dispatch_parallel to send all tasks at once.
@@ -332,6 +286,26 @@ async def cache_handler(data_stores):
 
 The orchestrator's behavior can be configured through environment variables. Additionally, any configuration parameter loaded from environment variables can be programmatically overridden in your application code after the `Config` object has been initialized. This provides flexibility for different deployment and testing scenarios.
 
+**Important:** The system employs **strict validation** for configuration files (`clients.toml`, `workers.toml`) at startup. If a configuration file is invalid (e.g., malformed TOML, missing required fields), the application will **fail fast** and exit with an error, rather than starting in a partially broken state. This ensures the security and integrity of the deployment.
+
+### Configuration Files
+
+To manage access and worker settings securely, Avtomatika uses TOML configuration files.
+
+- **`clients.toml`**: Defines API clients, their tokens, plans, and quotas.
+```toml
+[client_premium]
+token = "secret-token-123"
+plan = "premium"
+```
+- **`workers.toml`**: Defines individual tokens for workers to enhance security.
+```toml
+[gpu-worker-01]
+token = "worker-secret-456"
+```
+
+For detailed specifications and examples, please refer to the [**Configuration Guide**](docs/configuration.md).
+
 ### Fault Tolerance
 
 The orchestrator has built-in mechanisms for handling failures based on the `error.code` field in a worker's response.
@@ -340,6 +314,13 @@ The orchestrator has built-in mechanisms for handling failures based on the `err
 * **PERMANENT_ERROR**: A permanent error (e.g., a corrupted file). The task will be immediately sent to quarantine for manual investigation.
 * **INVALID_INPUT_ERROR**: An error in the input data. The entire pipeline (Job) will be immediately moved to the failed state.
 
+### High Availability & Distributed Locking
+
+The architecture supports horizontal scaling. Multiple Orchestrator instances can run behind a load balancer.
+
+* **Stateless API:** The API is stateless; all state is persisted in Redis.
+* **Distributed Locking:** Background processes (`Watcher`, `ReputationCalculator`) use distributed locks (via Redis `SET NX`) to coordinate and prevent race conditions when multiple instances are active.
+
 ### Storage Backend
 
 By default, the engine uses in-memory storage. For production, you must configure persistent storage via environment variables.
@@ -408,3 +389,22 @@ To run the `avtomatika` test suite:
 ```bash
 pytest avtomatika/tests/
 ```
+
+### Interactive API Documentation
+
+Avtomatika provides a built-in interactive API documentation page (similar to Swagger UI) that is automatically generated based on your registered blueprints.
+
+* **Endpoint:** `/_public/docs`
+* **Features:**
+    * **List of all system endpoints:** Detailed documentation for Public, Protected, and Worker API groups.
+    * **Dynamic Blueprint Documentation:** Automatically generates and lists documentation for all blueprints registered in the engine, including their specific API endpoints.
+    * **Interactive Testing:** Allows you to test API calls directly from the browser. You can provide authentication tokens, parameters, and request bodies to see real server responses.
+
+## Detailed Documentation
+
+For a deeper dive into the system, please refer to the following documents:
+
+- [**Architecture Guide**](https://github.com/avtomatika-ai/avtomatika/blob/main/docs/architecture.md): A detailed overview of the system components and their interactions.
+- [**API Reference**](https://github.com/avtomatika-ai/avtomatika/blob/main/docs/api_reference.md): Full specification of the HTTP API.
+- [**Deployment Guide**](https://github.com/avtomatika-ai/avtomatika/blob/main/docs/deployment.md): Instructions for deploying with Gunicorn/Uvicorn and NGINX.
+- [**Cookbook**](https://github.com/avtomatika-ai/avtomatika/blob/main/docs/cookbook/README.md): Examples and best practices for creating blueprints.
{avtomatika-1.0b2 → avtomatika-1.0b4}/pyproject.toml
@@ -4,8 +4,8 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "avtomatika"
-version = "1.0b2"
-description = "A state-machine based orchestrator for long-running jobs."
+version = "1.0b4"
+description = "A state-machine based orchestrator for long-running AI and other jobs."
 readme = "README.md"
 requires-python = ">=3.11"
 classifiers = [
@@ -24,22 +24,22 @@ dependencies = [
 ]
 
 [project.optional-dependencies]
-redis = ["redis~=6.4", "orjson~=3.11"]
-history = ["aiosqlite~=0.21", "asyncpg~=0.30", "orjson~=3.11"]
+redis = ["redis~=7.1", "orjson~=3.11"]
+history = ["aiosqlite~=0.22", "asyncpg~=0.30", "orjson~=3.11"]
 telemetry = [
-    "opentelemetry-api~=1.38",
-    "opentelemetry-sdk~=1.38",
-    "opentelemetry-exporter-otlp~=1.36",
+    "opentelemetry-api~=1.39",
+    "opentelemetry-sdk~=1.39",
+    "opentelemetry-exporter-otlp~=1.39",
     "opentelemetry-instrumentation-aiohttp-client~=0.59b0",
 ]
 test = [
-    "pytest~=8.4",
+    "pytest~=9.0",
     "pytest-asyncio~=1.1",
-    "fakeredis~=2.31",
+    "fakeredis~=2.33",
     "pytest-aiohttp~=1.1",
    "pytest-mock~=3.14",
     "aioresponses~=0.7",
-    "backports.zstd",
+    "backports.zstd~=1.2",
     "opentelemetry-instrumentation-aiohttp-client",
 ]
 all = [
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/__init__.py
@@ -4,6 +4,7 @@
 This module exposes the primary classes for building and running state-driven automations.
 """
 
+import contextlib
 from importlib.metadata import version
 
 __version__ = version("avtomatika")
@@ -22,9 +23,7 @@ __all__ = [
     "StorageBackend",
 ]
 
-try:
+with contextlib.suppress(ImportError):
     from .storage.redis import RedisStorage  # noqa: F401
 
     __all__.append("RedisStorage")
-except ImportError:
-    pass
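The `contextlib.suppress(ImportError)` rewrite above is behavior-preserving: execution of the `with` body stops at the failing import, so `__all__.append("RedisStorage")` still runs only when the optional dependency is present. A standalone sketch of the pattern (the imported module name is hypothetical):

```python
import contextlib

__all__: list[str] = []

with contextlib.suppress(ImportError):
    from some_optional_backend import Backend  # hypothetical optional dependency

    __all__.append("Backend")  # skipped entirely if the import above fails
```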
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/api.html
@@ -199,17 +199,6 @@
             { code: '202 Accepted', description: 'Job successfully accepted for processing.', body: { "status": "accepted", "job_id": "..." } }
         ]
     },
-    {
-        id: 'post-create-showcase-job',
-        name: 'Create a Full Showcase Job',
-        method: 'POST',
-        path: '/api/v1/jobs/full_showcase',
-        description: 'Creates and starts a new instance (Job) of the `full_showcase` blueprint. This blueprint demonstrates most of the features of the Avtomatika library.',
-        request: { body: { "path": "/path/to/video.mp4", "user_id": "user-123", "quality": "high" } },
-        responses: [
-            { code: '202 Accepted', description: 'Job successfully accepted for processing.', body: { "status": "accepted", "job_id": "..." } }
-        ]
-    },
     {
         id: 'get-job-status',
         name: 'Get Job Status',
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/blueprint.py
@@ -168,8 +168,7 @@ class StateMachineBlueprint:
         for handler in self.conditional_handlers:
             if handler.state == state and handler.evaluate(context):
                 return handler.func
-        default_handler = self.handlers.get(state)
-        if default_handler:
+        if default_handler := self.handlers.get(state):
             return default_handler
         raise ValueError(
             f"No suitable handler found for state '{state}' in blueprint '{self.name}' for the given context.",
@@ -230,12 +229,11 @@ class StateMachineBlueprint:
                     f"Could not parse handler '{handler_func.__name__}' for state '{handler_state}'. "
                     f"Graph may be incomplete. Error: {e}"
                 )
-                pass
         for state in states:
             dot.node(state, state)
 
-        if output_filename:
-            dot.render(output_filename, format=output_format, cleanup=True)
-            print(f"Graph rendered to {output_filename}.{output_format}")
-        else:
+        if not output_filename:
             return dot.source
+        dot.render(output_filename, format=output_format, cleanup=True)
+        print(f"Graph rendered to {output_filename}.{output_format}")
+        return None
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/client_config_loader.py
@@ -26,25 +26,37 @@ async def load_client_configs_to_redis(
             config_path,
         )
         return
+    except Exception as e:
+        logger.error(f"Failed to parse client config file '{config_path}': {e}")
+        raise ValueError(f"Invalid client configuration file: {e}") from e
 
     loaded_count = 0
     for client_name, config in clients_data.items():
+        if not isinstance(config, dict):
+            logger.error(f"Client '{client_name}' configuration must be a table (dict).")
+            raise ValueError(f"Invalid configuration for client '{client_name}'")
+
         token = config.get("token")
         if not token:
-            logger.
-
-
-
-
+            logger.error(f"Client '{client_name}' is missing required 'token' field.")
+            raise ValueError(f"Missing token for client '{client_name}'")
+
+        if not isinstance(token, str):
+            logger.error(f"Token for client '{client_name}' must be a string.")
+            raise ValueError(f"Invalid token type for client '{client_name}'")
 
         # Separate static config from dynamic quota values
         static_config = {k: v for k, v in config.items() if k != "monthly_attempts"}
         quota = config.get("monthly_attempts")
 
+        if quota is not None and not isinstance(quota, int):
+            logger.error(f"Quota 'monthly_attempts' for client '{client_name}' must be an integer.")
+            raise ValueError(f"Invalid quota type for client '{client_name}'")
+
        try:
             # Assume these storage methods will be implemented
             await storage.save_client_config(token, static_config)
-            if quota is not None
+            if quota is not None:
                 await storage.initialize_client_quota(token, quota)
 
             loaded_count += 1
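The new branches above implement the fail-fast startup validation promised in the README: a malformed file or schema now raises instead of being silently skipped. A condensed, standalone sketch of the same checks using stdlib `tomllib` (Python 3.11+, matching `requires-python`); the function name is illustrative:

```python
import sys
import tomllib  # stdlib TOML parser on Python 3.11+


def load_clients_or_exit(path: str) -> dict:
    """Parse clients.toml and exit on any structural problem (fail fast)."""
    try:
        with open(path, "rb") as f:  # tomllib requires a binary file object
            clients = tomllib.load(f)
    except (OSError, tomllib.TOMLDecodeError) as e:
        sys.exit(f"Invalid client configuration file '{path}': {e}")
    for name, cfg in clients.items():
        if not isinstance(cfg, dict) or not isinstance(cfg.get("token"), str):
            sys.exit(f"Client '{name}' must be a table with a string 'token'.")
        quota = cfg.get("monthly_attempts")
        if quota is not None and not isinstance(quota, int):
            sys.exit(f"Quota 'monthly_attempts' for '{name}' must be an integer.")
    return clients
```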
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/dispatcher.py
@@ -28,15 +28,13 @@ class Dispatcher:
         self.config = config
         self._round_robin_indices: Dict[str, int] = defaultdict(int)
 
+    @staticmethod
     def _is_worker_compliant(
-        self,
         worker: Dict[str, Any],
         requirements: Dict[str, Any],
     ) -> bool:
         """Checks if a worker meets the specified resource requirements."""
-
-        required_gpu = requirements.get("gpu_info")
-        if required_gpu:
+        if required_gpu := requirements.get("gpu_info"):
             gpu_info = worker.get("resources", {}).get("gpu_info")
             if not gpu_info:
                 return False
@@ -51,17 +49,15 @@ class Dispatcher:
         ):
             return False
 
-
-        required_models = requirements.get("installed_models")
-        if required_models:
+        if required_models := requirements.get("installed_models"):
             installed_models = {m["name"] for m in worker.get("installed_models", [])}
             if not set(required_models).issubset(installed_models):
                 return False
 
         return True
 
+    @staticmethod
     def _select_default(
-        self,
         workers: List[Dict[str, Any]],
         task_type: str,
     ) -> Dict[str, Any]:
@@ -74,7 +70,7 @@ class Dispatcher:
         """
         warm_workers = [w for w in workers if task_type in w.get("hot_cache", [])]
 
-        target_pool = warm_workers
+        target_pool = warm_workers or workers
 
         # The `cost` field is deprecated but maintained for backward compatibility.
         min_cost = min(w.get("cost", float("inf")) for w in target_pool)
@@ -95,8 +91,8 @@ class Dispatcher:
         self._round_robin_indices[task_type] = idx + 1
         return selected_worker
 
+    @staticmethod
     def _select_least_connections(
-        self,
         workers: List[Dict[str, Any]],
         task_type: str,
     ) -> Dict[str, Any]:
@@ -105,15 +101,16 @@ class Dispatcher:
         """
         return min(workers, key=lambda w: w.get("load", 0.0))
 
+    @staticmethod
     def _select_cheapest(
-        self,
         workers: List[Dict[str, Any]],
         task_type: str,
     ) -> Dict[str, Any]:
         """Selects the cheapest worker based on 'cost_per_second'."""
         return min(workers, key=lambda w: w.get("cost_per_second", float("inf")))
 
-    def _get_best_value_score(self, worker: Dict[str, Any]) -> float:
+    @staticmethod
+    def _get_best_value_score(worker: Dict[str, Any]) -> float:
         """Calculates a "score" for a worker using the formula cost / reputation.
         The lower the score, the better.
         """
@@ -121,9 +118,7 @@ class Dispatcher:
         # Default reputation is 1.0 if absent
         reputation = worker.get("reputation", 1.0)
         # Avoid division by zero
-        if reputation == 0:
-            return float("inf")
-        return cost / reputation
+        return float("inf") if reputation == 0 else cost / reputation
 
     def _select_best_value(
         self,
@@ -153,10 +148,9 @@ class Dispatcher:
         idle_workers = [w for w in all_workers if w.get("status", "idle") == "idle"]
         logger.debug(f"Idle workers: {[w['worker_id'] for w in idle_workers]}")
         if not idle_workers:
-            busy_mo_workers = [
-                w for w in all_workers if w.get("status") == "busy" and "multi_orchestrator_info" in w
-            ]
-            if busy_mo_workers:
+            if busy_mo_workers := [
+                w for w in all_workers if w.get("status") == "busy" and "multi_orchestrator_info" in w
+            ]:
                 logger.warning(
                     f"No idle workers. Found {len(busy_mo_workers)} busy workers "
                     f"in multi-orchestrator mode. They are likely performing tasks for other Orchestrators.",
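For reference, the "best value" strategy visible above reduces to one scoring rule: `cost_per_second / reputation`, lower is better, with `float("inf")` guarding a zero reputation. A self-contained sketch with made-up worker dicts:

```python
from typing import Any, Dict, List


def select_best_value(workers: List[Dict[str, Any]]) -> Dict[str, Any]:
    def score(worker: Dict[str, Any]) -> float:
        cost = worker.get("cost_per_second", float("inf"))
        reputation = worker.get("reputation", 1.0)  # default reputation is 1.0
        return float("inf") if reputation == 0 else cost / reputation

    return min(workers, key=score)


workers = [
    {"worker_id": "a", "cost_per_second": 0.10, "reputation": 0.5},  # score 0.20
    {"worker_id": "b", "cost_per_second": 0.15, "reputation": 0.9},  # score ~0.167
]
assert select_best_value(workers)["worker_id"] == "b"
```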
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/engine.py
@@ -485,8 +485,7 @@ class OrchestratorEngine:
             await self.storage.save_job_state(job_id, job_state)
             # Optionally, trigger a specific 'cancelled' transition if defined in the blueprint
             transitions = job_state.get("current_task_transitions", {})
-            next_state = transitions.get("cancelled")
-            if next_state:
+            if next_state := transitions.get("cancelled"):
                 job_state["current_state"] = next_state
                 job_state["status"] = "running"  # It's running the cancellation handler now
                 await self.storage.save_job_state(job_id, job_state)
@@ -494,9 +493,7 @@ class OrchestratorEngine:
             return web.json_response({"status": "result_accepted_cancelled"}, status=200)
 
         transitions = job_state.get("current_task_transitions", {})
-        next_state = transitions.get(result_status)
-
-        if next_state:
+        if next_state := transitions.get(result_status):
             logging.info(f"Job {job_id} transitioning based on worker status '{result_status}' to state '{next_state}'")
 
             worker_data = result.get("data")
@@ -603,15 +600,52 @@ class OrchestratorEngine:
         return web.json_response({"status": "db_flushed"}, status=200)
 
     async def _docs_handler(self, request: web.Request) -> web.Response:
+        import json
         from importlib import resources
 
         try:
             content = resources.read_text("avtomatika", "api.html")
-            return web.Response(text=content, content_type="text/html")
         except FileNotFoundError:
             logger.error("api.html not found within the avtomatika package.")
             return web.json_response({"error": "Documentation file not found on server."}, status=500)
 
+        # Generate dynamic documentation for registered blueprints
+        blueprint_endpoints = []
+        for bp in self.blueprints.values():
+            if not bp.api_endpoint:
+                continue
+
+            version_prefix = f"/{bp.api_version}" if bp.api_version else ""
+            endpoint_path = bp.api_endpoint if bp.api_endpoint.startswith("/") else f"/{bp.api_endpoint}"
+            full_path = f"/api{version_prefix}{endpoint_path}"
+
+            blueprint_endpoints.append(
+                {
+                    "id": f"post-create-{bp.name.replace('_', '-')}",
+                    "name": f"Create {bp.name.replace('_', ' ').title()} Job",
+                    "method": "POST",
+                    "path": full_path,
+                    "description": f"Creates and starts a new instance (Job) of the `{bp.name}` blueprint.",
+                    "request": {"body": {"initial_data": {}}},
+                    "responses": [
+                        {
+                            "code": "202 Accepted",
+                            "description": "Job successfully accepted for processing.",
+                            "body": {"status": "accepted", "job_id": "..."},
+                        }
+                    ],
+                }
+            )
+
+        # Inject dynamic endpoints into the apiData structure in the HTML
+        if blueprint_endpoints:
+            endpoints_json = json.dumps(blueprint_endpoints, indent=2)
+            # We insert the new endpoints at the beginning of the 'Protected API' group
+            marker = "group: 'Protected API',\n endpoints: ["
+            content = content.replace(marker, f"{marker}\n{endpoints_json.strip('[]')},")
+
+        return web.Response(text=content, content_type="text/html")
+
     def _setup_routes(self):
         public_app = web.Application()
         public_app.router.add_get("/status", status_handler)
@@ -647,16 +681,7 @@ class OrchestratorEngine:
         all_protected_apps.append(protected_app)
 
         for app in all_protected_apps:
-            app.router.add_get("/jobs/{job_id}", self._get_job_status_handler)
-            app.router.add_post("/jobs/{job_id}/cancel", self._cancel_job_handler)
-            if not isinstance(self.history_storage, NoOpHistoryStorage):
-                app.router.add_get("/jobs/{job_id}/history", self._get_job_history_handler)
-            app.router.add_get("/blueprints/{blueprint_name}/graph", self._get_blueprint_graph_handler)
-            app.router.add_get("/workers", self._get_workers_handler)
-            app.router.add_get("/jobs", self._get_jobs_handler)
-            app.router.add_get("/dashboard", self._get_dashboard_handler)
-            app.router.add_post("/admin/reload-workers", self._reload_worker_configs_handler)
-
+            self._register_common_routes(app)
         if has_unversioned_routes:
             self.app.add_subapp("/api/", protected_app)
         for version, app in versioned_apps.items():
@@ -676,6 +701,17 @@ class OrchestratorEngine:
         worker_app.router.add_get("/ws/{worker_id}", self._websocket_handler)
         self.app.add_subapp("/_worker/", worker_app)
 
+    def _register_common_routes(self, app):
+        app.router.add_get("/jobs/{job_id}", self._get_job_status_handler)
+        app.router.add_post("/jobs/{job_id}/cancel", self._cancel_job_handler)
+        if not isinstance(self.history_storage, NoOpHistoryStorage):
+            app.router.add_get("/jobs/{job_id}/history", self._get_job_history_handler)
+        app.router.add_get("/blueprints/{blueprint_name}/graph", self._get_blueprint_graph_handler)
+        app.router.add_get("/workers", self._get_workers_handler)
+        app.router.add_get("/jobs", self._get_jobs_handler)
+        app.router.add_get("/dashboard", self._get_dashboard_handler)
+        app.router.add_post("/admin/reload-workers", self._reload_worker_configs_handler)
+
     async def _websocket_handler(self, request: web.Request) -> web.WebSocketResponse:
         worker_id = request.match_info.get("worker_id")
         if not worker_id:
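The routing refactor above replaces the duplicated per-app registrations with a single `_register_common_routes` call, applied to each aiohttp sub-application before it is mounted with `add_subapp`. A minimal sketch of that shared-routes layout (handlers and prefixes are illustrative, not avtomatika's):

```python
from aiohttp import web


async def ping(request: web.Request) -> web.Response:
    return web.json_response({"ok": True})


def register_common_routes(app: web.Application) -> None:
    # The same route table is registered on every versioned sub-app.
    app.router.add_get("/ping", ping)


root = web.Application()
for prefix in ("/api/v1/", "/api/v2/"):
    sub = web.Application()
    register_common_routes(sub)
    root.add_subapp(prefix, sub)  # e.g. GET /api/v1/ping now resolves

if __name__ == "__main__":
    web.run_app(root)
```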
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/telemetry.py
@@ -35,11 +35,13 @@ except ImportError:
         def inject(self, *args, **kwargs):
             pass
 
-        def extract(self, *args, **kwargs):
+        @staticmethod
+        def extract(*args, **kwargs):
             return None
 
     class NoOpTraceContextTextMapPropagator:
-        def extract(self, *args, **kwargs):
+        @staticmethod
+        def extract(*args, **kwargs):
             return None
 
     trace = NoOpTracer()
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/executor.py
@@ -485,7 +487,8 @@ class JobExecutor:
         await self.storage.save_job_state(parent_job_id, parent_job_state)
         await self.storage.enqueue_job(parent_job_id)
 
-    def _handle_task_completion(self, task: Task):
+    @staticmethod
+    def _handle_task_completion(task: Task):
         """Callback to handle completion of a job processing task."""
         try:
             # This will re-raise any exception caught in the task
{avtomatika-1.0b2 → avtomatika-1.0b4}/src/avtomatika/ratelimit.py
@@ -1,3 +1,4 @@
+from contextlib import suppress
 from typing import Awaitable, Callable
 
 from aiohttp import web
@@ -23,23 +24,15 @@ def rate_limit_middleware_factory(
         """Rate-limiting middleware that uses the provided storage backend."""
         # Determine the key for rate limiting (e.g., by worker_id or IP)
         # For worker endpoints, we key by worker_id. For others, by IP.
-        key_identifier = request.match_info.get("worker_id", request.remote)
-        if not key_identifier:
-            # Fallback for cases where remote IP might not be available
-            key_identifier = "unknown"
+        key_identifier = request.match_info.get("worker_id", request.remote) or "unknown"
 
         # Key by identifier and path to have per-endpoint limits
         rate_limit_key = f"ratelimit:{key_identifier}:{request.path}"
 
-        try:
+        with suppress(Exception):
             count = await storage.increment_key_with_ttl(rate_limit_key, period)
             if count > limit:
                 return web.json_response({"error": "Too Many Requests"}, status=429)
-        except Exception:
-            # If the rate limiter fails for any reason (e.g., Redis down),
-            # it's safer to let the request through than to block everything.
-            pass
-
         return await handler(request)
 
     return rate_limit_middleware