FlowerPower 0.11.6.20__py3-none-any.whl → 0.21.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. flowerpower/__init__.py +2 -6
  2. flowerpower/cfg/__init__.py +7 -14
  3. flowerpower/cfg/base.py +29 -25
  4. flowerpower/cfg/pipeline/__init__.py +8 -6
  5. flowerpower/cfg/pipeline/_schedule.py +32 -0
  6. flowerpower/cfg/pipeline/adapter.py +0 -5
  7. flowerpower/cfg/pipeline/builder.py +377 -0
  8. flowerpower/cfg/pipeline/run.py +36 -0
  9. flowerpower/cfg/project/__init__.py +11 -24
  10. flowerpower/cfg/project/adapter.py +0 -12
  11. flowerpower/cli/__init__.py +2 -21
  12. flowerpower/cli/cfg.py +0 -3
  13. flowerpower/cli/mqtt.py +0 -6
  14. flowerpower/cli/pipeline.py +22 -415
  15. flowerpower/cli/utils.py +0 -1
  16. flowerpower/flowerpower.py +345 -146
  17. flowerpower/pipeline/__init__.py +2 -0
  18. flowerpower/pipeline/base.py +21 -12
  19. flowerpower/pipeline/io.py +58 -54
  20. flowerpower/pipeline/manager.py +165 -726
  21. flowerpower/pipeline/pipeline.py +643 -0
  22. flowerpower/pipeline/registry.py +285 -18
  23. flowerpower/pipeline/visualizer.py +5 -6
  24. flowerpower/plugins/io/__init__.py +8 -0
  25. flowerpower/plugins/mqtt/__init__.py +7 -11
  26. flowerpower/settings/__init__.py +0 -2
  27. flowerpower/settings/{backend.py → _backend.py} +0 -21
  28. flowerpower/settings/logging.py +1 -1
  29. flowerpower/utils/logging.py +24 -12
  30. flowerpower/utils/misc.py +17 -256
  31. flowerpower/utils/monkey.py +1 -83
  32. flowerpower-0.21.0.dist-info/METADATA +463 -0
  33. flowerpower-0.21.0.dist-info/RECORD +44 -0
  34. flowerpower/cfg/pipeline/schedule.py +0 -74
  35. flowerpower/cfg/project/job_queue.py +0 -238
  36. flowerpower/cli/job_queue.py +0 -1061
  37. flowerpower/fs/__init__.py +0 -29
  38. flowerpower/fs/base.py +0 -662
  39. flowerpower/fs/ext.py +0 -2143
  40. flowerpower/fs/storage_options.py +0 -1420
  41. flowerpower/job_queue/__init__.py +0 -294
  42. flowerpower/job_queue/apscheduler/__init__.py +0 -11
  43. flowerpower/job_queue/apscheduler/_setup/datastore.py +0 -110
  44. flowerpower/job_queue/apscheduler/_setup/eventbroker.py +0 -93
  45. flowerpower/job_queue/apscheduler/manager.py +0 -1051
  46. flowerpower/job_queue/apscheduler/setup.py +0 -554
  47. flowerpower/job_queue/apscheduler/trigger.py +0 -169
  48. flowerpower/job_queue/apscheduler/utils.py +0 -311
  49. flowerpower/job_queue/base.py +0 -413
  50. flowerpower/job_queue/rq/__init__.py +0 -10
  51. flowerpower/job_queue/rq/_trigger.py +0 -37
  52. flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +0 -226
  53. flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +0 -231
  54. flowerpower/job_queue/rq/manager.py +0 -1582
  55. flowerpower/job_queue/rq/setup.py +0 -154
  56. flowerpower/job_queue/rq/utils.py +0 -69
  57. flowerpower/mqtt.py +0 -12
  58. flowerpower/pipeline/job_queue.py +0 -583
  59. flowerpower/pipeline/runner.py +0 -603
  60. flowerpower/plugins/io/base.py +0 -2520
  61. flowerpower/plugins/io/helpers/datetime.py +0 -298
  62. flowerpower/plugins/io/helpers/polars.py +0 -875
  63. flowerpower/plugins/io/helpers/pyarrow.py +0 -570
  64. flowerpower/plugins/io/helpers/sql.py +0 -202
  65. flowerpower/plugins/io/loader/__init__.py +0 -28
  66. flowerpower/plugins/io/loader/csv.py +0 -37
  67. flowerpower/plugins/io/loader/deltatable.py +0 -190
  68. flowerpower/plugins/io/loader/duckdb.py +0 -19
  69. flowerpower/plugins/io/loader/json.py +0 -37
  70. flowerpower/plugins/io/loader/mqtt.py +0 -159
  71. flowerpower/plugins/io/loader/mssql.py +0 -26
  72. flowerpower/plugins/io/loader/mysql.py +0 -26
  73. flowerpower/plugins/io/loader/oracle.py +0 -26
  74. flowerpower/plugins/io/loader/parquet.py +0 -35
  75. flowerpower/plugins/io/loader/postgres.py +0 -26
  76. flowerpower/plugins/io/loader/pydala.py +0 -19
  77. flowerpower/plugins/io/loader/sqlite.py +0 -23
  78. flowerpower/plugins/io/metadata.py +0 -244
  79. flowerpower/plugins/io/saver/__init__.py +0 -28
  80. flowerpower/plugins/io/saver/csv.py +0 -36
  81. flowerpower/plugins/io/saver/deltatable.py +0 -186
  82. flowerpower/plugins/io/saver/duckdb.py +0 -19
  83. flowerpower/plugins/io/saver/json.py +0 -36
  84. flowerpower/plugins/io/saver/mqtt.py +0 -28
  85. flowerpower/plugins/io/saver/mssql.py +0 -26
  86. flowerpower/plugins/io/saver/mysql.py +0 -26
  87. flowerpower/plugins/io/saver/oracle.py +0 -26
  88. flowerpower/plugins/io/saver/parquet.py +0 -36
  89. flowerpower/plugins/io/saver/postgres.py +0 -26
  90. flowerpower/plugins/io/saver/pydala.py +0 -20
  91. flowerpower/plugins/io/saver/sqlite.py +0 -24
  92. flowerpower/plugins/mqtt/cfg.py +0 -17
  93. flowerpower/plugins/mqtt/manager.py +0 -962
  94. flowerpower/settings/job_queue.py +0 -87
  95. flowerpower/utils/scheduler.py +0 -311
  96. flowerpower-0.11.6.20.dist-info/METADATA +0 -537
  97. flowerpower-0.11.6.20.dist-info/RECORD +0 -102
  98. {flowerpower-0.11.6.20.dist-info → flowerpower-0.21.0.dist-info}/WHEEL +0 -0
  99. {flowerpower-0.11.6.20.dist-info → flowerpower-0.21.0.dist-info}/entry_points.txt +0 -0
  100. {flowerpower-0.11.6.20.dist-info → flowerpower-0.21.0.dist-info}/licenses/LICENSE +0 -0
  101. {flowerpower-0.11.6.20.dist-info → flowerpower-0.21.0.dist-info}/top_level.txt +0 -0
@@ -1,554 +0,0 @@
1
- # Standard library imports
2
- from dataclasses import dataclass, field
3
-
4
- # Third-party imports
5
- from apscheduler.datastores.base import BaseDataStore
6
- from apscheduler.eventbrokers.base import BaseEventBroker
7
- from loguru import logger
8
- from sqlalchemy import text
9
- from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
10
-
11
- # Local imports
12
- from ...utils.logging import setup_logging
13
- from ..base import BaseBackend
14
-
15
- setup_logging()
16
-
17
-
18
- @dataclass # (slots=True)
19
- class APSDataStore(BaseBackend):
20
- """APScheduler data store implementation that supports multiple backend types.
21
-
22
- This class provides a flexible data store interface for APScheduler, supporting various
23
- backend storage options including SQLAlchemy-compatible databases, MongoDB, and in-memory
24
- storage.
25
-
26
- Args:
27
- schema (str | None): Database schema name. Defaults to "flowerpower".
28
- Note: Ignored for SQLite databases.
29
-
30
- Attributes:
31
- type (BackendType): Type of backend storage (inherited from BaseBackend)
32
- uri (str): Connection URI for the backend (inherited from BaseBackend)
33
- _client (BaseDataStore): The APScheduler data store instance
34
- _sqla_engine (AsyncEngine): SQLAlchemy async engine for SQL databases
35
-
36
- Raises:
37
- ValueError: If an invalid backend type is specified
38
-
39
- Example:
40
- ```python
41
- # Create PostgreSQL data store
42
- data_store = APSDataStore(
43
- type="postgresql",
44
- uri="postgresql+asyncpg://user:pass@localhost/db",
45
- schema="scheduler"
46
- )
47
- data_store.setup()
48
-
49
- # Create in-memory data store
50
- memory_store = APSDataStore(type="memory")
51
- memory_store.setup()
52
-
53
- # Create MongoDB data store
54
- mongo_store = APSDataStore(
55
- type="mongodb",
56
- uri="mongodb://localhost:27017",
57
- schema="scheduler"
58
- )
59
- mongo_store.setup()
60
- ```
61
- """
62
-
63
- schema: str | None = "flowerpower"
64
-
65
- def __post_init__(self):
66
- """Initialize and validate the data store configuration.
67
-
68
- This method is called automatically after instance creation. It:
69
- 1. Sets default type to "memory" if not specified
70
- 2. Calls parent class initialization
71
- 3. Validates backend type
72
- 4. Warns about schema limitations with SQLite
73
-
74
- Raises:
75
- ValueError: If an invalid backend type is specified
76
- """
77
- if self.type is None:
78
- self.type = "memory"
79
- super().__post_init__()
80
-
81
- if (
82
- not self.type.is_memory_type
83
- and not self.type.is_mongodb_type
84
- and not self.type.is_sqla_type
85
- ):
86
- raise ValueError(
87
- f"Invalid backend type: {self.type}. Valid types: {
88
- [
89
- self.type.POSTGRESQL,
90
- self.type.MYSQL,
91
- self.type.SQLITE,
92
- self.type.MONGODB,
93
- self.type.MEMORY,
94
- ]
95
- }"
96
- )
97
- if self.type.is_sqlite_type and self.schema is not None:
98
- logger.warning(
99
- "SQLite does not support schema. When using SQLite, the schema will be ignored.",
100
- "When you need to use schemas, you can use several SQLite databases, ",
101
- "one for each schema. Or use PostgreSQL or MySQL.",
102
- )
103
- self.setup()
104
-
105
- async def _setup_db(self) -> None:
106
- """Initialize database and schema for SQL backends.
107
-
108
- Creates the database and schema if they don't exist. This is an internal async
109
- method called by setup_db().
110
-
111
- Raises:
112
- Exception: If database/schema creation fails
113
- """
114
- sqla_engine = create_async_engine(self.uri)
115
-
116
- try:
117
- await self._create_schema(sqla_engine)
118
- except Exception:
119
- await self._create_database_and_schema(sqla_engine)
120
-
121
- async def _create_schema(self, engine: AsyncEngine) -> None:
122
- """Create schema in existing database if it doesn't exist.
123
-
124
- Args:
125
- engine: SQLAlchemy async engine connected to the database
126
- """
127
- if not self.schema:
128
- return
129
-
130
- async with engine.begin() as conn:
131
- await conn.execute(text(f"CREATE SCHEMA IF NOT EXISTS {self.schema}"))
132
- await conn.commit()
133
-
134
- async def _create_database_and_schema(self, engine: AsyncEngine) -> None:
135
- """Create both database and schema if they don't exist.
136
-
137
- Creates a temporary connection to template1 to create the database,
138
- then creates the schema within the new database.
139
-
140
- Args:
141
- engine: SQLAlchemy async engine
142
- """
143
- database_name = self.uri.split("/")[-1].split("?")[0]
144
- temp_uri = self.uri.replace(f"/{database_name}", "/template1")
145
- temp_engine = create_async_engine(temp_uri)
146
-
147
- async with temp_engine.begin() as conn:
148
- await conn.execute(text("COMMIT"))
149
- try:
150
- await conn.execute(text(f"CREATE DATABASE {database_name}"))
151
- finally:
152
- await conn.execute(text("COMMIT"))
153
-
154
- if self.schema:
155
- await self._create_schema(engine)
156
-
157
- def setup_db(self) -> None:
158
- """Initialize the database synchronously.
159
-
160
- This is a blocking wrapper around the async _setup_db() method.
161
- Uses anyio portal to run async code from synchronous context.
162
- """
163
- from anyio.from_thread import start_blocking_portal
164
-
165
- with start_blocking_portal() as portal:
166
- portal.call(self._setup_db)
167
-
168
- def _setup_sqlalchemy(self) -> None:
169
- """Initialize SQLAlchemy data store.
170
-
171
- Sets up SQLAlchemy engine and data store for PostgreSQL, MySQL, or SQLite.
172
- Creates database and schema if needed.
173
- """
174
- from apscheduler.datastores.sqlalchemy import SQLAlchemyDataStore
175
-
176
- if not self.type.is_sqlite_type:
177
- self.setup_db()
178
- self._sqla_engine = create_async_engine(self.uri)
179
- self._client = SQLAlchemyDataStore(self._sqla_engine, schema=self.schema)
180
-
181
- def _setup_mongodb(self) -> None:
182
- """Initialize MongoDB data store.
183
-
184
- Creates MongoDBDataStore instance using provided URI and schema (database name).
185
- """
186
- from apscheduler.datastores.mongodb import MongoDBDataStore
187
-
188
- self._client = MongoDBDataStore(self.uri, database=self.schema)
189
-
190
- def _setup_memory(self) -> None:
191
- """Initialize in-memory data store.
192
-
193
- Creates MemoryDataStore instance for temporary storage.
194
- """
195
- from apscheduler.datastores.memory import MemoryDataStore
196
-
197
- self._client = MemoryDataStore()
198
-
199
- def setup(self) -> None:
200
- """Initialize the appropriate data store based on backend type.
201
-
202
- This is the main setup method that should be called after creating the data store.
203
- It delegates to the appropriate setup method based on the backend type.
204
- """
205
- try:
206
- if self.type.is_sqla_type:
207
- self._setup_sqlalchemy()
208
- elif self.type.is_mongodb_type:
209
- self._setup_mongodb()
210
- else:
211
- self._setup_memory()
212
- except Exception as e:
213
- logger.info(
214
- f"Failed to initialize APScheduler data store for type {self.type}: {e}"
215
- )
216
-
217
- self._client = None
218
- self._sqla_engine = None
219
-
220
- @property
221
- def client(self) -> BaseDataStore:
222
- """Get the initialized data store client.
223
-
224
- Returns:
225
- BaseDataStore: The APScheduler data store instance, initializing it if needed.
226
- """
227
- if self._client is None:
228
- self.setup()
229
- return self._client
230
-
231
- @property
232
- def sqla_engine(self) -> AsyncEngine | None:
233
- """Get the SQLAlchemy engine.
234
-
235
- Returns:
236
- AsyncEngine | None: The async SQLAlchemy engine for SQL backends,
237
- None for non-SQL backends
238
- """
239
- if self._sqla_engine is None:
240
- self.setup()
241
- return self._sqla_engine
242
-
243
-
244
- @dataclass # (slots=True)
245
- class APSEventBroker(BaseBackend):
246
- """APScheduler event broker implementation supporting multiple messaging backends.
247
-
248
- This class provides a flexible event broker interface for APScheduler that can use
249
- various messaging systems including PostgreSQL NOTIFY/LISTEN, MQTT, Redis pub/sub,
250
- and in-memory event handling.
251
-
252
- Attributes:
253
- type (BackendType): Type of backend messaging system (inherited from BaseBackend)
254
- uri (str): Connection URI for the backend (inherited from BaseBackend)
255
- _client (BaseEventBroker): The APScheduler event broker instance
256
- _sqla_engine (AsyncEngine): SQLAlchemy async engine for PostgreSQL NOTIFY/LISTEN
257
-
258
- Raises:
259
- ValueError: If an invalid backend type is specified or if SQLAlchemy engine is not PostgreSQL
260
- when using from_ds_sqla
261
-
262
- Example:
263
- ```python
264
- # Create Redis event broker
265
- redis_broker = APSEventBroker(
266
- type="redis",
267
- uri="redis://localhost:6379/0"
268
- )
269
- redis_broker.setup()
270
-
271
- # Create MQTT event broker
272
- mqtt_broker = APSEventBroker(
273
- type="mqtt",
274
- uri="mqtt://user:pass@localhost:1883"
275
- )
276
- mqtt_broker.setup()
277
-
278
- # Create PostgreSQL event broker from existing SQLAlchemy engine
279
- pg_broker = APSEventBroker.from_ds_sqla(pg_engine)
280
-
281
- # Create in-memory event broker
282
- memory_broker = APSEventBroker(type="memory")
283
- memory_broker.setup()
284
- ```
285
- """
286
-
287
- def __post_init__(self):
288
- """Initialize and validate the event broker configuration.
289
-
290
- This method is called automatically after instance creation. It:
291
- 1. Sets default type to "memory" if not specified
292
- 2. Calls parent class initialization
293
- 3. Validates backend type compatibility
294
-
295
- Raises:
296
- ValueError: If an invalid backend type is specified or an unsupported
297
- combination of settings is provided (e.g., Redis without URI)
298
- """
299
- if self.type is None:
300
- self.type = "memory"
301
- super().__post_init__()
302
-
303
- if (
304
- not self.type.is_redis_type
305
- and not self.type.is_memory_type
306
- and not self.type.is_mongodb_type
307
- and not self.type.is_sqla_type
308
- ):
309
- raise ValueError(
310
- f"Invalid backend type: {self.type}. Valid types: {
311
- [
312
- self.type.POSTGRESQL,
313
- self.type.MQTT,
314
- self.type.REDIS,
315
- self.type.MEMORY,
316
- ]
317
- }"
318
- )
319
- self.setup()
320
-
321
- def _setup_asyncpg_event_broker(self):
322
- """Initialize PostgreSQL event broker.
323
-
324
- Sets up AsyncpgEventBroker using either a DSN string or existing SQLAlchemy engine.
325
- Uses PostgreSQL's NOTIFY/LISTEN for event messaging.
326
- """
327
- from apscheduler.eventbrokers.asyncpg import AsyncpgEventBroker
328
-
329
- if self._sqla_engine is None:
330
- self._client = AsyncpgEventBroker.from_dsn(dsn=self.uri)
331
- else:
332
- self._client = AsyncpgEventBroker.from_async_sqla_engine(
333
- engine=self._sqla_engine
334
- )
335
-
336
- def _setup_mqtt_event_broker(self):
337
- """Initialize MQTT event broker.
338
-
339
- Parses MQTT connection URI for host, port, credentials and SSL settings.
340
- Sets up MQTTEventBroker for pub/sub messaging.
341
- """
342
- import urllib.parse
343
-
344
- from apscheduler.eventbrokers.mqtt import MQTTEventBroker
345
-
346
- # Parse the URI
347
- parsed = urllib.parse.urlparse(self.uri)
348
-
349
- hostname = parsed.hostname
350
- port = parsed.port
351
- username = parsed.username
352
- password = parsed.password
353
- use_ssl = parsed.scheme == "mqtts"
354
-
355
- self._client = MQTTEventBroker(
356
- host=hostname, port=port, ssl=use_ssl, topic="flowerpower/worker"
357
- )
358
- if (self.username is not None) and (self.password is not None):
359
- self._client._client.username_pw_set(
360
- username,
361
- password,
362
- )
363
-
364
- def _setup_redis_event_broker(self):
365
- """Initialize Redis event broker.
366
-
367
- Creates RedisEventBroker instance using provided Redis URI.
368
- Uses Redis pub/sub for event messaging.
369
- """
370
- from apscheduler.eventbrokers.redis import RedisEventBroker
371
-
372
- self._client = RedisEventBroker(self.uri)
373
-
374
- def _setup_local_event_broker(self):
375
- """Initialize in-memory event broker.
376
-
377
- Creates LocalEventBroker for in-process event handling.
378
- """
379
- from apscheduler.eventbrokers.local import LocalEventBroker
380
-
381
- self._client = LocalEventBroker()
382
-
383
- def setup(self):
384
- """Initialize the appropriate event broker based on backend type.
385
-
386
- This is the main setup method that should be called after creating the event broker.
387
- It delegates to the appropriate setup method based on the backend type.
388
- """
389
- try:
390
- if self.type.is_sqla_type:
391
- self._setup_asyncpg_event_broker()
392
- elif self.type.is_mqtt_type:
393
- self._setup_mqtt_event_broker()
394
- elif self.type.is_redis_type:
395
- self._setup_redis_event_broker()
396
- else:
397
- self._setup_local_event_broker()
398
- except Exception as e:
399
- logger.info(
400
- f"Failed to initialize APScheduler event broker for type {self.type}: {e}"
401
- )
402
- self._client = None
403
- self._sqla_engine = None
404
-
405
- @property
406
- def client(self) -> BaseEventBroker:
407
- """Get the initialized event broker client.
408
-
409
- Returns:
410
- BaseEventBroker: The APScheduler event broker instance, initializing it if needed.
411
- """
412
- if self._client is None:
413
- self.setup()
414
- return self._client
415
-
416
- @property
417
- def sqla_engine(self) -> AsyncEngine | None:
418
- """Get the SQLAlchemy engine.
419
-
420
- Returns:
421
- AsyncEngine | None: The async SQLAlchemy engine for PostgreSQL backend,
422
- None for other backends
423
- """
424
- if self._sqla_engine is None:
425
- self.setup()
426
- return self._sqla_engine
427
-
428
- @classmethod
429
- def from_ds_sqla(cls, sqla_engine: AsyncEngine) -> "APSEventBroker":
430
- """Create event broker from existing SQLAlchemy engine.
431
-
432
- This factory method creates a PostgreSQL event broker that shares the
433
- same database connection as a data store.
434
-
435
- Args:
436
- sqla_engine: Async SQLAlchemy engine, must be PostgreSQL with asyncpg driver
437
-
438
- Returns:
439
- APSEventBroker: New event broker instance using the provided engine
440
-
441
- Raises:
442
- ValueError: If engine is not PostgreSQL with asyncpg driver
443
-
444
- Example:
445
- ```python
446
- # Create data store with PostgreSQL
447
- data_store = APSDataStore(
448
- type="postgresql",
449
- uri="postgresql+asyncpg://user:pass@localhost/db"
450
- )
451
- data_store.setup()
452
-
453
- # Create event broker using same connection
454
- event_broker = APSEventBroker.from_ds_sqla(data_store.sqla_engine)
455
- ```
456
- """
457
- if sqla_engine.url.drivername != "postgresql+asyncpg":
458
- raise ValueError(
459
- f"sqla_engine must be a PostgreSQL engine ('postgresql+asyncpg://'), got '{sqla_engine.url.drivername}'"
460
- )
461
- return cls(
462
- type="postgresql",
463
- _sqla_engine=sqla_engine,
464
- )
465
-
466
-
467
- @dataclass(slots=True)
468
- class APSBackend:
469
- """Main backend configuration class for APScheduler combining data store and event broker.
470
-
471
- This class serves as a container for configuring both the data store and event broker
472
- components of APScheduler. It handles initialization and setup of both components,
473
- with support for dictionary-based configuration.
474
-
475
- Args:
476
- data_store (APSDataStore | dict | None): Data store configuration, either as an
477
- APSDataStore instance or a configuration dictionary. Defaults to a new
478
- APSDataStore instance.
479
- event_broker (APSEventBroker | dict | None): Event broker configuration, either as
480
- an APSEventBroker instance or a configuration dictionary. Defaults to a new
481
- APSEventBroker instance.
482
- cleanup_interval (int): Interval in seconds for cleaning up old jobs. Defaults to 300.
483
- max_concurrent_jobs (int): Maximum number of jobs that can run concurrently.
484
- default_job_executor (str): Default job executor to use. Defaults to "threadpool".
485
-
486
- Example:
487
- ```python
488
- # Create backend with default memory storage
489
- backend = APSBackend()
490
-
491
- # Create backend with PostgreSQL data store and Redis event broker
492
- backend = APSBackend(
493
- data_store={
494
- "type": "postgresql",
495
- "uri": "postgresql+asyncpg://user:pass@localhost/db",
496
- "schema": "scheduler"
497
- },
498
- event_broker={
499
- "type": "redis",
500
- "uri": "redis://localhost:6379/0"
501
- }
502
- )
503
-
504
- # Create backend with PostgreSQL for both data store and event broker
505
- backend = APSBackend(
506
- data_store={
507
- "type": "postgresql",
508
- "uri": "postgresql+asyncpg://user:pass@localhost/db",
509
- },
510
- event_broker={
511
- "from_ds_sqla": True # Use same PostgreSQL connection for events
512
- }
513
- )
514
- ```
515
- """
516
-
517
- data_store: APSDataStore | dict | None = field(default_factory=APSDataStore)
518
- event_broker: APSEventBroker | dict | None = field(default_factory=APSEventBroker)
519
- cleanup_interval: int = field(default=300)
520
- max_concurrent_jobs: int = field(default=10)
521
- default_job_executor: str = field(default="threadpool")
522
-
523
- def __post_init__(self):
524
- """Initialize and setup data store and event broker components.
525
-
526
- Called automatically after instance creation. This method:
527
- 1. Converts data store dict to APSDataStore instance if needed
528
- 2. Initializes data store
529
- 3. Converts event broker dict to APSEventBroker instance if needed
530
- 4. Sets up event broker using data store connection if specified
531
- 5. Initializes event broker
532
- """
533
- if self.data_store is not None:
534
- if isinstance(self.data_store, dict):
535
- self.data_store = APSDataStore.from_dict(self.data_store)
536
- # self.data_store.setup()
537
- if self.event_broker is not None:
538
- if isinstance(self.event_broker, dict):
539
- if (
540
- "from_ds_sqla" in self.event_broker
541
- and self.data_store._sqla_engine is not None
542
- ):
543
- self.event_broker = APSEventBroker.from_ds_sqla(
544
- self.data_store._sqla_engine
545
- )
546
- else:
547
- self.event_broker.pop("from_ds_sqla", None)
548
- self.event_broker = APSEventBroker.from_dict(self.event_broker)
549
- # self.event_broker.setup()
550
-
551
- if self.data_store._client is None or self.event_broker._client is None:
552
- logger.warning(
553
- "APSBackend is not fully initialized. Job Queue is not available."
554
- )