FlowerPower 0.9.13.1__py3-none-any.whl → 1.0.0b2__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package. It is provided for informational purposes only and reflects the packages exactly as they were published to their respective public registries.
- flowerpower/__init__.py +17 -2
- flowerpower/cfg/__init__.py +201 -149
- flowerpower/cfg/base.py +122 -24
- flowerpower/cfg/pipeline/__init__.py +254 -0
- flowerpower/cfg/pipeline/adapter.py +66 -0
- flowerpower/cfg/pipeline/run.py +40 -11
- flowerpower/cfg/pipeline/schedule.py +69 -79
- flowerpower/cfg/project/__init__.py +149 -0
- flowerpower/cfg/project/adapter.py +57 -0
- flowerpower/cfg/project/job_queue.py +165 -0
- flowerpower/cli/__init__.py +92 -37
- flowerpower/cli/job_queue.py +878 -0
- flowerpower/cli/mqtt.py +32 -1
- flowerpower/cli/pipeline.py +559 -406
- flowerpower/cli/utils.py +29 -18
- flowerpower/flowerpower.py +12 -8
- flowerpower/fs/__init__.py +20 -2
- flowerpower/fs/base.py +350 -26
- flowerpower/fs/ext.py +797 -216
- flowerpower/fs/storage_options.py +1097 -55
- flowerpower/io/base.py +13 -18
- flowerpower/io/loader/__init__.py +28 -0
- flowerpower/io/loader/deltatable.py +7 -10
- flowerpower/io/metadata.py +1 -0
- flowerpower/io/saver/__init__.py +28 -0
- flowerpower/io/saver/deltatable.py +4 -3
- flowerpower/job_queue/__init__.py +252 -0
- flowerpower/job_queue/apscheduler/__init__.py +11 -0
- flowerpower/job_queue/apscheduler/_setup/datastore.py +110 -0
- flowerpower/job_queue/apscheduler/_setup/eventbroker.py +93 -0
- flowerpower/job_queue/apscheduler/manager.py +1063 -0
- flowerpower/job_queue/apscheduler/setup.py +524 -0
- flowerpower/job_queue/apscheduler/trigger.py +169 -0
- flowerpower/job_queue/apscheduler/utils.py +309 -0
- flowerpower/job_queue/base.py +382 -0
- flowerpower/job_queue/rq/__init__.py +10 -0
- flowerpower/job_queue/rq/_trigger.py +37 -0
- flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +226 -0
- flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +231 -0
- flowerpower/job_queue/rq/manager.py +1449 -0
- flowerpower/job_queue/rq/setup.py +150 -0
- flowerpower/job_queue/rq/utils.py +69 -0
- flowerpower/pipeline/__init__.py +5 -0
- flowerpower/pipeline/base.py +118 -0
- flowerpower/pipeline/io.py +407 -0
- flowerpower/pipeline/job_queue.py +505 -0
- flowerpower/pipeline/manager.py +1586 -0
- flowerpower/pipeline/registry.py +560 -0
- flowerpower/pipeline/runner.py +560 -0
- flowerpower/pipeline/visualizer.py +142 -0
- flowerpower/plugins/mqtt/__init__.py +12 -0
- flowerpower/plugins/mqtt/cfg.py +16 -0
- flowerpower/plugins/mqtt/manager.py +789 -0
- flowerpower/settings.py +110 -0
- flowerpower/utils/logging.py +21 -0
- flowerpower/utils/misc.py +57 -9
- flowerpower/utils/sql.py +122 -24
- flowerpower/utils/templates.py +2 -142
- flowerpower-1.0.0b2.dist-info/METADATA +324 -0
- flowerpower-1.0.0b2.dist-info/RECORD +94 -0
- flowerpower/_web/__init__.py +0 -61
- flowerpower/_web/routes/config.py +0 -103
- flowerpower/_web/routes/pipelines.py +0 -173
- flowerpower/_web/routes/scheduler.py +0 -136
- flowerpower/cfg/pipeline/tracker.py +0 -14
- flowerpower/cfg/project/open_telemetry.py +0 -8
- flowerpower/cfg/project/tracker.py +0 -11
- flowerpower/cfg/project/worker.py +0 -19
- flowerpower/cli/scheduler.py +0 -309
- flowerpower/cli/web.py +0 -44
- flowerpower/event_handler.py +0 -23
- flowerpower/mqtt.py +0 -609
- flowerpower/pipeline.py +0 -2499
- flowerpower/scheduler.py +0 -680
- flowerpower/tui.py +0 -79
- flowerpower/utils/datastore.py +0 -186
- flowerpower/utils/eventbroker.py +0 -127
- flowerpower/utils/executor.py +0 -58
- flowerpower/utils/trigger.py +0 -140
- flowerpower-0.9.13.1.dist-info/METADATA +0 -586
- flowerpower-0.9.13.1.dist-info/RECORD +0 -76
- /flowerpower/{cfg/pipeline/params.py → cli/worker.py} +0 -0
- {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b2.dist-info}/WHEEL +0 -0
- {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b2.dist-info}/entry_points.txt +0 -0
- {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b2.dist-info}/top_level.txt +0 -0
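The headline change in this beta is structural: the monolithic `flowerpower/pipeline.py`, `flowerpower/scheduler.py`, and `flowerpower/mqtt.py` modules are removed and replaced by the `pipeline/`, `job_queue/`, and `plugins/mqtt/` packages. A minimal migration sketch, assuming the factory signatures shown in the `job_queue/__init__.py` hunk below (constructor defaults in the released wheel may differ):

```python
# Hypothetical sketch of the 1.0.0b2 entry points; JobQueue and Backend are
# taken from the job_queue/__init__.py diff below, everything else is assumed.
from flowerpower.job_queue import Backend, JobQueue

# The old `flowerpower.scheduler` module is gone; scheduling now goes through
# a job queue manager produced by the JobQueue factory.
backend = Backend(job_queue_type="rq", uri="redis://localhost:6379/0")
worker = JobQueue(type="rq", name="my_worker", backend=backend)
```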
flowerpower/io/base.py
CHANGED
@@ -1,6 +1,6 @@
 import importlib
 import posixpath
-from typing import
+from typing import Any, Generator
 
 import datafusion
 import duckdb
@@ -12,23 +12,14 @@ from fsspec.utils import get_protocol
 from pydantic import BaseModel, ConfigDict
 
 from ..fs import get_filesystem
-from ..fs.ext import
-from ..fs.storage_options import (
-
-
-    GcsStorageOptions,
-    GitHubStorageOptions,
-    GitLabStorageOptions,
-    StorageOptions,
-)
+from ..fs.ext import _dict_to_dataframe, path_to_glob
+from ..fs.storage_options import (AwsStorageOptions, AzureStorageOptions,
+                                  GcsStorageOptions, GitHubStorageOptions,
+                                  GitLabStorageOptions, StorageOptions)
 from ..utils.misc import convert_large_types_to_standard, to_pyarrow_table
 from ..utils.polars import pl
 from ..utils.sql import sql2polars_filter, sql2pyarrow_filter
-from .metadata import
-    get_dataframe_metadata,
-    get_pyarrow_dataset_metadata,
-)
-
+from .metadata import get_dataframe_metadata, get_pyarrow_dataset_metadata
 
 if importlib.util.find_spec("pydala"):
     from pydala.dataset import ParquetDataset
@@ -1420,9 +1411,13 @@ class BaseDatabaseIO(BaseModel):
            db in ["postgres", "mysql", "mssql", "oracle"]
            and not self.connection_string
        ):
-            if not all(
-
-
+            if not all([
+                self.username,
+                self.password,
+                self.server,
+                self.port,
+                self.database,
+            ]):
                raise ValueError(
                    f"{self.type_} requires connection_string or username, password, server, port, and table_name "
                    "to build it."
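The validation above now spells out exactly which fields can substitute for a missing `connection_string`: `username`, `password`, `server`, `port`, and `database`. A hedged sketch of the two equivalent ways to configure a database IO object (the `PostgreSQLReader` constructor signature is an assumption based on these field names):

```python
# Sketch only: the field names mirror the all([...]) check above, but the
# PostgreSQLReader constructor signature is an assumption.
from flowerpower.io.loader import PostgreSQLReader

# Option 1: let FlowerPower build the connection string from its parts.
reader = PostgreSQLReader(
    table_name="events",
    username="app",
    password="secret",
    server="localhost",
    port=5432,
    database="analytics",
)

# Option 2: pass a ready-made connection string and skip the field check.
reader = PostgreSQLReader(
    table_name="events",
    connection_string="postgresql://app:secret@localhost:5432/analytics",
)
```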
flowerpower/io/loader/__init__.py
ADDED
@@ -0,0 +1,28 @@
+from .csv import CSVDatasetReader, CSVFileReader
+from .deltatable import DeltaTableReader
+from .duckdb import DuckDBReader
+from .json import JsonDatasetReader, JsonFileReader
+from .mssql import MSSQLReader
+from .mysql import MySQLReader
+from .oracle import OracleDBReader
+from .parquet import ParquetDatasetReader, ParquetFileReader
+from .postgres import PostgreSQLReader
+from .pydala import PydalaDatasetReader
+from .sqlite import SQLiteReader
+
+__all__ = [
+    "CSVFileReader",
+    "CSVDatasetReader",
+    "DeltaTableReader",
+    "DuckDBReader",
+    "JsonFileReader",
+    "JsonDatasetReader",
+    "MSSQLReader",
+    "MySQLReader",
+    "OracleDBReader",
+    "ParquetFileReader",
+    "ParquetDatasetReader",
+    "PostgreSQLReader",
+    "PydalaDatasetReader",
+    "SQLiteReader",
+]
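This new package init flattens the reader imports, so downstream code can import any reader from `flowerpower.io.loader` directly. For example (the constructor arguments are illustrative assumptions):

```python
# Illustrative only: the `path` keyword is an assumption about the reader API.
from flowerpower.io.loader import CSVFileReader, ParquetDatasetReader

csv_reader = CSVFileReader(path="data/input.csv")
parquet_reader = ParquetDatasetReader(path="s3://bucket/dataset/")
```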
flowerpower/io/loader/deltatable.py
CHANGED
@@ -1,26 +1,23 @@
 # import datetime as dt
 
 
+import datetime
+
 import pyarrow as pa
 import pyarrow.dataset as pds
 from deltalake import DeltaTable, table
 from deltalake.exceptions import TableNotFoundError
-import
+# from ..utils import get_dataframe_metadata, get_delta_metadata
+from loguru import logger
 from sherlock import RedisLock
+
 from ..base import BaseDatasetReader
-from ..metadata import (
-
-    get_dataframe_metadata,
-    get_pyarrow_dataset_metadata,
-)
+from ..metadata import (get_dataframe_metadata, get_delta_metadata,
+                        get_pyarrow_dataset_metadata)
 
 # from hamilton.function_modifiers import dataloader
 
 
-# from ..utils import get_dataframe_metadata, get_delta_metadata
-from loguru import logger
-
-
 class DeltaTableReader(BaseDatasetReader):
     """Delta table loader.
 
flowerpower/io/saver/__init__.py
ADDED
@@ -0,0 +1,28 @@
+from .csv import CSVDatasetWriter, CSVFileWriter
+from .deltatable import DeltaTableWriter
+from .duckdb import DuckDBWriter
+from .json import JsonDatasetWriter, JsonFileWriter
+from .mssql import MSSQLWriter
+from .mysql import MySQLWriter
+from .oracle import OracleDBWriter
+from .parquet import ParquetDatasetWriter, ParquetFileWriter
+from .postgres import PostgreSQLWriter
+from .pydala import PydalaDatasetWriter
+from .sqlite import SQLiteWriter
+
+__all__ = [
+    "CSVFileWriter",
+    "CSVDatasetWriter",
+    "DeltaTableWriter",
+    "DuckDBWriter",
+    "JsonFileWriter",
+    "JsonDatasetWriter",
+    "MSSQLWriter",
+    "MySQLWriter",
+    "OracleDBWriter",
+    "ParquetFileWriter",
+    "ParquetDatasetWriter",
+    "PostgreSQLWriter",
+    "PydalaDatasetWriter",
+    "SQLiteWriter",
+]
flowerpower/io/saver/deltatable.py
CHANGED
@@ -3,14 +3,15 @@ from typing import Any
 import pandas as pd
 import polars as pl
 import pyarrow as pa
-from deltalake.table import ColumnProperties, CommitProperties,
+from deltalake.table import (ColumnProperties, CommitProperties,
+                             PostCommitHookProperties)
 from deltalake.writer import WriterProperties, write_deltalake
+from redis import Redis, StrictRedis
+from sherlock import RedisLock
 
 from ...utils.misc import _dict_to_dataframe
 from ..base import BaseDatasetWriter
 from ..metadata import get_dataframe_metadata
-from sherlock import RedisLock
-from redis import StrictRedis, Redis
 
 
 class DeltaTableWriter(BaseDatasetWriter):
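The writer now imports `RedisLock` from sherlock alongside the Redis clients, which suggests delta table commits are serialized behind a distributed lock. A minimal sketch of that pattern, assuming sherlock's documented `client` keyword (this is not `DeltaTableWriter`'s actual code):

```python
# Sketch of the distributed-lock pattern implied by the new imports; the lock
# name and client wiring are assumptions, not DeltaTableWriter internals.
from redis import StrictRedis
from sherlock import RedisLock

redis_client = StrictRedis(host="localhost", port=6379)
lock = RedisLock("delta-table-write", client=redis_client)

with lock:
    # Only one process holding "delta-table-write" commits at a time.
    write_to_delta_table()  # placeholder for the actual write_deltalake() call
```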
flowerpower/job_queue/__init__.py
ADDED
@@ -0,0 +1,252 @@
+from typing import Any, Optional
+
+from ..fs import AbstractFileSystem
+from ..utils.logging import setup_logging
+from .apscheduler import APSBackend, APSManager
+from .base import BaseBackend, BaseJobQueueManager
+from .rq import RQBackend, RQManager
+from ..cfg.project import ProjectConfig
+
+setup_logging()
+
+
+class JobQueue:
+    """A factory class for creating job queue instances for job scheduling and execution.
+
+    This class provides a unified interface for creating different types of job queue instances
+    (RQ, APScheduler, Huey) based on the specified backend type. Each job queue type provides
+    different capabilities for job scheduling and execution.
+
+    The job queue instances handle:
+    - Job scheduling and execution
+    - Background task processing
+    - Job queue management
+    - Result storage and retrieval
+
+    Example:
+        ```python
+        # Create an RQ job queue
+        rq_worker = JobQueue(
+            type="rq",
+            name="my_worker",
+            log_level="DEBUG"
+        )
+
+        # Create an APScheduler job queue with custom backend
+        from flowerpower.job_queue.apscheduler import APSBackend
+        backend_config = APSBackend(
+            data_store={"type": "postgresql", "uri": "postgresql+asyncpg://user:pass@localhost/db"},
+            event_broker={"type": "redis", "uri": "redis://localhost:6379/0"}
+        )
+        aps_worker = JobQueue(
+            type="apscheduler",
+            name="scheduler",
+            backend=backend_config
+        )
+        ```
+    """
+
+    def __new__(
+        cls,
+        type: str | None = None,
+        name: str | None = None,
+        base_dir: str | None = ".",
+        backend: BaseBackend | None = None,
+        storage_options: Optional[dict[str, Any]] = None,
+        fs: AbstractFileSystem | None = None,
+        log_level: str | None = None,
+        **kwargs,
+    ) -> BaseJobQueueManager:
+        """Create a new job queue instance based on the specified backend type.
+
+        Args:
+            type: The type of job queue to create. Valid values are:
+                - "rq": Redis Queue job queue for Redis-based job queuing
+                - "apscheduler": APScheduler job queue for advanced job scheduling
+            name: Name of the job queue instance. Used for identification in logs
+                and monitoring.
+            base_dir: Base directory for job queue files and configuration. Defaults
+                to current working directory if not specified.
+            backend: Pre-configured backend instance. If not provided, one will
+                be created based on configuration settings.
+            storage_options: Options for configuring filesystem storage access.
+                Example: {"mode": "async", "root": "/tmp", "protocol": "s3"}
+            fs: Custom filesystem implementation for storage operations.
+                Example: S3FileSystem, LocalFileSystem, etc.
+            log_level: Logging level for the job queue. Valid values are:
+                "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
+            **kwargs: Additional configuration options passed to the specific
+                job queue implementation.
+
+        Returns:
+            BaseJobQueueManager: An instance of the specified job queue type (RQManager,
+                APSManager).
+
+        Raises:
+            ValueError: If an invalid job queue type is specified.
+            ImportError: If required dependencies for the chosen job queue type
+                are not installed.
+            RuntimeError: If job queue initialization fails due to configuration
+                or connection issues.
+
+        Example:
+            ```python
+            # Basic RQ job queue
+            worker = JobQueue(type="rq", name="basic_worker")
+
+            # APScheduler with custom logging and storage
+            worker = JobQueue(
+                type="apscheduler",
+                name="scheduler",
+                base_dir="/app/data",
+                storage_options={"mode": "async"},
+                log_level="DEBUG"
+            )
+            ```
+        """
+        if type is None:
+            type = ProjectConfig.load(
+                base_dir=base_dir, name=name, fs=fs, storage_options=storage_options or {}
+            ).job_queue.type
+
+        if type == "rq":
+            return RQManager(
+                name=name,
+                base_dir=base_dir,
+                backend=backend,
+                storage_options=storage_options,
+                fs=fs,
+                log_level=log_level,
+                **kwargs,
+            )
+        elif type == "apscheduler":
+            return APSManager(
+                name=name,
+                base_dir=base_dir,
+                backend=backend,
+                storage_options=storage_options,
+                fs=fs,
+                log_level=log_level,
+                **kwargs,
+            )
+        else:
+            raise ValueError(
+                f"Invalid job queue type: {type}. Valid types: ['rq', 'apscheduler']"
+            )
+
+
+class Backend:
+    """A factory class for creating backend instances for different job queue types.
+
+    This class provides a unified interface for creating backend instances that handle
+    the storage, queuing, and event management for different job queue types. Each backend
+    type provides specific implementations for:
+    - Job storage and persistence
+    - Queue management
+    - Event handling and communication
+    - Result storage
+
+    Example:
+        ```python
+        # Create RQ backend with Redis
+        rq_backend = Backend(
+            job_queue_type="rq",
+            uri="redis://localhost:6379/0",
+            queues=["high", "default", "low"]
+        )
+
+        # Create APScheduler backend with PostgreSQL and Redis
+        aps_backend = Backend(
+            job_queue_type="apscheduler",
+            data_store={
+                "type": "postgresql",
+                "uri": "postgresql+asyncpg://user:pass@localhost/db"
+            },
+            event_broker={
+                "type": "redis",
+                "uri": "redis://localhost:6379/0"
+            }
+        )
+        ```
+    """
+
+    def __new__(
+        cls,
+        job_queue_type: str,
+        **kwargs,
+    ) -> BaseBackend:
+        """Create a new backend instance based on the specified job queue type.
+
+        Args:
+            job_queue_type: The type of backend to create. Valid values are:
+                - "rq": Redis Queue backend using Redis
+                - "apscheduler": APScheduler backend supporting various databases
+                  and event brokers
+            **kwargs: Backend-specific configuration options:
+                For RQ:
+                    - uri (str): Redis connection URI
+                    - queues (list[str]): List of queue names
+                    - result_ttl (int): Time to live for results in seconds
+                For APScheduler:
+                    - data_store (dict): Data store configuration
+                    - event_broker (dict): Event broker configuration
+                    - cleanup_interval (int): Cleanup interval in seconds
+                    - max_concurrent_jobs (int): Maximum concurrent jobs
+
+        Returns:
+            BaseBackend: An instance of RQBackend or APSBackend depending on
+                the specified job queue type.
+
+        Raises:
+            ValueError: If an invalid job queue type is specified.
+            RuntimeError: If backend initialization fails due to configuration
+                or connection issues.
+
+        Example:
+            ```python
+            # Create RQ backend
+            rq_backend = Backend(
+                job_queue_type="rq",
+                uri="redis://localhost:6379/0",
+                queues=["high", "default", "low"],
+                result_ttl=3600
+            )
+
+            # Create APScheduler backend with PostgreSQL and Redis
+            aps_backend = Backend(
+                job_queue_type="apscheduler",
+                data_store={
+                    "type": "postgresql",
+                    "uri": "postgresql+asyncpg://user:pass@localhost/db",
+                    "schema": "scheduler"
+                },
+                event_broker={
+                    "type": "redis",
+                    "uri": "redis://localhost:6379/0"
+                },
+                cleanup_interval=300,
+                max_concurrent_jobs=10
+            )
+            ```
+        """
+        if job_queue_type == "rq":
+            return RQBackend(**kwargs)
+        elif job_queue_type == "apscheduler":
+            return APSBackend(**kwargs)
+        else:
+            raise ValueError(
+                f"Invalid job queue type: {job_queue_type}. Valid types: ['rq', 'apscheduler']"
+            )
+
+
+__all__ = [
+    "JobQueue",
+    "RQManager",
+    "APSManager",
+    # "HueyWorker",
+    "Backend",
+    "RQBackend",
+    "APSBackend",
+]
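Note the `type is None` branch in `JobQueue.__new__`: when no type is passed, the factory resolves it from the project configuration. A sketch of that config-driven path (the config file layout is an assumption):

```python
# Sketch: with `type` omitted, JobQueue falls back to
# ProjectConfig.load(...).job_queue.type, per the factory code above.
from flowerpower.job_queue import JobQueue

# Assumes the project under /app/project declares job_queue.type (e.g. "rq")
# in its FlowerPower config.
worker = JobQueue(name="configured_worker", base_dir="/app/project")
```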
flowerpower/job_queue/apscheduler/_setup/datastore.py
ADDED
@@ -0,0 +1,110 @@
+from apscheduler.datastores.base import BaseDataStore
+from sqlalchemy import text
+from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
+
+from ...base import BackendType, BaseBackend
+
+
+class APSDataStoreType(BackendType):
+    POSTGRESQL = "postgresql"
+    SQLITE = "sqlite"
+    MYSQL = "mysql"
+    MONGODB = "mongodb"
+    MEMORY = "memory"
+
+
+class APSDataStore(BaseBackend):
+    """Data store for APScheduler."""
+
+    def __post_init__(self):
+        super().__post_init__(backend_type=APSDataStoreType)
+        self._validate_inputs()
+
+    @classmethod
+    def from_dict(cls, d: dict[str, any]) -> "APSDataStore":
+        return cls(**d)
+
+    def _validate_inputs(self) -> None:
+        if self.type.value not in [ds.value for ds in APSDataStoreType]:
+            raise ValueError(
+                f"Invalid data store type: {self.type}. Valid types: {[ds.value for ds in APSDataStoreType]}"
+            )
+
+    async def _setup_db(self) -> None:
+        sqla_engine = create_async_engine(self.uri)
+
+        try:
+            await self._create_schema(sqla_engine)
+        except Exception:
+            await self._create_database_and_schema(sqla_engine)
+
+    async def _create_schema(self, engine: AsyncEngine) -> None:
+        if not self.schema_or_queue:
+            return
+
+        async with engine.begin() as conn:
+            await conn.execute(
+                text(f"CREATE SCHEMA IF NOT EXISTS {self.schema_or_queue}")
+            )
+            await conn.commit()
+
+    async def _create_database_and_schema(self, engine: AsyncEngine) -> None:
+        database_name = self.uri.split("/")[-1].split("?")[0]
+        temp_uri = self.uri.replace(f"/{database_name}", "/template1")
+        temp_engine = create_async_engine(temp_uri)
+
+        async with temp_engine.begin() as conn:
+            await conn.execute(text("COMMIT"))
+            try:
+                await conn.execute(text(f"CREATE DATABASE {database_name}"))
+            finally:
+                await conn.execute(text("COMMIT"))
+
+        if self.schema_or_queue:
+            await self._create_schema(engine)
+
+    def setup_db(self) -> None:
+        from anyio.from_thread import start_blocking_portal
+
+        with start_blocking_portal() as portal:
+            portal.call(self._setup_db)
+
+    def _setup_sqlalchemy(self) -> None:
+        from apscheduler.datastores.sqlalchemy import SQLAlchemyDataStore
+
+        if not self.type.is_sqlite_type:
+            self.setup_db()
+        self._sqla_engine = create_async_engine(self.uri)
+        self._client = SQLAlchemyDataStore(
+            self._sqla_engine, schema=self.schema_or_queue
+        )
+
+    def _setup_mongodb(self) -> None:
+        from apscheduler.datastores.mongodb import MongoDBDataStore
+
+        self._client = MongoDBDataStore(self.uri, database=self.schema_or_queue)
+
+    def _setup_memory(self) -> None:
+        from apscheduler.datastores.memory import MemoryDataStore
+
+        self._client = MemoryDataStore()
+
+    def setup(self) -> None:
+        if self.type.is_sqla_type:
+            self._setup_sqlalchemy()
+        elif self.type.is_mongodb_type:
+            self._setup_mongodb()
+        else:
+            self._setup_memory()
+
+    @property
+    def client(self) -> BaseDataStore:
+        if self._client is None:
+            self.setup()
+        return self._client
+
+    @property
+    def sqla_engine(self) -> AsyncEngine | None:
+        if self._sqla_engine is None:
+            self.setup()
+        return self._sqla_engine
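`APSDataStore.from_dict` plus the lazy `client` property give a compact construction path. A hedged example (the accepted keys are inferred from the attributes the class reads: `type`, `uri`, `schema_or_queue`):

```python
# Inferred usage: key names come from the attributes APSDataStore reads above;
# BaseBackend may accept more fields than shown here.
from flowerpower.job_queue.apscheduler._setup.datastore import APSDataStore

store = APSDataStore.from_dict({
    "type": "postgresql",
    "uri": "postgresql+asyncpg://user:pass@localhost/scheduler_db",
    "schema_or_queue": "flowerpower",
})
data_store = store.client  # lazily runs setup(), creating the schema if missing
```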
flowerpower/job_queue/apscheduler/_setup/eventbroker.py
ADDED
@@ -0,0 +1,93 @@
+from apscheduler.eventbrokers.base import BaseEventBroker
+from sqlalchemy.ext.asyncio import AsyncEngine
+
+from ...base import BackendType, BaseBackend
+
+
+class APSEventBrokerType(BackendType):
+    POSTGRESQL = "postgresql"
+    MEMORY = "memory"
+    REDIS = "redis"
+    MQTT = "mqtt"
+
+
+class APSEventBroker(BaseBackend):
+    """Data store for APScheduler."""
+
+    def __post_init__(self):
+        super().__post_init__(backend_type=APSEventBrokerType)
+
+    @classmethod
+    def from_dict(cls, d: dict[str, any]) -> "APSEventBroker":
+        return cls(**d)
+
+    def _validate_inputs(self) -> None:
+        if self.type.value not in [ds.value for ds in APSEventBrokerType]:
+            raise ValueError(
+                f"Invalid data store type: {self.type}. Valid types: {[ds.value for ds in APSEventBrokerType]}"
+            )
+
+    def _setup_asyncpg_event_broker(self):
+        from apscheduler.eventbrokers.asyncpg import AsyncpgEventBroker
+
+        if self._sqla_engine is None:
+            self._event_broker = AsyncpgEventBroker.from_dsn(dsn=self.uri)
+        else:
+            self._event_broker = AsyncpgEventBroker.from_async_sqla_engine(
+                engine=self._sqla_engine
+            )
+
+    def _setup_mqtt_event_broker(self):
+        import urllib.parse
+
+        from apscheduler.eventbrokers.mqtt import MQTTEventBroker
+
+        # Parse the URI
+        parsed = urllib.parse.urlparse(self.uri)
+
+        hostname = parsed.hostname
+        port = parsed.port
+        username = parsed.username
+        password = parsed.password
+        use_ssl = parsed.scheme == "mqtts"
+
+        self._event_broker = MQTTEventBroker(
+            host=hostname, port=port, ssl=use_ssl, topic="flowerpower/scheduler"
+        )
+        if (self.username is not None) and (self.password is not None):
+            self._event_broker._client.username_pw_set(
+                username,
+                password,
+            )
+
+    def _setup_redis_event_broker(self):
+        from apscheduler.eventbrokers.redis import RedisEventBroker
+
+        self._event_broker = RedisEventBroker(self.uri)
+
+    def _setup_local_event_broker(self):
+        from apscheduler.eventbrokers.local import LocalEventBroker
+
+        self._event_broker = LocalEventBroker()
+
+    def setup(self):
+        if self.is_sqla_type:
+            self._setup_asyncpg_event_broker()
+        elif self.is_mqtt_type:
+            self._setup_mqtt_event_broker()
+        elif self.is_redis_type:
+            self._setup_redis_event_broker()
+        else:
+            self._setup_local_event_broker()
+
+    @property
+    def client(self) -> BaseEventBroker:
+        if self._event_broker is None:
+            self.setup()
+        return self._event_broker
+
+    @property
+    def sqla_engine(self) -> AsyncEngine | None:
+        if self._sqla_engine is None:
+            self.setup()
+        return self._sqla_engine
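As with the data store, the broker class picks its implementation from the configured type, and the MQTT path derives TLS and credentials from the URI scheme and userinfo. A hedged example mirroring `from_dict` above (the key names are an assumption carried over from `APSDataStore`):

```python
# Inferred usage: an "mqtts" scheme turns on TLS and the URI userinfo supplies
# credentials, per _setup_mqtt_event_broker above. Key names are assumptions.
from flowerpower.job_queue.apscheduler._setup.eventbroker import APSEventBroker

broker = APSEventBroker.from_dict({
    "type": "mqtt",
    "uri": "mqtts://user:secret@broker.example.com:8883",
})
event_broker = broker.client  # MQTTEventBroker, ready for the APS scheduler
```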