frequenz-dispatch 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- frequenz/dispatch/__init__.py +29 -0
- frequenz/dispatch/_dispatch.py +251 -0
- frequenz/dispatch/_dispatcher.py +252 -0
- frequenz/dispatch/_event.py +40 -0
- frequenz/dispatch/actor.py +256 -0
- frequenz/dispatch/conftest.py +13 -0
- frequenz/dispatch/py.typed +0 -0
- frequenz_dispatch-0.1.0.dist-info/LICENSE +21 -0
- frequenz_dispatch-0.1.0.dist-info/METADATA +149 -0
- frequenz_dispatch-0.1.0.dist-info/RECORD +12 -0
- frequenz_dispatch-0.1.0.dist-info/WHEEL +5 -0
- frequenz_dispatch-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# License: MIT
# Copyright © 2024 Frequenz Energy-as-a-Service GmbH

"""A highlevel interface for the dispatch API.

A small overview of the most important classes in this module:

* [Dispatcher][frequenz.dispatch.Dispatcher]: The entry point for the API.
* [Dispatch][frequenz.dispatch.Dispatch]: A dispatch type with lots of useful extra functionality.
* [Created][frequenz.dispatch.Created],
  [Updated][frequenz.dispatch.Updated],
  [Deleted][frequenz.dispatch.Deleted]: Dispatch event types.
"""

from ._dispatch import Dispatch, RunningState
from ._dispatcher import Dispatcher, ReceiverFetcher
from ._event import Created, Deleted, DispatchEvent, Updated

# Kept strictly alphabetical (the original list broke the ordering for the
# last two entries) so additions are easy to spot in diffs.
__all__ = [
    "Created",
    "Deleted",
    "Dispatch",
    "DispatchEvent",
    "Dispatcher",
    "ReceiverFetcher",
    "RunningState",
    "Updated",
]
|
|
@@ -0,0 +1,251 @@
|
|
|
1
|
+
# License: MIT
|
|
2
|
+
# Copyright © 2024 Frequenz Energy-as-a-Service GmbH
|
|
3
|
+
|
|
4
|
+
"""Dispatch type with support for next_run calculation."""
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from datetime import datetime, timezone
|
|
10
|
+
from enum import Enum
|
|
11
|
+
from typing import Iterator, cast
|
|
12
|
+
|
|
13
|
+
from dateutil import rrule
|
|
14
|
+
from frequenz.client.dispatch.types import Dispatch as BaseDispatch
|
|
15
|
+
from frequenz.client.dispatch.types import Frequency, Weekday
|
|
16
|
+
|
|
17
|
+
_logger = logging.getLogger(__name__)
"""The logger for this module."""

# Translation table from the API's Frequency enum to dateutil's rrule
# frequency constants.  Frequency.UNSPECIFIED has deliberately no entry:
# callers (see Dispatch.next_run_after) check for it before the lookup.
_RRULE_FREQ_MAP = {
    Frequency.MINUTELY: rrule.MINUTELY,
    Frequency.HOURLY: rrule.HOURLY,
    Frequency.DAILY: rrule.DAILY,
    Frequency.WEEKLY: rrule.WEEKLY,
    Frequency.MONTHLY: rrule.MONTHLY,
}
"""To map from our Frequency enum to the dateutil library enum."""

# Translation table from the API's Weekday enum to dateutil's weekday
# constants.  Weekday.UNSPECIFIED is likewise absent; Dispatch.next_run_after
# rejects dispatches containing it before this map is used.
_RRULE_WEEKDAY_MAP = {
    Weekday.MONDAY: rrule.MO,
    Weekday.TUESDAY: rrule.TU,
    Weekday.WEDNESDAY: rrule.WE,
    Weekday.THURSDAY: rrule.TH,
    Weekday.FRIDAY: rrule.FR,
    Weekday.SATURDAY: rrule.SA,
    Weekday.SUNDAY: rrule.SU,
}
"""To map from our Weekday enum to the dateutil library enum."""
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class RunningState(Enum):
    """The running state of a dispatch.

    Returned by `Dispatch.running()` to tell a consumer whether it should
    currently be executing the dispatch.
    """

    RUNNING = "RUNNING"
    """The dispatch is running."""

    STOPPED = "STOPPED"
    """The dispatch is stopped."""

    DIFFERENT_TYPE = "DIFFERENT_TYPE"
    """The dispatch is for a different type."""
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass(frozen=True)
class Dispatch(BaseDispatch):
    """Dispatch type with extra functionality.

    Extends the client's dispatch type with:

    * a local `deleted` flag (set when the API stops returning the dispatch),
    * book-keeping for running-state-change notifications,
    * schedule helpers (`running`, `until`, `next_run`, `missed_runs`) built
      on top of `dateutil.rrule`.
    """

    deleted: bool = False
    """Whether the dispatch is deleted."""

    running_state_change_synced: datetime | None = None
    """The last time a message was sent about the running state change."""

    def __init__(
        self,
        client_dispatch: BaseDispatch,
        deleted: bool = False,
        running_state_change_synced: datetime | None = None,
    ):
        """Initialize the dispatch.

        Args:
            client_dispatch: The client dispatch.
            deleted: Whether the dispatch is deleted.
            running_state_change_synced: Timestamp of the last running state change message.
        """
        super().__init__(**client_dispatch.__dict__)
        # The dataclass is frozen, so the extra fields must be set through
        # object.__setattr__ instead of plain assignment.
        object.__setattr__(self, "deleted", deleted)
        object.__setattr__(
            self,
            "running_state_change_synced",
            running_state_change_synced,
        )

    def _set_deleted(self) -> None:
        """Mark the dispatch as deleted."""
        object.__setattr__(self, "deleted", True)

    @property
    def _running_status_notified(self) -> bool:
        """Check that the latest running state change notification was sent.

        Returns:
            True if the latest running state change notification was sent, False otherwise.
        """
        return self.running_state_change_synced == self.update_time

    def _set_running_status_notified(self) -> None:
        """Mark the latest running state change notification as sent."""
        object.__setattr__(self, "running_state_change_synced", self.update_time)

    def running(self, type_: str) -> RunningState:
        """Check if the dispatch is currently supposed to be running.

        Args:
            type_: The type of the dispatch that should be running.

        Returns:
            RUNNING if the dispatch is running,
            STOPPED if it is stopped,
            DIFFERENT_TYPE if it is for a different type.
        """
        if self.type != type_:
            return RunningState.DIFFERENT_TYPE

        if not self.active or self.deleted:
            return RunningState.STOPPED

        now = datetime.now(tz=timezone.utc)
        # _until() returns None when no run has started yet, which maps
        # to STOPPED below.
        if until := self._until(now):
            return RunningState.RUNNING if now < until else RunningState.STOPPED

        return RunningState.STOPPED

    @property
    def until(self) -> datetime | None:
        """Time when the dispatch should end.

        Returns the time that a running dispatch should end.
        If the dispatch is not running, None is returned.

        Returns:
            The time when the dispatch should end or None if the dispatch is not running.
        """
        if not self.active or self.deleted:
            return None

        now = datetime.now(tz=timezone.utc)
        return self._until(now)

    @property
    # noqa is needed because of a bug in pydoclint that makes it think a `return` without a return
    # value needs documenting
    def missed_runs(self) -> Iterator[datetime]:  # noqa: DOC405
        """Yield all missed runs of a dispatch.

        Yields all missed runs of a dispatch.

        If a running state change notification was not sent in time
        due to connection issues, this method will yield all missed runs
        since the last sent notification.

        Returns:
            A generator that yields all missed runs of a dispatch.
        """
        # Nothing was missed if the latest change has already been notified.
        if self.update_time == self.running_state_change_synced:
            return

        from_time = self.update_time
        now = datetime.now(tz=timezone.utc)

        while (next_run := self.next_run_after(from_time)) and next_run < now:
            yield next_run
            from_time = next_run

    @property
    def next_run(self) -> datetime | None:
        """Calculate the next run of a dispatch.

        Returns:
            The next run of the dispatch or None if the dispatch is finished.
        """
        return self.next_run_after(datetime.now(tz=timezone.utc))

    def next_run_after(self, after: datetime) -> datetime | None:
        """Calculate the next run of a dispatch.

        Args:
            after: The time to calculate the next run from.

        Returns:
            The next run of the dispatch or None if the dispatch is finished.
        """
        if (
            not self.recurrence.frequency
            or self.recurrence.frequency == Frequency.UNSPECIFIED
        ):
            # One-shot dispatch: the single run either lies ahead or is gone.
            if after > self.start_time:
                return None
            return self.start_time

        # Make sure no weekday is UNSPECIFIED
        if Weekday.UNSPECIFIED in self.recurrence.byweekdays:
            _logger.warning("Dispatch %s has UNSPECIFIED weekday, ignoring...", self.id)
            return None

        # No type information for rrule, so we need to cast
        return cast(datetime | None, self._prepare_rrule().after(after, inc=True))

    def _prepare_rrule(self) -> rrule.rrule:
        """Prepare the rrule object.

        Returns:
            The rrule object.
        """
        count, until = (None, None)
        if end := self.recurrence.end_criteria:
            count = end.count
            until = end.until

        rrule_obj = rrule.rrule(
            freq=_RRULE_FREQ_MAP[self.recurrence.frequency],
            dtstart=self.start_time,
            count=count,
            until=until,
            byminute=self.recurrence.byminutes,
            byhour=self.recurrence.byhours,
            byweekday=[
                _RRULE_WEEKDAY_MAP[weekday] for weekday in self.recurrence.byweekdays
            ],
            bymonthday=self.recurrence.bymonthdays,
            bymonth=self.recurrence.bymonths,
            interval=self.recurrence.interval,
        )

        return rrule_obj

    def _until(self, now: datetime) -> datetime | None:
        """Calculate the time when the dispatch should end.

        If no run has started yet, None is returned.

        Args:
            now: The current time.

        Returns:
            The time when the dispatch should end or None if the dispatch is not running.
        """
        if (
            not self.recurrence.frequency
            or self.recurrence.frequency == Frequency.UNSPECIFIED
        ):
            # Fix: a non-recurring dispatch that has not started yet must not
            # report an end time, otherwise running() would classify it as
            # RUNNING before its start_time (now < start_time + duration holds
            # trivially) and `until` would contradict its documented contract.
            if now < self.start_time:
                return None
            return self.start_time + self.duration

        # Latest occurrence that already started; None if the first run is
        # still in the future (the recurrence path was already correct).
        latest_past_start: datetime | None = self._prepare_rrule().before(now, inc=True)

        if not latest_past_start:
            return None

        return latest_past_start + self.duration
|
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
# License: MIT
|
|
2
|
+
# Copyright © 2024 Frequenz Energy-as-a-Service GmbH
|
|
3
|
+
|
|
4
|
+
"""A highlevel interface for the dispatch API."""
|
|
5
|
+
|
|
6
|
+
import abc
|
|
7
|
+
from typing import Protocol, TypeVar
|
|
8
|
+
|
|
9
|
+
import grpc.aio
|
|
10
|
+
from frequenz.channels import Broadcast, Receiver
|
|
11
|
+
from frequenz.client.dispatch import Client
|
|
12
|
+
|
|
13
|
+
from ._dispatch import Dispatch
|
|
14
|
+
from ._event import DispatchEvent
|
|
15
|
+
from .actor import DispatchingActor
|
|
16
|
+
|
|
17
|
+
ReceivedT_co = TypeVar("ReceivedT_co", covariant=True)
"""The type being received."""


class ReceiverFetcher(Protocol[ReceivedT_co]):
    """An interface that just exposes a `new_receiver` method.

    `Dispatcher` returns its broadcast channels behind this protocol so
    callers get receive-only access (channels satisfy it structurally).
    """

    @abc.abstractmethod
    def new_receiver(
        self, *, name: str | None = None, limit: int = 50
    ) -> Receiver[ReceivedT_co]:
        """Get a receiver from the channel.

        Args:
            name: A name to identify the receiver in the logs.
            limit: The maximum size of the receiver.

        Returns:
            A receiver instance.
        """
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class Dispatcher:
    """A highlevel interface for the dispatch API.

    This class provides a highlevel interface to the dispatch API.
    It provides two channels:

    Lifecycle events:
        A channel that sends a dispatch event message whenever a dispatch
        is created, updated or deleted.

    Running status change:
        Sends a dispatch message whenever a dispatch is ready
        to be executed according to the schedule or the running status of the
        dispatch changed in a way that could potentially require the consumer to start,
        stop or reconfigure itself.

    Example: Processing running state change dispatches
        ```python
        import os
        import grpc.aio
        from frequenz.dispatch import Dispatcher, RunningState
        from unittest.mock import MagicMock

        async def run():
            host = os.getenv("DISPATCH_API_HOST", "localhost")
            port = os.getenv("DISPATCH_API_PORT", "50051")

            service_address = f"{host}:{port}"
            grpc_channel = grpc.aio.insecure_channel(service_address)
            microgrid_id = 1
            dispatcher = Dispatcher(microgrid_id, grpc_channel, service_address)
            await dispatcher.start()

            actor = MagicMock() # replace with your actor

            changed_running_status = dispatcher.running_status_change.new_receiver()

            async for dispatch in changed_running_status:
                match dispatch.running("DEMO_TYPE"):
                    case RunningState.RUNNING:
                        print(f"Executing dispatch {dispatch.id}, due on {dispatch.start_time}")
                        if actor.is_running:
                            actor.reconfigure(
                                components=dispatch.selector,
                                run_parameters=dispatch.payload, # custom actor parameters
                                dry_run=dispatch.dry_run,
                                until=dispatch.until,
                            ) # this will reconfigure the actor
                        else:
                            # this will start a new actor with the given components
                            # and run it for the duration of the dispatch
                            actor.start(
                                components=dispatch.selector,
                                run_parameters=dispatch.payload, # custom actor parameters
                                dry_run=dispatch.dry_run,
                                until=dispatch.until,
                            )
                    case RunningState.STOPPED:
                        actor.stop() # this will stop the actor
                    case RunningState.DIFFERENT_TYPE:
                        pass # dispatch not for this type
        ```

    Example: Getting notification about dispatch lifecycle events
        ```python
        import os
        from typing import assert_never

        import grpc.aio
        from frequenz.dispatch import Created, Deleted, Dispatcher, Updated

        async def run():
            host = os.getenv("DISPATCH_API_HOST", "localhost")
            port = os.getenv("DISPATCH_API_PORT", "50051")

            service_address = f"{host}:{port}"
            grpc_channel = grpc.aio.insecure_channel(service_address)
            microgrid_id = 1
            dispatcher = Dispatcher(microgrid_id, grpc_channel, service_address)
            await dispatcher.start() # this will start the actor

            events_receiver = dispatcher.lifecycle_events.new_receiver()

            async for event in events_receiver:
                match event:
                    case Created(dispatch):
                        print(f"A dispatch was created: {dispatch}")
                    case Deleted(dispatch):
                        print(f"A dispatch was deleted: {dispatch}")
                    case Updated(dispatch):
                        print(f"A dispatch was updated: {dispatch}")
                    case _ as unhandled:
                        assert_never(unhandled)
        ```

    Example: Creating a new dispatch and then modifying it.
        Note that this uses the lower-level `Client` class to create and update the dispatch.

        ```python
        import os
        from datetime import datetime, timedelta, timezone

        import grpc.aio
        from frequenz.client.common.microgrid.components import ComponentCategory

        from frequenz.dispatch import Dispatcher

        async def run():
            host = os.getenv("DISPATCH_API_HOST", "localhost")
            port = os.getenv("DISPATCH_API_PORT", "50051")

            service_address = f"{host}:{port}"
            grpc_channel = grpc.aio.insecure_channel(service_address)
            microgrid_id = 1
            dispatcher = Dispatcher(microgrid_id, grpc_channel, service_address)
            await dispatcher.start() # this will start the actor

            # Create a new dispatch
            new_dispatch = await dispatcher.client.create(
                microgrid_id=microgrid_id,
                _type="ECHO_FREQUENCY", # replace with your own type
                start_time=datetime.now(tz=timezone.utc) + timedelta(minutes=10),
                duration=timedelta(minutes=5),
                selector=ComponentCategory.INVERTER,
                payload={"font": "Times New Roman"}, # Arbitrary payload data
            )

            # Modify the dispatch
            await dispatcher.client.update(
                dispatch_id=new_dispatch.id, new_fields={"duration": timedelta(minutes=10)}
            )

            # Validate the modification
            modified_dispatch = await dispatcher.client.get(new_dispatch.id)
            assert modified_dispatch.duration == timedelta(minutes=10)
        ```
    """

    def __init__(
        self, microgrid_id: int, grpc_channel: grpc.aio.Channel, svc_addr: str
    ):
        """Initialize the dispatcher.

        Args:
            microgrid_id: The microgrid id.
            grpc_channel: The gRPC channel.
            svc_addr: The service address.
        """
        self._running_state_channel = Broadcast[Dispatch](name="running_state_change")
        self._lifecycle_events_channel = Broadcast[DispatchEvent](
            name="lifecycle_events"
        )
        self._client = Client(grpc_channel, svc_addr)
        self._actor = DispatchingActor(
            microgrid_id,
            self._client,
            self._lifecycle_events_channel.new_sender(),
            self._running_state_channel.new_sender(),
        )

    async def start(self) -> None:
        """Start the actor."""
        self._actor.start()

    @property
    def client(self) -> Client:
        """Return the client."""
        return self._client

    @property
    def lifecycle_events(self) -> ReceiverFetcher[DispatchEvent]:
        """Return new, updated or deleted dispatches receiver fetcher.

        Returns:
            A new receiver for new dispatches.
        """
        return self._lifecycle_events_channel

    @property
    def running_status_change(self) -> ReceiverFetcher[Dispatch]:
        """Return running status change receiver fetcher.

        This receiver will receive a message whenever the current running
        status of a dispatch changes.

        Usually, one message per scheduled run is to be expected.
        However, things get complicated when a dispatch was modified:

        If it was currently running and the modification now says
        it should not be running or running with different parameters,
        then a message will be sent.

        In other words: Any change that is expected to make an actor start, stop
        or reconfigure itself with new parameters causes a message to be
        sent.

        A non-exhaustive list of possible changes that will cause a message to be sent:
          - The normal scheduled start_time has been reached
          - The duration of the dispatch has been modified
          - The start_time has been modified to be in the future
          - The component selection changed
          - The active status changed
          - The dry_run status changed
          - The payload changed
          - The dispatch was deleted

        Note: Reaching the end time (start_time + duration) will not
        send a message, except when it was reached by modifying the duration.


        Returns:
            A new receiver for dispatches whose running status changed.
        """
        return self._running_state_channel
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
# License: MIT
|
|
2
|
+
# Copyright © 2024 Frequenz Energy-as-a-Service GmbH
|
|
3
|
+
|
|
4
|
+
"""Dispatch lifecycle events."""
|
|
5
|
+
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
|
|
8
|
+
from ._dispatch import Dispatch
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass(frozen=True)
class Created:
    """A dispatch created event.

    Sent over the lifecycle-events channel when a dispatch first appears.
    """

    dispatch: Dispatch
    """The dispatch that was created."""
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass(frozen=True)
class Updated:
    """A dispatch updated event.

    Sent over the lifecycle-events channel when an existing dispatch changed.
    """

    dispatch: Dispatch
    """The dispatch that was updated."""
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass(frozen=True)
class Deleted:
    """A dispatch deleted event.

    Sent over the lifecycle-events channel when a dispatch disappears.
    """

    dispatch: Dispatch
    """The dispatch that was deleted."""
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
# Tagged union covering every lifecycle event; consumers can exhaustively
# match on it (see the Dispatcher docstring examples).
DispatchEvent = Created | Updated | Deleted
"""Type that is sent over the channel for dispatch updates.

This type is used to send dispatches that were created, updated or deleted
over the channel.
"""
|
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
# License: MIT
|
|
2
|
+
# Copyright © 2024 Frequenz Energy-as-a-Service GmbH
|
|
3
|
+
|
|
4
|
+
"""The dispatch actor."""
|
|
5
|
+
|
|
6
|
+
import asyncio
|
|
7
|
+
import logging
|
|
8
|
+
from datetime import datetime, timedelta, timezone
|
|
9
|
+
|
|
10
|
+
import grpc.aio
|
|
11
|
+
from frequenz.channels import Sender
|
|
12
|
+
from frequenz.channels.timer import SkipMissedAndDrift, Timer
|
|
13
|
+
from frequenz.client.dispatch import Client
|
|
14
|
+
from frequenz.sdk.actor import Actor
|
|
15
|
+
|
|
16
|
+
from ._dispatch import Dispatch, RunningState
|
|
17
|
+
from ._event import Created, Deleted, DispatchEvent, Updated
|
|
18
|
+
|
|
19
|
+
_MAX_AHEAD_SCHEDULE = timedelta(hours=5)
"""The maximum time ahead to schedule a dispatch.

We don't want to schedule dispatches too far ahead,
as they could start drifting if the delay is too long.

This also prevents us from scheduling too many dispatches at once.

The exact value is not important, but should be a few hours and not more than a day.
"""

_DEFAULT_POLL_INTERVAL = timedelta(seconds=10)
"""The default interval to poll the API for dispatch changes."""

_logger = logging.getLogger(__name__)
"""The logger for this module."""
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class DispatchingActor(Actor):
    """Dispatch actor.

    This actor is responsible for handling dispatches for a microgrid.

    This means staying in sync with the API and scheduling
    dispatches as necessary.
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        microgrid_id: int,
        client: Client,
        lifecycle_updates_sender: Sender[DispatchEvent],
        running_state_change_sender: Sender[Dispatch],
        poll_interval: timedelta = _DEFAULT_POLL_INTERVAL,
    ) -> None:
        """Initialize the actor.

        Args:
            microgrid_id: The microgrid ID to handle dispatches for.
            client: The client to use for fetching dispatches.
            lifecycle_updates_sender: A sender for dispatch lifecycle events.
            running_state_change_sender: A sender for dispatch running state changes.
            poll_interval: The interval to poll the API for dispatch changes.
        """
        super().__init__(name="dispatch")

        self._client = client
        # Last known snapshot of dispatches, keyed by dispatch id.
        self._dispatches: dict[int, Dispatch] = {}
        # Per-dispatch schedule tasks, keyed by dispatch id.
        self._scheduled: dict[int, asyncio.Task[None]] = {}
        self._microgrid_id = microgrid_id
        self._lifecycle_updates_sender = lifecycle_updates_sender
        self._running_state_change_sender = running_state_change_sender
        self._poll_timer = Timer(poll_interval, SkipMissedAndDrift())

    async def _run(self) -> None:
        """Run the actor.

        Polls the API on every timer tick; on cancellation every scheduled
        per-dispatch task is cancelled before re-raising.
        """
        self._poll_timer.reset()
        try:
            async for _ in self._poll_timer:
                await self._fetch()
        except asyncio.CancelledError:
            for task in self._scheduled.values():
                task.cancel()
            raise

    async def _fetch(self) -> None:
        """Fetch all relevant dispatches.

        Compares the fetched set against the previous snapshot and emits
        Created/Updated/Deleted lifecycle events plus running-state-change
        messages as appropriate.
        """
        old_dispatches = self._dispatches
        self._dispatches = {}

        try:
            _logger.info("Fetching dispatches for microgrid %s", self._microgrid_id)
            async for client_dispatch in self._client.list(
                microgrid_id=self._microgrid_id
            ):
                dispatch = Dispatch(client_dispatch)

                # Fix: store the very same instance that is sent in events and
                # passed to _send_running_state_change.  The original code
                # constructed a second, independent Dispatch here, so state
                # recorded on the event instance (the running-state-change
                # sync timestamp set by _set_running_status_notified) was
                # written to an object that was immediately discarded.
                self._dispatches[dispatch.id] = dispatch
                old_dispatch = old_dispatches.pop(dispatch.id, None)
                if not old_dispatch:
                    self._update_dispatch_schedule(dispatch, None)
                    _logger.info("New dispatch: %s", dispatch)
                    await self._lifecycle_updates_sender.send(
                        Created(dispatch=dispatch)
                    )
                elif dispatch.update_time != old_dispatch.update_time:
                    self._update_dispatch_schedule(dispatch, old_dispatch)
                    _logger.info("Updated dispatch: %s", dispatch)
                    await self._lifecycle_updates_sender.send(
                        Updated(dispatch=dispatch)
                    )

                if self._running_state_change(dispatch, old_dispatch):
                    await self._send_running_state_change(dispatch)

        except grpc.aio.AioRpcError as error:
            _logger.error("Error fetching dispatches: %s", error)
            # Keep the previous snapshot on failure so dispatches are not
            # spuriously reported as deleted on a transient API error.
            self._dispatches = old_dispatches
            return

        # Everything left in old_dispatches was not returned by the API
        # anymore, i.e. it was deleted.
        for dispatch in old_dispatches.values():
            _logger.info("Deleted dispatch: %s", dispatch)
            dispatch._set_deleted()  # pylint: disable=protected-access
            await self._lifecycle_updates_sender.send(Deleted(dispatch=dispatch))
            if task := self._scheduled.pop(dispatch.id, None):
                task.cancel()

            if self._running_state_change(None, dispatch):
                await self._send_running_state_change(dispatch)

    def _update_dispatch_schedule(
        self, dispatch: Dispatch, old_dispatch: Dispatch | None
    ) -> None:
        """Update the schedule for a dispatch.

        Schedules, reschedules or cancels the dispatch based on the start_time
        and active status.

        For example:
        * when the start_time changes, the dispatch is rescheduled
        * when the dispatch is deactivated, the dispatch is cancelled

        Args:
            dispatch: The dispatch to update the schedule for.
            old_dispatch: The old dispatch, if available.
        """
        # A changed start_time invalidates the running task; cancel it so a
        # fresh one is created below.
        if (
            old_dispatch
            and old_dispatch.active
            and old_dispatch.start_time != dispatch.start_time
        ):
            if task := self._scheduled.pop(dispatch.id, None):
                task.cancel()

        if dispatch.active and dispatch.id not in self._scheduled:
            self._scheduled[dispatch.id] = asyncio.create_task(
                self._schedule_task(dispatch)
            )

    async def _schedule_task(self, dispatch: Dispatch) -> None:
        """Wait for a dispatch to become ready.

        Waits for the dispatches next run and then notifies that it is ready.

        Args:
            dispatch: The dispatch to schedule.
        """

        def next_run_info() -> tuple[datetime, datetime] | None:
            # Returns (now, next_run) or None when the dispatch is finished.
            now = datetime.now(tz=timezone.utc)
            next_run = dispatch.next_run_after(now)

            if next_run is None:
                return None

            return now, next_run

        while pair := next_run_info():
            now, next_time = pair

            # Don't sleep all the way to a far-future run; re-evaluate every
            # _MAX_AHEAD_SCHEDULE so long sleeps can't drift.
            if next_time - now > _MAX_AHEAD_SCHEDULE:
                await asyncio.sleep(_MAX_AHEAD_SCHEDULE.total_seconds())
                continue

            _logger.info("Dispatch %s scheduled for %s", dispatch.id, next_time)
            await asyncio.sleep((next_time - now).total_seconds())

            _logger.info("Dispatch ready: %s", dispatch)
            await self._running_state_change_sender.send(dispatch)

        _logger.info("Dispatch finished: %s", dispatch)
        self._scheduled.pop(dispatch.id)

    def _running_state_change(
        self, updated_dispatch: Dispatch | None, previous_dispatch: Dispatch | None
    ) -> bool:
        """Check if the running state of a dispatch has changed.

        Checks if any of the running state changes to the dispatch
        require a new message to be sent to the actor so that it can potentially
        change its runtime configuration or start/stop itself.

        Also checks if a dispatch update was not sent due to connection issues
        in which case we need to send the message now.

        Args:
            updated_dispatch: The new dispatch, if available.
            previous_dispatch: The old dispatch, if available.

        Returns:
            True if the running state has changed, False otherwise.
        """
        # New dispatch
        if previous_dispatch is None:
            assert updated_dispatch is not None

            # Client was not informed about the dispatch, do it now
            # pylint: disable=protected-access
            if not updated_dispatch._running_status_notified:
                return True

        # Deleted dispatch
        if updated_dispatch is None:
            assert previous_dispatch is not None
            return (
                previous_dispatch.running(previous_dispatch.type)
                == RunningState.RUNNING
            )

        # If any of the runtime attributes changed, we need to send a message.
        # NOTE(review): "running" resolves to the *bound method* here, and
        # bound-method equality falls back to instance equality, so any field
        # difference makes this compare unequal — confirm this
        # "always notify on update" behavior is intended.
        runtime_state_attributes = [
            "running",
            "type",
            "selector",
            "duration",
            "dry_run",
            "payload",
        ]

        for attribute in runtime_state_attributes:
            if getattr(updated_dispatch, attribute) != getattr(
                previous_dispatch, attribute
            ):
                return True

        return False

    async def _send_running_state_change(self, dispatch: Dispatch) -> None:
        """Send a running state change message.

        Args:
            dispatch: The dispatch that changed.
        """
        await self._running_state_change_sender.send(dispatch)
        # Update the last sent notification time
        # so we know if this change was already sent
        dispatch._set_running_status_notified()  # pylint: disable=protected-access
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# License: MIT
# Copyright © 2024 Frequenz Energy-as-a-Service GmbH

"""Validate docstring code examples.

Code examples are often wrapped in triple backticks (```) within docstrings.
This plugin extracts these code examples and validates them using pylint.
"""

from frequenz.repo.config.pytest import examples
from sybil import Sybil

# Hooking Sybil into pytest's collection makes each extracted docstring
# example run as its own test item.
pytest_collect_file = Sybil(**examples.get_sybil_arguments()).pytest()
|
|
File without changes
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright © 2024 Frequenz Energy-as-a-Service GmbH
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: frequenz-dispatch
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A highlevel interface for the dispatch API
|
|
5
|
+
Author-email: Frequenz Energy-as-a-Service GmbH <floss@frequenz.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Documentation, https://frequenz-floss.github.io/frequenz-dispatch-python/
|
|
8
|
+
Project-URL: Changelog, https://github.com/frequenz-floss/frequenz-dispatch-python/releases
|
|
9
|
+
Project-URL: Issues, https://github.com/frequenz-floss/frequenz-dispatch-python/issues
|
|
10
|
+
Project-URL: Repository, https://github.com/frequenz-floss/frequenz-dispatch-python
|
|
11
|
+
Project-URL: Support, https://github.com/frequenz-floss/frequenz-dispatch-python/discussions/categories/support
|
|
12
|
+
Keywords: frequenz,python,actor,frequenz-dispatch,dispatch,highlevel,api
|
|
13
|
+
Classifier: Development Status :: 3 - Alpha
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
19
|
+
Classifier: Typing :: Typed
|
|
20
|
+
Requires-Python: <4,>=3.11
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
License-File: LICENSE
|
|
23
|
+
Requires-Dist: python-dateutil <3.0,>=2.8.2
|
|
24
|
+
Requires-Dist: typing-extensions ==4.11.0
|
|
25
|
+
Requires-Dist: frequenz-sdk ==v1.0.0-rc6
|
|
26
|
+
Requires-Dist: frequenz-channels ==1.0.0
|
|
27
|
+
Requires-Dist: frequenz-api-dispatch <0.14,>=0.13.0
|
|
28
|
+
Requires-Dist: frequenz-client-dispatch ==0.2.0
|
|
29
|
+
Requires-Dist: frequenz-client-base <0.4.0,>=0.3.0
|
|
30
|
+
Requires-Dist: frequenz-client-common <0.2.0,>=0.1.0
|
|
31
|
+
Provides-Extra: dev
|
|
32
|
+
Requires-Dist: frequenz-dispatch[dev-flake8,dev-formatting,dev-mkdocs,dev-mypy,dev-noxfile,dev-pylint,dev-pytest] ; extra == 'dev'
|
|
33
|
+
Provides-Extra: dev-flake8
|
|
34
|
+
Requires-Dist: flake8 ==7.0.0 ; extra == 'dev-flake8'
|
|
35
|
+
Requires-Dist: flake8-docstrings ==1.7.0 ; extra == 'dev-flake8'
|
|
36
|
+
Requires-Dist: flake8-pyproject ==1.2.3 ; extra == 'dev-flake8'
|
|
37
|
+
Requires-Dist: pydoclint ==0.4.1 ; extra == 'dev-flake8'
|
|
38
|
+
Requires-Dist: pydocstyle ==6.3.0 ; extra == 'dev-flake8'
|
|
39
|
+
Provides-Extra: dev-formatting
|
|
40
|
+
Requires-Dist: black ==24.4.2 ; extra == 'dev-formatting'
|
|
41
|
+
Requires-Dist: isort ==5.13.2 ; extra == 'dev-formatting'
|
|
42
|
+
Provides-Extra: dev-mkdocs
|
|
43
|
+
Requires-Dist: black ==24.4.2 ; extra == 'dev-mkdocs'
|
|
44
|
+
Requires-Dist: Markdown ==3.6 ; extra == 'dev-mkdocs'
|
|
45
|
+
Requires-Dist: mike ==2.1.0 ; extra == 'dev-mkdocs'
|
|
46
|
+
Requires-Dist: mkdocs-gen-files ==0.5.0 ; extra == 'dev-mkdocs'
|
|
47
|
+
Requires-Dist: mkdocs-literate-nav ==0.6.1 ; extra == 'dev-mkdocs'
|
|
48
|
+
Requires-Dist: mkdocs-macros-plugin ==1.0.5 ; extra == 'dev-mkdocs'
|
|
49
|
+
Requires-Dist: mkdocs-material ==9.5.20 ; extra == 'dev-mkdocs'
|
|
50
|
+
Requires-Dist: mkdocstrings[python] ==0.25.0 ; extra == 'dev-mkdocs'
|
|
51
|
+
Requires-Dist: frequenz-repo-config[lib] ==0.9.2 ; extra == 'dev-mkdocs'
|
|
52
|
+
Provides-Extra: dev-mypy
|
|
53
|
+
Requires-Dist: mypy ==1.10.0 ; extra == 'dev-mypy'
|
|
54
|
+
Requires-Dist: types-Markdown ==3.6.0.20240316 ; extra == 'dev-mypy'
|
|
55
|
+
Requires-Dist: types-python-dateutil ==2.9.0.20240316 ; extra == 'dev-mypy'
|
|
56
|
+
Requires-Dist: frequenz-dispatch[dev-mkdocs,dev-noxfile,dev-pytest] ; extra == 'dev-mypy'
|
|
57
|
+
Provides-Extra: dev-noxfile
|
|
58
|
+
Requires-Dist: uv ==0.1.39 ; extra == 'dev-noxfile'
|
|
59
|
+
Requires-Dist: nox ==2024.4.15 ; extra == 'dev-noxfile'
|
|
60
|
+
Requires-Dist: frequenz-repo-config[lib] ==0.9.2 ; extra == 'dev-noxfile'
|
|
61
|
+
Provides-Extra: dev-pylint
|
|
62
|
+
Requires-Dist: pylint ==3.1.0 ; extra == 'dev-pylint'
|
|
63
|
+
Requires-Dist: frequenz-dispatch[dev-mkdocs,dev-noxfile,dev-pytest] ; extra == 'dev-pylint'
|
|
64
|
+
Provides-Extra: dev-pytest
|
|
65
|
+
Requires-Dist: pytest ==8.2.0 ; extra == 'dev-pytest'
|
|
66
|
+
Requires-Dist: frequenz-repo-config[extra-lint-examples] ==0.9.2 ; extra == 'dev-pytest'
|
|
67
|
+
Requires-Dist: pytest-mock ==3.14.0 ; extra == 'dev-pytest'
|
|
68
|
+
Requires-Dist: pytest-asyncio ==0.23.6 ; extra == 'dev-pytest'
|
|
69
|
+
Requires-Dist: async-solipsism ==0.6 ; extra == 'dev-pytest'
|
|
70
|
+
Requires-Dist: time-machine ==2.14.1 ; extra == 'dev-pytest'
|
|
71
|
+
|
|
72
|
+
# Dispatch Highlevel Interface
|
|
73
|
+
|
|
74
|
+
[](https://github.com/frequenz-floss/frequenz-dispatch-python/actions/workflows/ci.yaml)
|
|
75
|
+
[](https://pypi.org/project/frequenz-dispatch/)
|
|
76
|
+
[](https://frequenz-floss.github.io/frequenz-dispatch-python/)
|
|
77
|
+
|
|
78
|
+
## Introduction
|
|
79
|
+
|
|
80
|
+
A highlevel interface for the dispatch API.
|
|
81
|
+
|
|
82
|
+
See [the documentation](https://frequenz-floss.github.io/frequenz-dispatch-python/v0.1/reference/frequenz/dispatch) for more information.
|
|
83
|
+
|
|
84
|
+
## Usage
|
|
85
|
+
|
|
86
|
+
The [`Dispatcher` class](https://frequenz-floss.github.io/frequenz-dispatch-python/v0.1/reference/frequenz/dispatch/#frequenz.dispatch.Dispatcher), the main entry point for the API, provides two channels:
|
|
87
|
+
|
|
88
|
+
* [Lifecycle events](https://frequenz-floss.github.io/frequenz-dispatch-python/v0.1/reference/frequenz/dispatch/#frequenz.dispatch.Dispatcher.lifecycle_events): A channel that sends a message whenever a [Dispatch][frequenz.dispatch.Dispatch] is created, updated or deleted.
|
|
89
|
+
* [Running status change](https://frequenz-floss.github.io/frequenz-dispatch-python/v0.1/reference/frequenz/dispatch/#frequenz.dispatch.Dispatcher.running_status_change): Sends a dispatch message whenever a dispatch is ready to be executed according to the schedule or the running status of the dispatch changed in a way that could potentially require the actor to start, stop or reconfigure itself.
|
|
90
|
+
|
|
91
|
+
### Example using the running status change channel
|
|
92
|
+
|
|
93
|
+
```python
|
|
94
|
+
import os
|
|
95
|
+
import grpc.aio
|
|
96
|
+
from unittest.mock import MagicMock
|
|
97
|
+
|
|
98
|
+
async def run():
|
|
99
|
+
host = os.getenv("DISPATCH_API_HOST", "localhost")
|
|
100
|
+
port = os.getenv("DISPATCH_API_PORT", "50051")
|
|
101
|
+
|
|
102
|
+
service_address = f"{host}:{port}"
|
|
103
|
+
grpc_channel = grpc.aio.insecure_channel(service_address)
|
|
104
|
+
microgrid_id = 1
|
|
105
|
+
dispatcher = Dispatcher(microgrid_id, grpc_channel, service_address)
|
|
106
|
+
await dispatcher.start()
|
|
107
|
+
|
|
108
|
+
actor = MagicMock() # replace with your actor
|
|
109
|
+
|
|
110
|
+
changed_running_status_rx = dispatcher.running_status_change.new_receiver()
|
|
111
|
+
|
|
112
|
+
async for dispatch in changed_running_status_rx:
|
|
113
|
+
match dispatch.running("DEMO_TYPE"):
|
|
114
|
+
case RunningState.RUNNING:
|
|
115
|
+
print(f"Executing dispatch {dispatch.id}, due on {dispatch.start_time}")
|
|
116
|
+
if actor.is_running:
|
|
117
|
+
actor.reconfigure(
|
|
118
|
+
components=dispatch.selector,
|
|
119
|
+
run_parameters=dispatch.payload, # custom actor parameters
|
|
120
|
+
dry_run=dispatch.dry_run,
|
|
121
|
+
until=dispatch.until,
|
|
122
|
+
) # this will reconfigure the actor
|
|
123
|
+
else:
|
|
124
|
+
# this will start a new actor with the given components
|
|
125
|
+
# and run it for the duration of the dispatch
|
|
126
|
+
actor.start(
|
|
127
|
+
components=dispatch.selector,
|
|
128
|
+
run_parameters=dispatch.payload, # custom actor parameters
|
|
129
|
+
dry_run=dispatch.dry_run,
|
|
130
|
+
until=dispatch.until,
|
|
131
|
+
)
|
|
132
|
+
case RunningState.STOPPED:
|
|
133
|
+
actor.stop() # this will stop the actor
|
|
134
|
+
case RunningState.DIFFERENT_TYPE:
|
|
135
|
+
pass # dispatch not for this type
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
## Supported Platforms
|
|
139
|
+
|
|
140
|
+
The following platforms are officially supported (tested):
|
|
141
|
+
|
|
142
|
+
- **Python:** 3.11
|
|
143
|
+
- **Operating System:** Ubuntu Linux 20.04
|
|
144
|
+
- **Architectures:** amd64, arm64
|
|
145
|
+
|
|
146
|
+
## Contributing
|
|
147
|
+
|
|
148
|
+
If you want to know how to build this project and contribute to it, please
|
|
149
|
+
check out the [Contributing Guide](CONTRIBUTING.md).
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
frequenz/dispatch/__init__.py,sha256=jHsYSQPqc9TAOojlARbb63UsulHv5_gfyXb_mpkIWJQ,822
|
|
2
|
+
frequenz/dispatch/_dispatch.py,sha256=NAhGufnIuQgYbef-tgqwCdq503khW_BEQM7JsXnomDc,7959
|
|
3
|
+
frequenz/dispatch/_dispatcher.py,sha256=uXHdG43ig7nuFXkFF3payCFwi_OyC02Rpltj_bOVViU,9509
|
|
4
|
+
frequenz/dispatch/_event.py,sha256=70fhuxMJ4CY_Gi-9kGh5URFpguD2WeXUgn3g1tevbTQ,801
|
|
5
|
+
frequenz/dispatch/actor.py,sha256=JnIiTwqPuuhUfnVODj7YfSW0WAZzDTkRwZO1A3IwAHA,9226
|
|
6
|
+
frequenz/dispatch/conftest.py,sha256=kxmvkzTdvGfh7SiDINIFX0FG9PU0EoKROl9YY75zN8w,409
|
|
7
|
+
frequenz/dispatch/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
8
|
+
frequenz_dispatch-0.1.0.dist-info/LICENSE,sha256=zt0sW1KvE_KWE2ILOabrQYlfOoP0zUZXC3xCLrzGpIA,1089
|
|
9
|
+
frequenz_dispatch-0.1.0.dist-info/METADATA,sha256=8sZYjNr1gztS7sjtbF08YH1j_iTYjaE98XNYtIzbQRk,7580
|
|
10
|
+
frequenz_dispatch-0.1.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
|
|
11
|
+
frequenz_dispatch-0.1.0.dist-info/top_level.txt,sha256=x08GRcWytsyKXa2Ayme9e5pg3L5Kcq6lw_BaQmToMO4,9
|
|
12
|
+
frequenz_dispatch-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
frequenz
|