frequenz-dispatch 0.3.0__tar.gz → 0.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22) hide show
  1. {frequenz-dispatch-0.3.0/src/frequenz_dispatch.egg-info → frequenz-dispatch-0.3.2}/PKG-INFO +1 -1
  2. frequenz-dispatch-0.3.2/RELEASE_NOTES.md +5 -0
  3. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/pyproject.toml +13 -12
  4. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz/dispatch/__init__.py +5 -0
  5. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz/dispatch/_dispatch.py +14 -0
  6. frequenz-dispatch-0.3.2/src/frequenz/dispatch/_managing_actor.py +180 -0
  7. frequenz-dispatch-0.3.2/src/frequenz/dispatch/actor.py +347 -0
  8. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2/src/frequenz_dispatch.egg-info}/PKG-INFO +1 -1
  9. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz_dispatch.egg-info/SOURCES.txt +1 -0
  10. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz_dispatch.egg-info/requires.txt +12 -12
  11. frequenz-dispatch-0.3.0/RELEASE_NOTES.md +0 -20
  12. frequenz-dispatch-0.3.0/src/frequenz/dispatch/actor.py +0 -255
  13. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/LICENSE +0 -0
  14. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/MANIFEST.in +0 -0
  15. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/README.md +0 -0
  16. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/setup.cfg +0 -0
  17. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz/dispatch/_dispatcher.py +0 -0
  18. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz/dispatch/_event.py +0 -0
  19. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz/dispatch/conftest.py +0 -0
  20. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz/dispatch/py.typed +0 -0
  21. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz_dispatch.egg-info/dependency_links.txt +0 -0
  22. {frequenz-dispatch-0.3.0 → frequenz-dispatch-0.3.2}/src/frequenz_dispatch.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: frequenz-dispatch
3
- Version: 0.3.0
3
+ Version: 0.3.2
4
4
  Summary: A highlevel interface for the dispatch API
5
5
  Author-email: Frequenz Energy-as-a-Service GmbH <floss@frequenz.com>
6
6
  License: MIT
@@ -0,0 +1,5 @@
1
+ # Dispatch Highlevel Interface Release Notes
2
+
3
+ ## Summary
4
+
5
+ * This fixes a crash when the `YEARLY` frequency is used in a dispatch.
@@ -39,9 +39,9 @@ dependencies = [
39
39
  # Make sure to update the version for cross-referencing also in the
40
40
  # mkdocs.yml file when changing the version here (look for the config key
41
41
  # plugins.mkdocstrings.handlers.python.import)
42
- "frequenz-sdk == 1.0.0-rc900",
43
- "frequenz-channels >= 1.1.0, < 2.0.0",
44
- "frequenz-client-dispatch >= 0.6.0, < 0.7.0",
42
+ "frequenz-sdk >= 1.0.0-rc900, < 1.0.0-rc1000",
43
+ "frequenz-channels >= 1.2.0, < 2.0.0",
44
+ "frequenz-client-dispatch >= 0.7.1, < 0.8.0",
45
45
  ]
46
46
  dynamic = ["version"]
47
47
 
@@ -54,7 +54,7 @@ dev-flake8 = [
54
54
  "flake8 == 7.1.1",
55
55
  "flake8-docstrings == 1.7.0",
56
56
  "flake8-pyproject == 1.2.3", # For reading the flake8 config from pyproject.toml
57
- "pydoclint == 0.5.6",
57
+ "pydoclint == 0.5.9",
58
58
  "pydocstyle == 6.3.0",
59
59
  ]
60
60
  dev-formatting = ["black == 24.8.0", "isort == 5.13.2"]
@@ -64,32 +64,32 @@ dev-mkdocs = [
64
64
  "mike == 2.1.3",
65
65
  "mkdocs-gen-files == 0.5.0",
66
66
  "mkdocs-literate-nav == 0.6.1",
67
- "mkdocs-macros-plugin == 1.0.5",
68
- "mkdocs-material == 9.5.34",
69
- "mkdocstrings[python] == 0.25.2",
70
- "mkdocstrings-python == 1.10.9",
67
+ "mkdocs-macros-plugin == 1.2.0",
68
+ "mkdocs-material == 9.5.39",
69
+ "mkdocstrings[python] == 0.26.1",
70
+ "mkdocstrings-python == 1.11.1",
71
71
  "frequenz-repo-config[lib] == 0.10.0",
72
72
  ]
73
73
  dev-mypy = [
74
74
  "mypy == 1.11.2",
75
75
  "grpc-stubs == 1.53.0.5", # This dependency introduces breaking changes in patch releases
76
76
  "types-Markdown == 3.7.0.20240822",
77
- "types-python-dateutil==2.9.0.20240821",
77
+ "types-python-dateutil==2.9.0.20240906",
78
78
  # For checking the noxfile, docs/ script, and tests
79
79
  "frequenz-dispatch[dev-mkdocs,dev-noxfile,dev-pytest]",
80
80
  ]
81
81
  dev-noxfile = [
82
- "uv == 0.4.1",
82
+ "uv == 0.4.17",
83
83
  "nox == 2024.4.15",
84
84
  "frequenz-repo-config[lib] == 0.10.0",
85
85
  ]
86
86
  dev-pylint = [
87
- "pylint == 3.2.7",
87
+ "pylint == 3.3.1",
88
88
  # For checking the noxfile, docs/ script, and tests
89
89
  "frequenz-dispatch[dev-mkdocs,dev-noxfile,dev-pytest]",
90
90
  ]
91
91
  dev-pytest = [
92
- "pytest == 8.3.2",
92
+ "pytest == 8.3.3",
93
93
  "frequenz-repo-config[extra-lint-examples] == 0.10.0",
94
94
  "pytest-mock == 3.14.0",
95
95
  "pytest-asyncio == 0.24.0",
@@ -165,6 +165,7 @@ disable = [
165
165
  [tool.pytest.ini_options]
166
166
  testpaths = ["tests", "src"]
167
167
  asyncio_mode = "auto"
168
+ asyncio_default_fixture_loop_scope = "function"
168
169
  required_plugins = ["pytest-asyncio", "pytest-mock"]
169
170
 
170
171
  [tool.mypy]
@@ -7,6 +7,8 @@ A small overview of the most important classes in this module:
7
7
 
8
8
  * [Dispatcher][frequenz.dispatch.Dispatcher]: The entry point for the API.
9
9
  * [Dispatch][frequenz.dispatch.Dispatch]: A dispatch type with lots of useful extra functionality.
10
+ * [DispatchManagingActor][frequenz.dispatch.DispatchManagingActor]: An actor to
11
+ manage other actors based on incoming dispatches.
10
12
  * [Created][frequenz.dispatch.Created],
11
13
  [Updated][frequenz.dispatch.Updated],
12
14
  [Deleted][frequenz.dispatch.Deleted]: Dispatch event types.
@@ -16,6 +18,7 @@ A small overview of the most important classes in this module:
16
18
  from ._dispatch import Dispatch, RunningState
17
19
  from ._dispatcher import Dispatcher, ReceiverFetcher
18
20
  from ._event import Created, Deleted, DispatchEvent, Updated
21
+ from ._managing_actor import DispatchManagingActor, DispatchUpdate
19
22
 
20
23
  __all__ = [
21
24
  "Created",
@@ -26,4 +29,6 @@ __all__ = [
26
29
  "Updated",
27
30
  "Dispatch",
28
31
  "RunningState",
32
+ "DispatchManagingActor",
33
+ "DispatchUpdate",
29
34
  ]
@@ -118,6 +118,13 @@ class Dispatch(BaseDispatch):
118
118
  return RunningState.STOPPED
119
119
 
120
120
  now = datetime.now(tz=timezone.utc)
121
+
122
+ if now < self.start_time:
123
+ return RunningState.STOPPED
124
+ # A dispatch without duration is always running once it started
125
+ if self.duration is None:
126
+ return RunningState.RUNNING
127
+
121
128
  if until := self._until(now):
122
129
  return RunningState.RUNNING if now < until else RunningState.STOPPED
123
130
 
@@ -185,6 +192,7 @@ class Dispatch(BaseDispatch):
185
192
  if (
186
193
  not self.recurrence.frequency
187
194
  or self.recurrence.frequency == Frequency.UNSPECIFIED
195
+ or self.duration is None # Infinite duration
188
196
  ):
189
197
  if after > self.start_time:
190
198
  return None
@@ -236,7 +244,13 @@ class Dispatch(BaseDispatch):
236
244
 
237
245
  Returns:
238
246
  The time when the dispatch should end or None if the dispatch is not running.
247
+
248
+ Raises:
249
+ ValueError: If the dispatch has no duration.
239
250
  """
251
+ if self.duration is None:
252
+ raise ValueError("_until: Dispatch has no duration")
253
+
240
254
  if (
241
255
  not self.recurrence.frequency
242
256
  or self.recurrence.frequency == Frequency.UNSPECIFIED
@@ -0,0 +1,180 @@
1
+ # License: All rights reserved
2
+ # Copyright © 2024 Frequenz Energy-as-a-Service GmbH
3
+
4
+ """Helper class to manage actors based on dispatches."""
5
+
6
+ import logging
7
+ from dataclasses import dataclass
8
+ from typing import Any, Set
9
+
10
+ from frequenz.channels import Receiver, Sender
11
+ from frequenz.client.dispatch.types import ComponentSelector
12
+ from frequenz.sdk.actor import Actor
13
+
14
+ from ._dispatch import Dispatch, RunningState
15
+
16
+ _logger = logging.getLogger(__name__)
17
+
18
+
19
+ @dataclass(frozen=True, kw_only=True)
20
+ class DispatchUpdate:
21
+ """Event emitted when the dispatch changes."""
22
+
23
+ components: ComponentSelector
24
+ """Components to be used."""
25
+
26
+ dry_run: bool
27
+ """Whether this is a dry run."""
28
+
29
+ options: dict[str, Any]
30
+ """Additional options."""
31
+
32
+
33
+ class DispatchManagingActor(Actor):
34
+ """Helper class to manage actors based on dispatches.
35
+
36
+ Example usage:
37
+
38
+ ```python
39
+ import os
40
+ import asyncio
41
+ from frequenz.dispatch import Dispatcher, DispatchManagingActor, DispatchUpdate
42
+ from frequenz.client.dispatch.types import ComponentSelector
43
+ from frequenz.client.common.microgrid.components import ComponentCategory
44
+
45
+ from frequenz.channels import Receiver, Broadcast
46
+
47
+ class MyActor(Actor):
48
+ def __init__(self, updates_channel: Receiver[DispatchUpdate]):
49
+ super().__init__()
50
+ self._updates_channel = updates_channel
51
+ self._dry_run: bool
52
+ self._options : dict[str, Any]
53
+
54
+ async def _run(self) -> None:
55
+ while True:
56
+ update = await self._updates_channel.receive()
57
+ print("Received update:", update)
58
+
59
+ self.set_components(update.components)
60
+ self._dry_run = update.dry_run
61
+ self._options = update.options
62
+
63
+ def set_components(self, components: ComponentSelector) -> None:
64
+ match components:
65
+ case [int(), *_] as component_ids:
66
+ print("Dispatch: Setting components to %s", components)
67
+ case [ComponentCategory.BATTERY, *_]:
68
+ print("Dispatch: Using all battery components")
69
+ case unsupported:
70
+ print(
71
+ "Dispatch: Requested an unsupported selector %r, "
72
+ "but only component IDs or category BATTERY are supported.",
73
+ unsupported,
74
+ )
75
+
76
+ async def run():
77
+ url = os.getenv("DISPATCH_API_URL", "grpc://fz-0004.frequenz.io:50051")
78
+ key = os.getenv("DISPATCH_API_KEY", "some-key")
79
+
80
+ microgrid_id = 1
81
+
82
+ dispatcher = Dispatcher(
83
+ microgrid_id=microgrid_id,
84
+ server_url=url,
85
+ key=key
86
+ )
87
+
88
+ # Create update channel to receive dispatch update events pre-start and mid-run
89
+ dispatch_updates_channel = Broadcast[DispatchUpdate](name="dispatch_updates_channel")
90
+
91
+ # Start actor and give it a dispatch updates channel receiver
92
+ my_actor = MyActor(dispatch_updates_channel.new_receiver())
93
+
94
+ status_receiver = dispatcher.running_status_change.new_receiver()
95
+
96
+ managing_actor = DispatchManagingActor(
97
+ actor=my_actor,
98
+ dispatch_type="EXAMPLE",
99
+ running_status_receiver=status_receiver,
100
+ updates_sender=dispatch_updates_channel.new_sender(),
101
+ )
102
+
103
+ await asyncio.gather(dispatcher.start(), managing_actor.start())
104
+ ```
105
+ """
106
+
107
+ def __init__(
108
+ self,
109
+ actor: Actor | Set[Actor],
110
+ dispatch_type: str,
111
+ running_status_receiver: Receiver[Dispatch],
112
+ updates_sender: Sender[DispatchUpdate] | None = None,
113
+ ) -> None:
114
+ """Initialize the dispatch handler.
115
+
116
+ Args:
117
+ actor: A set of actors or a single actor to manage.
118
+ dispatch_type: The type of dispatches to handle.
119
+ running_status_receiver: The receiver for dispatch running status changes.
120
+ updates_sender: The sender for dispatch events
121
+ """
122
+ super().__init__()
123
+ self._dispatch_rx = running_status_receiver
124
+ self._actors = frozenset([actor] if isinstance(actor, Actor) else actor)
125
+ self._dispatch_type = dispatch_type
126
+ self._updates_sender = updates_sender
127
+
128
+ def _start_actors(self) -> None:
129
+ """Start all actors."""
130
+ for actor in self._actors:
131
+ if actor.is_running:
132
+ _logger.warning("Actor %s is already running", actor.name)
133
+ else:
134
+ actor.start()
135
+
136
+ async def _stop_actors(self, msg: str) -> None:
137
+ """Stop all actors.
138
+
139
+ Args:
140
+ msg: The message to be passed to the actors being stopped.
141
+ """
142
+ for actor in self._actors:
143
+ if actor.is_running:
144
+ await actor.stop(msg)
145
+ else:
146
+ _logger.warning("Actor %s is not running", actor.name)
147
+
148
+ async def _run(self) -> None:
149
+ """Wait for dispatches and handle them."""
150
+ async for dispatch in self._dispatch_rx:
151
+ await self._handle_dispatch(dispatch=dispatch)
152
+
153
+ async def _handle_dispatch(self, dispatch: Dispatch) -> None:
154
+ """Handle a dispatch.
155
+
156
+ Args:
157
+ dispatch: The dispatch to handle.
158
+ """
159
+ running = dispatch.running(self._dispatch_type)
160
+ match running:
161
+ case RunningState.STOPPED:
162
+ _logger.info("Stopped by dispatch %s", dispatch.id)
163
+ await self._stop_actors("Dispatch stopped")
164
+ case RunningState.RUNNING:
165
+ if self._updates_sender is not None:
166
+ _logger.info("Updated by dispatch %s", dispatch.id)
167
+ await self._updates_sender.send(
168
+ DispatchUpdate(
169
+ components=dispatch.selector,
170
+ dry_run=dispatch.dry_run,
171
+ options=dispatch.payload,
172
+ )
173
+ )
174
+
175
+ _logger.info("Started by dispatch %s", dispatch.id)
176
+ self._start_actors()
177
+ case RunningState.DIFFERENT_TYPE:
178
+ _logger.debug(
179
+ "Unknown dispatch! Ignoring dispatch of type %s", dispatch.type
180
+ )
@@ -0,0 +1,347 @@
1
+ # License: MIT
2
+ # Copyright © 2024 Frequenz Energy-as-a-Service GmbH
3
+
4
+ """The dispatch actor."""
5
+
6
+ import logging
7
+ from datetime import datetime, timedelta, timezone
8
+ from heapq import heappop, heappush
9
+
10
+ import grpc.aio
11
+ from frequenz.channels import Sender, select, selected_from
12
+ from frequenz.channels.timer import SkipMissedAndResync, Timer
13
+ from frequenz.client.dispatch import Client
14
+ from frequenz.client.dispatch.types import Event
15
+ from frequenz.sdk.actor import Actor
16
+
17
+ from ._dispatch import Dispatch, RunningState
18
+ from ._event import Created, Deleted, DispatchEvent, Updated
19
+
20
+ _logger = logging.getLogger(__name__)
21
+ """The logger for this module."""
22
+
23
+
24
+ class DispatchingActor(Actor):
25
+ """Dispatch actor.
26
+
27
+ This actor is responsible for handling dispatches for a microgrid.
28
+
29
+ This means staying in sync with the API and scheduling
30
+ dispatches as necessary.
31
+ """
32
+
33
+ # pylint: disable=too-many-arguments
34
+ def __init__(
35
+ self,
36
+ microgrid_id: int,
37
+ client: Client,
38
+ lifecycle_updates_sender: Sender[DispatchEvent],
39
+ running_state_change_sender: Sender[Dispatch],
40
+ ) -> None:
41
+ """Initialize the actor.
42
+
43
+ Args:
44
+ microgrid_id: The microgrid ID to handle dispatches for.
45
+ client: The client to use for fetching dispatches.
46
+ lifecycle_updates_sender: A sender for dispatch lifecycle events.
47
+ running_state_change_sender: A sender for dispatch running state changes.
48
+ """
49
+ super().__init__(name="dispatch")
50
+
51
+ self._client = client
52
+ self._dispatches: dict[int, Dispatch] = {}
53
+ self._microgrid_id = microgrid_id
54
+ self._lifecycle_updates_sender = lifecycle_updates_sender
55
+ self._running_state_change_sender = running_state_change_sender
56
+ self._next_event_timer = Timer(
57
+ timedelta(seconds=100), SkipMissedAndResync(), auto_start=False
58
+ )
59
+ """The timer to schedule the next event.
60
+
61
+ Interval is chosen arbitrarily, as it will be reset on the first event.
62
+ """
63
+
64
+ self._scheduled_events: list[tuple[datetime, Dispatch]] = []
65
+ """The scheduled events, sorted by time.
66
+
67
+ Each event is a tuple of the scheduled time and the dispatch.
68
+ heapq is used to keep the list sorted by time, so the next event is
69
+ always at index 0.
70
+ """
71
+
72
+ async def _run(self) -> None:
73
+ """Run the actor."""
74
+ _logger.info("Starting dispatch actor for microgrid %s", self._microgrid_id)
75
+
76
+ # Initial fetch
77
+ await self._fetch()
78
+
79
+ stream = self._client.stream(microgrid_id=self._microgrid_id)
80
+
81
+ # Streaming updates
82
+ async for selected in select(self._next_event_timer, stream):
83
+ if selected_from(selected, self._next_event_timer):
84
+ if not self._scheduled_events:
85
+ continue
86
+ _logger.debug(
87
+ "Executing scheduled event: %s", self._scheduled_events[0][1]
88
+ )
89
+ await self._execute_scheduled_event(heappop(self._scheduled_events)[1])
90
+ elif selected_from(selected, stream):
91
+ _logger.debug("Received dispatch event: %s", selected.message)
92
+ dispatch = Dispatch(selected.message.dispatch)
93
+ match selected.message.event:
94
+ case Event.CREATED:
95
+ self._dispatches[dispatch.id] = dispatch
96
+ await self._update_dispatch_schedule_and_notify(dispatch, None)
97
+ await self._lifecycle_updates_sender.send(
98
+ Created(dispatch=dispatch)
99
+ )
100
+ case Event.UPDATED:
101
+ await self._update_dispatch_schedule_and_notify(
102
+ dispatch, self._dispatches[dispatch.id]
103
+ )
104
+ self._dispatches[dispatch.id] = dispatch
105
+ await self._lifecycle_updates_sender.send(
106
+ Updated(dispatch=dispatch)
107
+ )
108
+ case Event.DELETED:
109
+ self._dispatches.pop(dispatch.id)
110
+ await self._update_dispatch_schedule_and_notify(None, dispatch)
111
+
112
+ dispatch._set_deleted() # pylint: disable=protected-access
113
+ await self._lifecycle_updates_sender.send(
114
+ Deleted(dispatch=dispatch)
115
+ )
116
+
117
+ async def _execute_scheduled_event(self, dispatch: Dispatch) -> None:
118
+ """Execute a scheduled event.
119
+
120
+ Args:
121
+ dispatch: The dispatch to execute.
122
+ """
123
+ await self._send_running_state_change(dispatch)
124
+
125
+ # The timer is always a tiny bit delayed, so we need to check if the
126
+ # actor is supposed to be running now (we're assuming it wasn't already
127
+ # running, as all checks are done before scheduling)
128
+ if dispatch.running(dispatch.type) == RunningState.RUNNING:
129
+ # If it should be running, schedule the stop event
130
+ self._schedule_stop(dispatch)
131
+ # If the actor is not running, we need to schedule the next start
132
+ else:
133
+ self._schedule_start(dispatch)
134
+
135
+ self._update_timer()
136
+
137
+ async def _fetch(self) -> None:
138
+ """Fetch all relevant dispatches using list.
139
+
140
+ This is used for the initial fetch and for re-fetching all dispatches
141
+ if the connection was lost.
142
+ """
143
+ old_dispatches = self._dispatches
144
+ self._dispatches = {}
145
+
146
+ try:
147
+ _logger.info("Fetching dispatches for microgrid %s", self._microgrid_id)
148
+ async for page in self._client.list(microgrid_id=self._microgrid_id):
149
+ for client_dispatch in page:
150
+ dispatch = Dispatch(client_dispatch)
151
+
152
+ self._dispatches[dispatch.id] = Dispatch(client_dispatch)
153
+ old_dispatch = old_dispatches.pop(dispatch.id, None)
154
+ if not old_dispatch:
155
+ _logger.info("New dispatch: %s", dispatch)
156
+ await self._update_dispatch_schedule_and_notify(dispatch, None)
157
+ await self._lifecycle_updates_sender.send(
158
+ Created(dispatch=dispatch)
159
+ )
160
+ elif dispatch.update_time != old_dispatch.update_time:
161
+ _logger.info("Updated dispatch: %s", dispatch)
162
+ await self._update_dispatch_schedule_and_notify(
163
+ dispatch, old_dispatch
164
+ )
165
+ await self._lifecycle_updates_sender.send(
166
+ Updated(dispatch=dispatch)
167
+ )
168
+
169
+ except grpc.aio.AioRpcError as error:
170
+ _logger.error("Error fetching dispatches: %s", error)
171
+ self._dispatches = old_dispatches
172
+ return
173
+
174
+ for dispatch in old_dispatches.values():
175
+ _logger.info("Deleted dispatch: %s", dispatch)
176
+ await self._lifecycle_updates_sender.send(Deleted(dispatch=dispatch))
177
+ await self._update_dispatch_schedule_and_notify(None, dispatch)
178
+
179
+ # Set deleted only here as it influences the result of dispatch.running()
180
+ # which is used in above in _running_state_change
181
+ dispatch._set_deleted() # pylint: disable=protected-access
182
+ await self._lifecycle_updates_sender.send(Deleted(dispatch=dispatch))
183
+
184
+ async def _update_dispatch_schedule_and_notify(
185
+ self, dispatch: Dispatch | None, old_dispatch: Dispatch | None
186
+ ) -> None:
187
+ """Update the schedule for a dispatch.
188
+
189
+ Schedules, reschedules or cancels the dispatch events
190
+ based on the start_time and active status.
191
+
192
+ Sends a running state change notification if necessary.
193
+
194
+ For example:
195
+ * when the start_time changes, the dispatch is rescheduled
196
+ * when the dispatch is deactivated, the dispatch is cancelled
197
+
198
+ Args:
199
+ dispatch: The dispatch to update the schedule for.
200
+ old_dispatch: The old dispatch, if available.
201
+ """
202
+ # If dispatch is None, the dispatch was deleted
203
+ # and we need to cancel any existing event for it
204
+ if not dispatch and old_dispatch:
205
+ self._remove_scheduled(old_dispatch)
206
+
207
+ # If the dispatch was running, we need to notify
208
+ if old_dispatch.running(old_dispatch.type) == RunningState.RUNNING:
209
+ await self._send_running_state_change(old_dispatch)
210
+
211
+ # A new dispatch was created
212
+ elif dispatch and not old_dispatch:
213
+ assert not self._remove_scheduled(
214
+ dispatch
215
+ ), "New dispatch already scheduled?!"
216
+
217
+ # If its currently running, send notification right away
218
+ if dispatch.running(dispatch.type) == RunningState.RUNNING:
219
+ await self._send_running_state_change(dispatch)
220
+
221
+ self._schedule_stop(dispatch)
222
+ # Otherwise, if it's enabled but not yet running, schedule it
223
+ else:
224
+ self._schedule_start(dispatch)
225
+
226
+ # Dispatch was updated
227
+ elif dispatch and old_dispatch:
228
+ # Remove potentially existing scheduled event
229
+ self._remove_scheduled(old_dispatch)
230
+
231
+ # Check if the change requires an immediate notification
232
+ if self._update_changed_running_state(dispatch, old_dispatch):
233
+ await self._send_running_state_change(dispatch)
234
+
235
+ if dispatch.running(dispatch.type) == RunningState.RUNNING:
236
+ self._schedule_stop(dispatch)
237
+ else:
238
+ self._schedule_start(dispatch)
239
+
240
+ # We modified the schedule, so we need to reset the timer
241
+ self._update_timer()
242
+
243
+ def _update_timer(self) -> None:
244
+ """Update the timer to the next event."""
245
+ if self._scheduled_events:
246
+ due_at: datetime = self._scheduled_events[0][0]
247
+ self._next_event_timer.reset(interval=due_at - datetime.now(timezone.utc))
248
+ _logger.debug("Next event scheduled at %s", self._scheduled_events[0][0])
249
+
250
+ def _remove_scheduled(self, dispatch: Dispatch) -> bool:
251
+ """Remove a dispatch from the scheduled events.
252
+
253
+ Args:
254
+ dispatch: The dispatch to remove.
255
+
256
+ Returns:
257
+ True if the dispatch was found and removed, False otherwise.
258
+ """
259
+ for idx, (_, sched_dispatch) in enumerate(self._scheduled_events):
260
+ if dispatch.id == sched_dispatch.id:
261
+ self._scheduled_events.pop(idx)
262
+ return True
263
+
264
+ return False
265
+
266
+ def _schedule_start(self, dispatch: Dispatch) -> None:
267
+ """Schedule a dispatch to start.
268
+
269
+ Args:
270
+ dispatch: The dispatch to schedule.
271
+ """
272
+ # If the dispatch is not active, don't schedule it
273
+ if not dispatch.active:
274
+ return
275
+
276
+ # Schedule the next run
277
+ try:
278
+ if next_run := dispatch.next_run:
279
+ heappush(self._scheduled_events, (next_run, dispatch))
280
+ _logger.debug(
281
+ "Scheduled dispatch %s to start at %s", dispatch.id, next_run
282
+ )
283
+ else:
284
+ _logger.debug("Dispatch %s has no next run", dispatch.id)
285
+ except ValueError as error:
286
+ _logger.error("Error scheduling dispatch %s: %s", dispatch.id, error)
287
+
288
+ def _schedule_stop(self, dispatch: Dispatch) -> None:
289
+ """Schedule a dispatch to stop.
290
+
291
+ Args:
292
+ dispatch: The dispatch to schedule.
293
+ """
294
+ # Setup stop timer if the dispatch has a duration
295
+ if dispatch.duration and dispatch.duration > timedelta(seconds=0):
296
+ until = dispatch.until
297
+ assert until is not None
298
+ heappush(self._scheduled_events, (until, dispatch))
299
+ _logger.debug("Scheduled dispatch %s to stop at %s", dispatch, until)
300
+
301
+ def _update_changed_running_state(
302
+ self, updated_dispatch: Dispatch, previous_dispatch: Dispatch
303
+ ) -> bool:
304
+ """Check if the running state of a dispatch has changed.
305
+
306
+ Checks if any of the running state changes to the dispatch
307
+ require a new message to be sent to the actor so that it can potentially
308
+ change its runtime configuration or start/stop itself.
309
+
310
+ Also checks if a dispatch update was not sent due to connection issues
311
+ in which case we need to send the message now.
312
+
313
+ Args:
314
+ updated_dispatch: The new dispatch
315
+ previous_dispatch: The old dispatch
316
+
317
+ Returns:
318
+ True if the running state has changed, False otherwise.
319
+ """
320
+ # If any of the runtime attributes changed, we need to send a message
321
+ runtime_state_attributes = [
322
+ "running",
323
+ "type",
324
+ "selector",
325
+ "duration",
326
+ "dry_run",
327
+ "payload",
328
+ ]
329
+
330
+ for attribute in runtime_state_attributes:
331
+ if getattr(updated_dispatch, attribute) != getattr(
332
+ previous_dispatch, attribute
333
+ ):
334
+ return True
335
+
336
+ return False
337
+
338
+ async def _send_running_state_change(self, dispatch: Dispatch) -> None:
339
+ """Send a running state change message.
340
+
341
+ Args:
342
+ dispatch: The dispatch that changed.
343
+ """
344
+ await self._running_state_change_sender.send(dispatch)
345
+ # Update the last sent notification time
346
+ # so we know if this change was already sent
347
+ dispatch._set_running_status_notified() # pylint: disable=protected-access
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: frequenz-dispatch
3
- Version: 0.3.0
3
+ Version: 0.3.2
4
4
  Summary: A highlevel interface for the dispatch API
5
5
  Author-email: Frequenz Energy-as-a-Service GmbH <floss@frequenz.com>
6
6
  License: MIT
@@ -7,6 +7,7 @@ src/frequenz/dispatch/__init__.py
7
7
  src/frequenz/dispatch/_dispatch.py
8
8
  src/frequenz/dispatch/_dispatcher.py
9
9
  src/frequenz/dispatch/_event.py
10
+ src/frequenz/dispatch/_managing_actor.py
10
11
  src/frequenz/dispatch/actor.py
11
12
  src/frequenz/dispatch/conftest.py
12
13
  src/frequenz/dispatch/py.typed
@@ -1,8 +1,8 @@
1
1
  python-dateutil<3.0,>=2.8.2
2
2
  typing-extensions<5.0.0,>=4.11.0
3
- frequenz-sdk==1.0.0-rc900
4
- frequenz-channels<2.0.0,>=1.1.0
5
- frequenz-client-dispatch<0.7.0,>=0.6.0
3
+ frequenz-sdk<1.0.0-rc1000,>=1.0.0-rc900
4
+ frequenz-channels<2.0.0,>=1.2.0
5
+ frequenz-client-dispatch<0.8.0,>=0.7.1
6
6
 
7
7
  [dev]
8
8
  frequenz-dispatch[dev-flake8,dev-formatting,dev-mkdocs,dev-mypy,dev-noxfile,dev-pylint,dev-pytest]
@@ -11,7 +11,7 @@ frequenz-dispatch[dev-flake8,dev-formatting,dev-mkdocs,dev-mypy,dev-noxfile,dev-
11
11
  flake8==7.1.1
12
12
  flake8-docstrings==1.7.0
13
13
  flake8-pyproject==1.2.3
14
- pydoclint==0.5.6
14
+ pydoclint==0.5.9
15
15
  pydocstyle==6.3.0
16
16
 
17
17
  [dev-formatting]
@@ -24,30 +24,30 @@ Markdown==3.7
24
24
  mike==2.1.3
25
25
  mkdocs-gen-files==0.5.0
26
26
  mkdocs-literate-nav==0.6.1
27
- mkdocs-macros-plugin==1.0.5
28
- mkdocs-material==9.5.34
29
- mkdocstrings[python]==0.25.2
30
- mkdocstrings-python==1.10.9
27
+ mkdocs-macros-plugin==1.2.0
28
+ mkdocs-material==9.5.39
29
+ mkdocstrings[python]==0.26.1
30
+ mkdocstrings-python==1.11.1
31
31
  frequenz-repo-config[lib]==0.10.0
32
32
 
33
33
  [dev-mypy]
34
34
  mypy==1.11.2
35
35
  grpc-stubs==1.53.0.5
36
36
  types-Markdown==3.7.0.20240822
37
- types-python-dateutil==2.9.0.20240821
37
+ types-python-dateutil==2.9.0.20240906
38
38
  frequenz-dispatch[dev-mkdocs,dev-noxfile,dev-pytest]
39
39
 
40
40
  [dev-noxfile]
41
- uv==0.4.1
41
+ uv==0.4.17
42
42
  nox==2024.4.15
43
43
  frequenz-repo-config[lib]==0.10.0
44
44
 
45
45
  [dev-pylint]
46
- pylint==3.2.7
46
+ pylint==3.3.1
47
47
  frequenz-dispatch[dev-mkdocs,dev-noxfile,dev-pytest]
48
48
 
49
49
  [dev-pytest]
50
- pytest==8.3.2
50
+ pytest==8.3.3
51
51
  frequenz-repo-config[extra-lint-examples]==0.10.0
52
52
  pytest-mock==3.14.0
53
53
  pytest-asyncio==0.24.0
@@ -1,20 +0,0 @@
1
- # Dispatch Highlevel Interface Release Notes
2
-
3
- ## Summary
4
-
5
- <!-- Here goes a general summary of what this release is about -->
6
-
7
- ## Upgrading
8
-
9
- - The dispatch high level interface now depends on `frequenz-sdk` version `v1.0.0-rc900`.
10
- - We are now using the version `0.6.0` of the underlying `frequenz-client-dispatch` client library.
11
- - The init parameter of the `Dispatcher` class has been changed to accept a `server_url` instead.
12
-
13
- ## New Features
14
-
15
- * Using the new dispatch client, we now have support for pagination in the dispatch list request.
16
- * The new client version also supports streaming, however it is not yet used internally in the high level interface.
17
-
18
- ## Bug Fixes
19
-
20
- - Fix documentation cross-linking to the `frequenz-client-dispatch` package.
@@ -1,255 +0,0 @@
1
- # License: MIT
2
- # Copyright © 2024 Frequenz Energy-as-a-Service GmbH
3
-
4
- """The dispatch actor."""
5
-
6
- import asyncio
7
- import logging
8
- from datetime import datetime, timedelta, timezone
9
-
10
- import grpc.aio
11
- from frequenz.channels import Sender
12
- from frequenz.channels.timer import SkipMissedAndDrift, Timer
13
- from frequenz.client.dispatch import Client
14
- from frequenz.sdk.actor import Actor
15
-
16
- from ._dispatch import Dispatch, RunningState
17
- from ._event import Created, Deleted, DispatchEvent, Updated
18
-
19
- _MAX_AHEAD_SCHEDULE = timedelta(hours=5)
20
- """The maximum time ahead to schedule a dispatch.
21
-
22
- We don't want to schedule dispatches too far ahead,
23
- as they could start drifting if the delay is too long.
24
-
25
- This also prevents us from scheduling too many dispatches at once.
26
-
27
- The exact value is not important, but should be a few hours and not more than a day.
28
- """
29
-
30
- _DEFAULT_POLL_INTERVAL = timedelta(seconds=10)
31
- """The default interval to poll the API for dispatch changes."""
32
-
33
- _logger = logging.getLogger(__name__)
34
- """The logger for this module."""
35
-
36
-
37
- class DispatchingActor(Actor):
38
- """Dispatch actor.
39
-
40
- This actor is responsible for handling dispatches for a microgrid.
41
-
42
- This means staying in sync with the API and scheduling
43
- dispatches as necessary.
44
- """
45
-
46
- # pylint: disable=too-many-arguments
47
- def __init__(
48
- self,
49
- microgrid_id: int,
50
- client: Client,
51
- lifecycle_updates_sender: Sender[DispatchEvent],
52
- running_state_change_sender: Sender[Dispatch],
53
- poll_interval: timedelta = _DEFAULT_POLL_INTERVAL,
54
- ) -> None:
55
- """Initialize the actor.
56
-
57
- Args:
58
- microgrid_id: The microgrid ID to handle dispatches for.
59
- client: The client to use for fetching dispatches.
60
- lifecycle_updates_sender: A sender for dispatch lifecycle events.
61
- running_state_change_sender: A sender for dispatch running state changes.
62
- poll_interval: The interval to poll the API for dispatche changes.
63
- """
64
- super().__init__(name="dispatch")
65
-
66
- self._client = client
67
- self._dispatches: dict[int, Dispatch] = {}
68
- self._scheduled: dict[int, asyncio.Task[None]] = {}
69
- self._microgrid_id = microgrid_id
70
- self._lifecycle_updates_sender = lifecycle_updates_sender
71
- self._running_state_change_sender = running_state_change_sender
72
- self._poll_timer = Timer(poll_interval, SkipMissedAndDrift())
73
-
74
- async def _run(self) -> None:
75
- """Run the actor."""
76
- self._poll_timer.reset()
77
- try:
78
- async for _ in self._poll_timer:
79
- await self._fetch()
80
- except asyncio.CancelledError:
81
- for task in self._scheduled.values():
82
- task.cancel()
83
- raise
84
-
85
- async def _fetch(self) -> None:
86
- """Fetch all relevant dispatches."""
87
- old_dispatches = self._dispatches
88
- self._dispatches = {}
89
-
90
- try:
91
- _logger.info("Fetching dispatches for microgrid %s", self._microgrid_id)
92
- async for page in self._client.list(microgrid_id=self._microgrid_id):
93
- for client_dispatch in page:
94
- dispatch = Dispatch(client_dispatch)
95
-
96
- self._dispatches[dispatch.id] = Dispatch(client_dispatch)
97
- old_dispatch = old_dispatches.pop(dispatch.id, None)
98
- if not old_dispatch:
99
- self._update_dispatch_schedule(dispatch, None)
100
- _logger.info("New dispatch: %s", dispatch)
101
- await self._lifecycle_updates_sender.send(
102
- Created(dispatch=dispatch)
103
- )
104
- elif dispatch.update_time != old_dispatch.update_time:
105
- self._update_dispatch_schedule(dispatch, old_dispatch)
106
- _logger.info("Updated dispatch: %s", dispatch)
107
- await self._lifecycle_updates_sender.send(
108
- Updated(dispatch=dispatch)
109
- )
110
-
111
- if self._running_state_change(dispatch, old_dispatch):
112
- await self._send_running_state_change(dispatch)
113
-
114
- except grpc.aio.AioRpcError as error:
115
- _logger.error("Error fetching dispatches: %s", error)
116
- self._dispatches = old_dispatches
117
- return
118
-
119
- for dispatch in old_dispatches.values():
120
- _logger.info("Deleted dispatch: %s", dispatch)
121
- dispatch._set_deleted() # pylint: disable=protected-access
122
- await self._lifecycle_updates_sender.send(Deleted(dispatch=dispatch))
123
- if task := self._scheduled.pop(dispatch.id, None):
124
- task.cancel()
125
-
126
- if self._running_state_change(None, dispatch):
127
- await self._send_running_state_change(dispatch)
128
-
129
- def _update_dispatch_schedule(
130
- self, dispatch: Dispatch, old_dispatch: Dispatch | None
131
- ) -> None:
132
- """Update the schedule for a dispatch.
133
-
134
- Schedules, reschedules or cancels the dispatch based on the start_time
135
- and active status.
136
-
137
- For example:
138
- * when the start_time changes, the dispatch is rescheduled
139
- * when the dispatch is deactivated, the dispatch is cancelled
140
-
141
- Args:
142
- dispatch: The dispatch to update the schedule for.
143
- old_dispatch: The old dispatch, if available.
144
- """
145
- if (
146
- old_dispatch
147
- and old_dispatch.active
148
- and old_dispatch.start_time != dispatch.start_time
149
- ):
150
- if task := self._scheduled.pop(dispatch.id, None):
151
- task.cancel()
152
-
153
- if dispatch.active and dispatch.id not in self._scheduled:
154
- self._scheduled[dispatch.id] = asyncio.create_task(
155
- self._schedule_task(dispatch)
156
- )
157
-
158
- async def _schedule_task(self, dispatch: Dispatch) -> None:
159
- """Wait for a dispatch to become ready.
160
-
161
- Waits for the dispatches next run and then notifies that it is ready.
162
-
163
- Args:
164
- dispatch: The dispatch to schedule.
165
- """
166
-
167
- def next_run_info() -> tuple[datetime, datetime] | None:
168
- now = datetime.now(tz=timezone.utc)
169
- next_run = dispatch.next_run_after(now)
170
-
171
- if next_run is None:
172
- return None
173
-
174
- return now, next_run
175
-
176
- while pair := next_run_info():
177
- now, next_time = pair
178
-
179
- if next_time - now > _MAX_AHEAD_SCHEDULE:
180
- await asyncio.sleep(_MAX_AHEAD_SCHEDULE.total_seconds())
181
- continue
182
-
183
- _logger.info("Dispatch %s scheduled for %s", dispatch.id, next_time)
184
- await asyncio.sleep((next_time - now).total_seconds())
185
-
186
- _logger.info("Dispatch ready: %s", dispatch)
187
- await self._running_state_change_sender.send(dispatch)
188
-
189
- _logger.info("Dispatch finished: %s", dispatch)
190
- self._scheduled.pop(dispatch.id)
191
-
192
- def _running_state_change(
193
- self, updated_dispatch: Dispatch | None, previous_dispatch: Dispatch | None
194
- ) -> bool:
195
- """Check if the running state of a dispatch has changed.
196
-
197
- Checks if any of the running state changes to the dispatch
198
- require a new message to be sent to the actor so that it can potentially
199
- change its runtime configuration or start/stop itself.
200
-
201
- Also checks if a dispatch update was not sent due to connection issues
202
- in which case we need to send the message now.
203
-
204
- Args:
205
- updated_dispatch: The new dispatch, if available.
206
- previous_dispatch: The old dispatch, if available.
207
-
208
- Returns:
209
- True if the running state has changed, False otherwise.
210
- """
211
- # New dispatch
212
- if previous_dispatch is None:
213
- assert updated_dispatch is not None
214
-
215
- # Client was not informed about the dispatch, do it now
216
- # pylint: disable=protected-access
217
- if not updated_dispatch._running_status_notified:
218
- return True
219
-
220
- # Deleted dispatch
221
- if updated_dispatch is None:
222
- assert previous_dispatch is not None
223
- return (
224
- previous_dispatch.running(previous_dispatch.type)
225
- == RunningState.RUNNING
226
- )
227
-
228
- # If any of the runtime attributes changed, we need to send a message
229
- runtime_state_attributes = [
230
- "running",
231
- "type",
232
- "selector",
233
- "duration",
234
- "dry_run",
235
- "payload",
236
- ]
237
-
238
- for attribute in runtime_state_attributes:
239
- if getattr(updated_dispatch, attribute) != getattr(
240
- previous_dispatch, attribute
241
- ):
242
- return True
243
-
244
- return False
245
-
246
- async def _send_running_state_change(self, dispatch: Dispatch) -> None:
247
- """Send a running state change message.
248
-
249
- Args:
250
- dispatch: The dispatch that changed.
251
- """
252
- await self._running_state_change_sender.send(dispatch)
253
- # Update the last sent notification time
254
- # so we know if this change was already sent
255
- dispatch._set_running_status_notified() # pylint: disable=protected-access