reactor_sdk-0.1.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
reactor_sdk/reactor.py ADDED
@@ -0,0 +1,739 @@
"""
Reactor - Main entry point for the Reactor Python SDK.

This module provides the main Reactor class that orchestrates the
coordinator and GPU machine clients for real-time AI video streaming.
"""

from __future__ import annotations

import logging
import time
from typing import Any, Callable, Optional, Set, TypeVar, Union, overload

from aiortc import MediaStreamTrack

from reactor_sdk.coordinator import CoordinatorClient, LocalCoordinatorClient
from reactor_sdk.model import ModelClient
from reactor_sdk.types import (
    ConflictError,
    FrameCallback,
    GPUMachineStatus,
    ReactorError,
    ReactorEvent,
    ReactorState,
    ReactorStatus,
)
from reactor_sdk.utils.tokens import fetch_jwt_token
from reactor_sdk.utils.webrtc import WebRTCConfig

logger = logging.getLogger(__name__)


# =============================================================================
# Constants
# =============================================================================

LOCAL_COORDINATOR_URL = "http://localhost:8080"
PROD_COORDINATOR_URL = "https://api.reactor.inc"


# Type for event handlers
EventHandler = Callable[..., None]

# Type variable for decorator return types
F = TypeVar("F", bound=Callable[..., Any])


# =============================================================================
# Reactor Implementation (Internal)
# =============================================================================


class _ReactorImpl:
    """
    Internal implementation of the Reactor class.

    Do not instantiate directly - use `Reactor(...)` instead.
    """

    def __init__(
        self,
        model_name: str,
        api_key: Optional[str] = None,
        coordinator_url: str = PROD_COORDINATOR_URL,
        local: bool = False,
    ) -> None:
        """
        Initialize the Reactor.

        Args:
            model_name: Name of the model to connect to.
            api_key: Your Reactor API key. The SDK will automatically fetch
                a JWT token using this key. Required unless local=True.
            coordinator_url: URL of the coordinator API (ignored if local=True).
            local: If True, use the local coordinator at localhost:8080.
        """
        self._model = model_name
        self._api_key = api_key
        self._local = local
        self._coordinator_url = LOCAL_COORDINATOR_URL if local else coordinator_url

        # Clients
        self._coordinator_client: Optional[
            Union[CoordinatorClient, LocalCoordinatorClient]
        ] = None
        self._machine_client: Optional[ModelClient] = None

        # State
        self._status: ReactorStatus = ReactorStatus.DISCONNECTED
        self._session_id: Optional[str] = None
        self._session_expiration: Optional[float] = None
        self._last_error: Optional[ReactorError] = None

        # Event system
        self._event_listeners: dict[ReactorEvent, Set[EventHandler]] = {}

        # Frame callback (delegated to machine client)
        self._frame_callback: Optional[FrameCallback] = None

    # =========================================================================
    # Event Emitter API
    # =========================================================================

    def on(self, event: ReactorEvent, handler: EventHandler) -> None:
        """
        Register an event handler.

        Args:
            event: The event name.
            handler: The callback function.

        Events:
            - "status_changed": Called with (status: ReactorStatus)
            - "session_id_changed": Called with (session_id: str | None)
            - "new_message": Called with (message: dict)
            - "stream_changed": Called with (track: MediaStreamTrack)
            - "error": Called with (error: ReactorError)
            - "session_expiration_changed": Called with (expiration: float | None)
        """
        if event not in self._event_listeners:
            self._event_listeners[event] = set()
        self._event_listeners[event].add(handler)

    def off(self, event: ReactorEvent, handler: EventHandler) -> None:
        """
        Unregister an event handler.

        Args:
            event: The event name.
            handler: The callback function to remove.
        """
        if event in self._event_listeners:
            self._event_listeners[event].discard(handler)

    def _emit(self, event: ReactorEvent, *args: Any) -> None:
        """
        Emit an event to all registered handlers.

        Args:
            event: The event name.
            *args: Arguments to pass to handlers.
        """
        if event in self._event_listeners:
            for handler in self._event_listeners[event]:
                try:
                    handler(*args)
                except Exception as e:
                    logger.exception(f"Error in event handler for '{event}': {e}")

    # =========================================================================
    # Decorator API
    # =========================================================================

    def on_frame(self, func: F) -> F:
        """
        Decorator to register a frame callback.

        The decorated function will be called with each received video frame
        as a numpy array in RGB format with shape (H, W, 3).

        Example:
            @reactor.on_frame
            def handle_frame(frame):
                print(f"Frame shape: {frame.shape}")

        Args:
            func: The callback function that receives a numpy frame.

        Returns:
            The original function (unchanged).
        """
        self.set_frame_callback(func)
        return func

    def on_message(self, func: F) -> F:
        """
        Decorator to register a message handler.

        The decorated function will be called with each message received
        from the GPU machine.

        Example:
            @reactor.on_message
            def handle_message(message):
                print(f"Received: {message}")

        Args:
            func: The callback function that receives the message dict.

        Returns:
            The original function (unchanged).
        """
        self.on("new_message", func)
        return func

    @overload
    def on_status(self, func: F) -> F:
        """Register handler for all status changes."""
        ...

    @overload
    def on_status(
        self, status_filter: ReactorStatus
    ) -> Callable[[F], F]:
        """Register handler for a specific status."""
        ...

    @overload
    def on_status(
        self, status_filter: list[ReactorStatus]
    ) -> Callable[[F], F]:
        """Register handler for multiple specific statuses."""
        ...

    def on_status(
        self,
        func_or_filter: Union[F, ReactorStatus, list[ReactorStatus], None] = None,
    ) -> Union[F, Callable[[F], F]]:
        """
        Decorator to register a status change handler.

        Can be used with or without a filter argument:

        Example:
            # Handle all status changes
            @reactor.on_status
            def handle_any_status(status):
                print(f"Status: {status}")

            # Handle a specific status only
            @reactor.on_status(ReactorStatus.READY)
            def handle_ready(status):
                print("Ready!")

            # Handle multiple statuses
            @reactor.on_status([ReactorStatus.READY, ReactorStatus.CONNECTING])
            def handle_active(status):
                print(f"Active state: {status}")

        Args:
            func_or_filter: Either the function to decorate (no filter),
                a single ReactorStatus, or a list of ReactorStatus values to filter on.

        Returns:
            The decorator or the decorated function.
        """
        # Case 1: @reactor.on_status (no parentheses, no filter)
        if callable(func_or_filter):
            func = func_or_filter
            self.on("status_changed", func)
            return func

        # Case 2: @reactor.on_status(ReactorStatus.READY) or
        # @reactor.on_status([ReactorStatus.READY, ReactorStatus.CONNECTING])
        status_filter = func_or_filter

        # Normalize to a set for efficient lookup
        if status_filter is None:
            allowed_statuses: Optional[Set[ReactorStatus]] = None
        elif isinstance(status_filter, list):
            allowed_statuses = set(status_filter)
        else:
            allowed_statuses = {status_filter}

        def decorator(func: F) -> F:
            if allowed_statuses is None:
                # No filter - call for all status changes
                self.on("status_changed", func)
            else:
                # Filter - only call when status matches
                def filtered_handler(status: ReactorStatus) -> None:
                    if status in allowed_statuses:
                        func(status)

                self.on("status_changed", filtered_handler)
            return func

        return decorator

    def on_error(self, func: F) -> F:
        """
        Decorator to register an error handler.

        The decorated function will be called when an error occurs.

        Example:
            @reactor.on_error
            def handle_error(error):
                print(f"Error: {error.code} - {error.message}")

        Args:
            func: The callback function that receives the ReactorError.

        Returns:
            The original function (unchanged).
        """
        self.on("error", func)
        return func

    def on_stream(self, func: F) -> F:
        """
        Decorator to register a stream/track handler.

        The decorated function will be called when the video stream changes
        (i.e., when a new track is received from the GPU machine).

        Example:
            @reactor.on_stream
            def handle_stream(track):
                print(f"Stream changed: {track}")

        Args:
            func: The callback function that receives the MediaStreamTrack.

        Returns:
            The original function (unchanged).
        """
        self.on("stream_changed", func)
        return func

    # =========================================================================
    # Connection Management
    # =========================================================================

    async def connect(self) -> None:
        """
        Connect to the coordinator and GPU machine.

        If an API key was provided in the constructor, the SDK will
        automatically fetch a JWT token before connecting.

        Raises:
            ValueError: If no API key was provided and not in local mode.
            RuntimeError: If the connection fails.
        """
        logger.debug(f"Connecting, status: {self._status}")

        if self._api_key is None and not self._local:
            raise ValueError(
                "No API key provided and not in local mode. "
                "Pass api_key to the Reactor constructor."
            )

        if self._status != ReactorStatus.DISCONNECTED:
            raise RuntimeError("Already connected or connecting")

        self._set_status(ReactorStatus.CONNECTING)

        try:
            logger.debug("Connecting to coordinator")

            # Fetch JWT token if we have an API key
            jwt_token: Optional[str] = None
            if self._api_key is not None:
                logger.debug("Fetching JWT token from coordinator...")
                jwt_token = await fetch_jwt_token(
                    api_key=self._api_key,
                    coordinator_url=self._coordinator_url,
                )
                logger.debug("JWT token obtained successfully")

            # Create coordinator client
            if self._local:
                self._coordinator_client = LocalCoordinatorClient(self._coordinator_url)
            else:
                self._coordinator_client = CoordinatorClient(
                    base_url=self._coordinator_url,
                    jwt_token=jwt_token or "",
                    model=self._model,
                )

            # Get ICE servers from coordinator
            ice_servers = await self._coordinator_client.get_ice_servers()

            # Create GPU machine client and generate SDP offer
            config = WebRTCConfig(ice_servers=ice_servers)
            self._machine_client = ModelClient(config)
            self._setup_machine_client_handlers()

            # Set frame callback if one was registered
            if self._frame_callback is not None:
                self._machine_client.set_frame_callback(self._frame_callback)

            sdp_offer = await self._machine_client.create_offer()

            # Create session with coordinator
            session_id = await self._coordinator_client.create_session(sdp_offer)
            self._set_session_id(session_id)

            # Get SDP answer from coordinator
            sdp_answer = await self._coordinator_client.connect(session_id)

            # Connect to GPU machine with the answer
            await self._machine_client.connect(sdp_answer)

        except Exception as e:
            logger.error(f"Connection failed: {e}")
            self._create_error(
                "CONNECTION_FAILED",
                f"Connection failed: {e}",
                "coordinator",
                recoverable=True,
            )
            self._set_status(ReactorStatus.DISCONNECTED)
            raise

    async def reconnect(self) -> None:
        """
        Reconnect to an existing session that may have been interrupted.

        Raises:
            RuntimeError: If there is no active session or reconnection fails.
        """
        if self._session_id is None or self._coordinator_client is None:
            logger.warning("No active session to reconnect to.")
            return

        if self._status == ReactorStatus.READY:
            logger.warning("Already connected, no need to reconnect.")
            return

        self._set_status(ReactorStatus.CONNECTING)

        if self._machine_client is None:
            # Get ICE servers from coordinator
            ice_servers = await self._coordinator_client.get_ice_servers()

            config = WebRTCConfig(ice_servers=ice_servers)
            self._machine_client = ModelClient(config)
            self._setup_machine_client_handlers()

        # Always calculate a new offer for reconnection
        sdp_offer = await self._machine_client.create_offer()

        try:
            # Send offer to coordinator and get answer
            sdp_answer = await self._coordinator_client.connect(
                self._session_id,
                sdp_offer,
            )

            # Connect to GPU machine with the answer
            await self._machine_client.connect(sdp_answer)
            self._set_status(ReactorStatus.READY)

        except ConflictError:
            logger.error("Reconnection failed: conflict error")
            await self.disconnect(recoverable=True)
            self._create_error(
                "RECONNECTION_FAILED",
                "Reconnection failed: connection conflict",
                "coordinator",
                recoverable=True,
            )
        except Exception as e:
            logger.error(f"Failed to reconnect: {e}")
            await self.disconnect(recoverable=False)
            self._create_error(
                "RECONNECTION_FAILED",
                f"Failed to reconnect: {e}",
                "coordinator",
                recoverable=True,
            )

    async def disconnect(self, recoverable: bool = False) -> None:
        """
        Disconnect from the coordinator and GPU machine.

        Args:
            recoverable: If True, keep session info for potential reconnection.
        """
        if self._status == ReactorStatus.DISCONNECTED and self._session_id is None:
            logger.warning("Already disconnected")
            return

        # Terminate coordinator session if not recoverable
        if self._coordinator_client is not None and not recoverable:
            try:
                await self._coordinator_client.terminate_session()
            except Exception as e:
                logger.error(f"Error terminating coordinator session: {e}")
            finally:
                await self._coordinator_client.close()
                self._coordinator_client = None

        # Disconnect machine client
        if self._machine_client is not None:
            try:
                await self._machine_client.disconnect()
            except Exception as e:
                logger.error(f"Error disconnecting from GPU machine: {e}")

            if not recoverable:
                self._machine_client = None

        self._set_status(ReactorStatus.DISCONNECTED)

        if not recoverable:
            self._set_session_expiration(None)
            self._set_session_id(None)

    # =========================================================================
    # Communication
    # =========================================================================

    async def send_command(self, command: str, data: Any) -> None:
        """
        Send a command to the GPU machine.

        Args:
            command: The command type.
            data: The data to send with the command.
        """
        if self._status != ReactorStatus.READY:
            logger.warning(f"Cannot send message, status is {self._status}")
            return

        try:
            if self._machine_client is not None:
                self._machine_client.send_command(command, data)
        except Exception as e:
            logger.error(f"Failed to send message: {e}")
            self._create_error(
                "MESSAGE_SEND_FAILED",
                f"Failed to send message: {e}",
                "gpu",
                recoverable=True,
            )

    # =========================================================================
    # Track Publishing
    # =========================================================================

    async def publish_track(self, track: MediaStreamTrack) -> None:
        """
        Publish a video track to the GPU machine.

        Args:
            track: The MediaStreamTrack to publish.
        """
        if self._status != ReactorStatus.READY:
            logger.warning(f"Cannot publish track, status is {self._status}")
            return

        try:
            if self._machine_client is not None:
                await self._machine_client.publish_track(track)
        except Exception as e:
            logger.error(f"Failed to publish track: {e}")
            self._create_error(
                "TRACK_PUBLISH_FAILED",
                f"Failed to publish track: {e}",
                "gpu",
                recoverable=True,
            )

    async def unpublish_track(self) -> None:
        """
        Unpublish the currently published track.
        """
        try:
            if self._machine_client is not None:
                await self._machine_client.unpublish_track()
        except Exception as e:
            logger.error(f"Failed to unpublish track: {e}")
            self._create_error(
                "TRACK_UNPUBLISH_FAILED",
                f"Failed to unpublish track: {e}",
                "gpu",
                recoverable=True,
            )

    # =========================================================================
    # Frame Callback
    # =========================================================================

    def set_frame_callback(self, callback: Optional[FrameCallback]) -> None:
        """
        Set a callback to receive individual video frames.

        The callback will be called with each received frame as a numpy array
        in RGB format with shape (H, W, 3).

        This can be called before or after connect().

        Args:
            callback: The callback function, or None to clear.
        """
        self._frame_callback = callback

        # If already connected, update the machine client
        if self._machine_client is not None:
            self._machine_client.set_frame_callback(callback)

    # =========================================================================
    # Remote Stream Access
    # =========================================================================

    def get_remote_track(self) -> Optional[MediaStreamTrack]:
        """
        Get the remote video track from the GPU machine.

        Returns:
            The remote MediaStreamTrack, or None if not available.
        """
        if self._machine_client is None:
            return None
        return self._machine_client.get_remote_track()

    # =========================================================================
    # State Accessors
    # =========================================================================

    def get_status(self) -> ReactorStatus:
        """
        Get the current connection status.

        Returns:
            The current ReactorStatus.
        """
        return self._status

    def get_state(self) -> ReactorState:
        """
        Get the current state including status and error info.

        Returns:
            The current ReactorState.
        """
        return ReactorState(
            status=self._status,
            last_error=self._last_error,
        )

    def get_session_id(self) -> Optional[str]:
        """
        Get the current session ID.

        Returns:
            The session ID, or None if not connected.
        """
        return self._session_id

    def get_last_error(self) -> Optional[ReactorError]:
        """
        Get the last error that occurred.

        Returns:
            The last ReactorError, or None.
        """
        return self._last_error

    # =========================================================================
    # Private Helpers
    # =========================================================================

    def _set_status(self, new_status: ReactorStatus) -> None:
        """Set the status and emit an event if it changed."""
        logger.debug(f"Setting status: {new_status} from {self._status}")
        if self._status != new_status:
            self._status = new_status
            self._emit("status_changed", new_status)

    def _set_session_id(self, new_session_id: Optional[str]) -> None:
        """Set the session ID and emit an event if it changed."""
        logger.debug(f"Setting session ID: {new_session_id} from {self._session_id}")
        if self._session_id != new_session_id:
            self._session_id = new_session_id
            self._emit("session_id_changed", new_session_id)

    def _set_session_expiration(self, new_expiration: Optional[float]) -> None:
        """Set the session expiration and emit an event if it changed."""
        logger.debug(f"Setting session expiration: {new_expiration}")
        if self._session_expiration != new_expiration:
            self._session_expiration = new_expiration
            self._emit("session_expiration_changed", new_expiration)

    def _create_error(
        self,
        code: str,
        message: str,
        component: str,
        recoverable: bool,
        retry_after: Optional[float] = None,
    ) -> None:
        """Create and store an error, then emit the error event."""
        self._last_error = ReactorError(
            code=code,
            message=message,
            timestamp=time.time(),
            recoverable=recoverable,
            component=component,  # type: ignore
            retry_after=retry_after,
        )
        self._emit("error", self._last_error)

    def _setup_machine_client_handlers(self) -> None:
        """Set up event handlers for the machine client."""
        if self._machine_client is None:
            return

        def on_application(message: Any) -> None:
            self._emit("new_message", message)

        def on_status_changed(status: GPUMachineStatus) -> None:
            if status == GPUMachineStatus.CONNECTED:
                self._set_status(ReactorStatus.READY)
            elif status == GPUMachineStatus.DISCONNECTED:
                # Schedule disconnect on the event loop
                import asyncio
                asyncio.create_task(self.disconnect(recoverable=True))
            elif status == GPUMachineStatus.ERROR:
                self._create_error(
                    "GPU_CONNECTION_ERROR",
                    "GPU machine connection failed",
                    "gpu",
                    recoverable=True,
                )
                import asyncio
                asyncio.create_task(self.disconnect())

        def on_track_received(track: MediaStreamTrack) -> None:
            self._emit("stream_changed", track)

        self._machine_client.on("application", on_application)
        self._machine_client.on("status_changed", on_status_changed)
        self._machine_client.on("track_received", on_track_received)

    # =========================================================================
    # Context Manager
    # =========================================================================

    async def __aenter__(self) -> "_ReactorImpl":
        """Async context manager entry."""
        return self

    async def __aexit__(self, *args: object) -> None:
        """Async context manager exit."""
        await self.disconnect()
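For orientation, the following is a minimal usage sketch based only on the API surface shown in this file. It assumes the package exposes a public `Reactor` class (this module defines the internal `_ReactorImpl` and says to use `Reactor(...)` instead); the import path `from reactor_sdk import Reactor`, the model name, the API key, and the "set_prompt" command are placeholders, not documented values.

# Hypothetical usage sketch for the Reactor SDK shown above.
# Assumptions: `Reactor` is exported from reactor_sdk; model name, API key,
# and the "set_prompt" command/payload are illustrative placeholders.
import asyncio

from reactor_sdk import Reactor            # assumed public export
from reactor_sdk.types import ReactorStatus


async def main() -> None:
    reactor = Reactor(model_name="my-model", api_key="YOUR_API_KEY")

    ready = asyncio.Event()

    @reactor.on_status(ReactorStatus.READY)
    def handle_ready(status):
        # READY is signaled via the "status_changed" event once the
        # GPU machine connection is established.
        ready.set()

    @reactor.on_frame
    def handle_frame(frame):
        # frame is an RGB numpy array with shape (H, W, 3)
        print("frame:", frame.shape)

    @reactor.on_error
    def handle_error(error):
        print("error:", error.code, error.message)

    # Fetches a JWT with the API key, negotiates WebRTC, and opens a session.
    await reactor.connect()
    await ready.wait()

    # Send an application-level command to the GPU machine.
    await reactor.send_command("set_prompt", {"prompt": "a watercolor city"})

    await asyncio.sleep(10)  # let frames stream for a while
    await reactor.disconnect()


if __name__ == "__main__":
    asyncio.run(main())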