genai-protocol-lite 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- AIConnector/__init__.py +0 -0
- AIConnector/common/__init__.py +0 -0
- AIConnector/common/exceptions.py +13 -0
- AIConnector/common/logger.py +57 -0
- AIConnector/common/message.py +14 -0
- AIConnector/common/network.py +146 -0
- AIConnector/connector/__init__.py +0 -0
- AIConnector/connector/azure_connector.py +205 -0
- AIConnector/connector/base_connector.py +51 -0
- AIConnector/connector/peer_connection_manager.py +260 -0
- AIConnector/connector/ws_connector.py +213 -0
- AIConnector/core/__init__.py +0 -0
- AIConnector/core/chat_client.py +505 -0
- AIConnector/core/job.py +48 -0
- AIConnector/core/job_manager.py +219 -0
- AIConnector/core/message_factory.py +44 -0
- AIConnector/discovery/__init__.py +0 -0
- AIConnector/discovery/azure_discovery_service.py +206 -0
- AIConnector/discovery/base_discovery_service.py +27 -0
- AIConnector/discovery/discovery_service.py +226 -0
- AIConnector/session.py +274 -0
- genai_protocol_lite-1.0.0.dist-info/METADATA +186 -0
- genai_protocol_lite-1.0.0.dist-info/RECORD +26 -0
- genai_protocol_lite-1.0.0.dist-info/WHEEL +5 -0
- genai_protocol_lite-1.0.0.dist-info/licenses/LICENSE +201 -0
- genai_protocol_lite-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,219 @@
|
|
1
|
+
import asyncio
|
2
|
+
import uuid
|
3
|
+
import logging
|
4
|
+
from typing import Any, Callable, Awaitable, Dict, List, Optional
|
5
|
+
|
6
|
+
from AIConnector.common.message import MessageTypes
|
7
|
+
from .job import Job
|
8
|
+
|
9
|
+
logger = logging.getLogger(__name__)
|
10
|
+
|
11
|
+
|
12
|
+
class JobManager:
|
13
|
+
"""
|
14
|
+
Manages asynchronous jobs by creating, monitoring, and reporting their status.
|
15
|
+
"""
|
16
|
+
|
17
|
+
def __init__(self, send_message_callback: Callable[..., Awaitable[None]]) -> None:
|
18
|
+
"""
|
19
|
+
Initialize the JobManager.
|
20
|
+
|
21
|
+
Args:
|
22
|
+
send_message_callback (Callable[..., Awaitable[None]]):
|
23
|
+
Asynchronous callback to send messages about job status.
|
24
|
+
"""
|
25
|
+
self.send_message_callback: Callable[..., Awaitable[None]] = send_message_callback
|
26
|
+
self.active_jobs: Dict[str, Job] = {} # Maps job IDs to Job instances
|
27
|
+
self.jobs: List[Callable[[Any], Awaitable[Any]]] = [] # Registered job callables
|
28
|
+
self.monitoring_task: Optional[asyncio.Task[Any]] = None # Task for monitoring jobs
|
29
|
+
self.job_futures: Dict[str, asyncio.Future[Any]] = {} # Futures for job results
|
30
|
+
|
31
|
+
async def create_job(
|
32
|
+
self,
|
33
|
+
data: Any,
|
34
|
+
job_callable: Callable[[Any], Awaitable[Any]],
|
35
|
+
peer_id: str,
|
36
|
+
queue_id: str,
|
37
|
+
) -> str:
|
38
|
+
"""
|
39
|
+
Create a new job, start its asynchronous task, and notify via system messages.
|
40
|
+
|
41
|
+
Args:
|
42
|
+
data (Any): Data associated with the job.
|
43
|
+
job_callable (Callable[[Any], Awaitable[Any]]): Asynchronous callable to process the job.
|
44
|
+
peer_id (str): Identifier of the peer associated with the job.
|
45
|
+
queue_id (str): Identifier for the job queue.
|
46
|
+
|
47
|
+
Returns:
|
48
|
+
str: A unique job identifier.
|
49
|
+
"""
|
50
|
+
# Generate a unique job ID.
|
51
|
+
job_id = str(uuid.uuid4())
|
52
|
+
# Create a new job instance and store it.
|
53
|
+
job = Job(job_id, data, peer_id, queue_id)
|
54
|
+
self.active_jobs[job_id] = job
|
55
|
+
|
56
|
+
# Create a future to await the job's result.
|
57
|
+
future = asyncio.get_event_loop().create_future()
|
58
|
+
self.job_futures[job_id] = future
|
59
|
+
|
60
|
+
# Notify that the job has been initialized.
|
61
|
+
await self.send_message_callback(
|
62
|
+
message_type=MessageTypes.SYSTEM_MESSAGE.value,
|
63
|
+
job_id=job_id,
|
64
|
+
peer_id=peer_id,
|
65
|
+
queue_id=queue_id,
|
66
|
+
text=f"Job initialized with id {job_id}",
|
67
|
+
)
|
68
|
+
|
69
|
+
# Start the job's asynchronous task.
|
70
|
+
job.task = asyncio.create_task(job_callable(data))
|
71
|
+
# Notify that the job has started.
|
72
|
+
await self.send_message_callback(
|
73
|
+
message_type=MessageTypes.SYSTEM_MESSAGE.value,
|
74
|
+
job_id=job_id,
|
75
|
+
peer_id=peer_id,
|
76
|
+
queue_id=queue_id,
|
77
|
+
text=f"Job started with id {job_id}",
|
78
|
+
)
|
79
|
+
return job_id
|
80
|
+
|
81
|
+
async def start_monitoring(self, interval: float = 1) -> None:
|
82
|
+
"""
|
83
|
+
Continuously monitor active jobs, sending heartbeat and final messages.
|
84
|
+
|
85
|
+
Args:
|
86
|
+
interval (float): Time (in seconds) between monitoring checks.
|
87
|
+
"""
|
88
|
+
while True:
|
89
|
+
finished_jobs: List[str] = []
|
90
|
+
# Iterate through a copy of active jobs to safely modify the dictionary.
|
91
|
+
for job_id, job in list(self.active_jobs.items()):
|
92
|
+
peer_id = job.peer_id
|
93
|
+
queue_id = job.queue_id
|
94
|
+
if job.task.done():
|
95
|
+
try:
|
96
|
+
# Get the result of the job.
|
97
|
+
result = job.task.result()
|
98
|
+
except Exception as e:
|
99
|
+
logger.error(f"[JobManager] Error: {e}")
|
100
|
+
result = f"Job error: {e}"
|
101
|
+
|
102
|
+
# Set the result for the awaiting future.
|
103
|
+
future = self.job_futures.get(job_id)
|
104
|
+
if future and not future.done():
|
105
|
+
future.set_result(result)
|
106
|
+
|
107
|
+
# Notify that the job has finished.
|
108
|
+
await self.send_message_callback(
|
109
|
+
message_type=MessageTypes.FINAL_MESSAGE.value,
|
110
|
+
job_id=job_id,
|
111
|
+
peer_id=peer_id,
|
112
|
+
queue_id=queue_id,
|
113
|
+
text=result,
|
114
|
+
)
|
115
|
+
finished_jobs.append(job_id)
|
116
|
+
else:
|
117
|
+
# Send a heartbeat message to indicate the job is still running.
|
118
|
+
await self.send_message_callback(
|
119
|
+
message_type=MessageTypes.HEARTBEAT.value,
|
120
|
+
job_id=job_id,
|
121
|
+
queue_id=queue_id,
|
122
|
+
peer_id=peer_id,
|
123
|
+
text="working",
|
124
|
+
)
|
125
|
+
|
126
|
+
# Clean up finished jobs.
|
127
|
+
for job_id in finished_jobs:
|
128
|
+
del self.active_jobs[job_id]
|
129
|
+
if job_id in self.job_futures:
|
130
|
+
del self.job_futures[job_id]
|
131
|
+
|
132
|
+
await asyncio.sleep(interval)
|
133
|
+
|
134
|
+
async def await_job_result(self, job_id: str, timeout: Optional[float] = None) -> Any:
|
135
|
+
"""
|
136
|
+
Await the result of a specific job.
|
137
|
+
|
138
|
+
Args:
|
139
|
+
job_id (str): The unique job identifier.
|
140
|
+
timeout (Optional[float]): Optional timeout (in seconds).
|
141
|
+
|
142
|
+
Returns:
|
143
|
+
Any: The result of the job.
|
144
|
+
|
145
|
+
Raises:
|
146
|
+
ValueError: If the job does not exist.
|
147
|
+
"""
|
148
|
+
future = self.job_futures.get(job_id)
|
149
|
+
if not future:
|
150
|
+
raise ValueError(f"Job with {job_id=} doesn't exist")
|
151
|
+
return await asyncio.wait_for(future, timeout=timeout)
|
152
|
+
|
153
|
+
def start(self, interval: float = 1) -> None:
|
154
|
+
"""
|
155
|
+
Start monitoring active jobs.
|
156
|
+
|
157
|
+
Args:
|
158
|
+
interval (float): Time (in seconds) between monitoring checks.
|
159
|
+
"""
|
160
|
+
self.monitoring_task = asyncio.create_task(self.start_monitoring(interval))
|
161
|
+
|
162
|
+
async def stop(self) -> None:
|
163
|
+
"""
|
164
|
+
Stop monitoring active jobs by cancelling the monitoring task.
|
165
|
+
"""
|
166
|
+
if self.monitoring_task:
|
167
|
+
self.monitoring_task.cancel()
|
168
|
+
try:
|
169
|
+
await self.monitoring_task
|
170
|
+
except asyncio.CancelledError:
|
171
|
+
pass
|
172
|
+
|
173
|
+
async def process_message(self, msg: str, from_id: str, queue_id: str) -> None:
|
174
|
+
"""
|
175
|
+
Process an incoming message by creating a job using the first registered job callable.
|
176
|
+
|
177
|
+
Args:
|
178
|
+
msg (str): The message data.
|
179
|
+
from_id (str): Identifier of the peer that sent the message.
|
180
|
+
queue_id (str): Identifier for the job queue.
|
181
|
+
"""
|
182
|
+
if self.jobs:
|
183
|
+
job_callable = self.jobs[0]
|
184
|
+
job_id = await self.create_job(msg, job_callable, from_id, queue_id)
|
185
|
+
await self.send_message_callback(
|
186
|
+
message_type=MessageTypes.SYSTEM_MESSAGE.value,
|
187
|
+
job_id=job_id,
|
188
|
+
peer_id=from_id,
|
189
|
+
queue_id=queue_id,
|
190
|
+
text=f"Ack: created job with id {job_id} for peer {from_id}",
|
191
|
+
)
|
192
|
+
|
193
|
+
def register_job(self, job_call_back: Callable[[Any], Awaitable[Any]]) -> None:
|
194
|
+
"""
|
195
|
+
Register a new job callable.
|
196
|
+
|
197
|
+
Args:
|
198
|
+
job_call_back (Callable[[Any], Awaitable[Any]]):
|
199
|
+
Asynchronous callable that processes job data.
|
200
|
+
"""
|
201
|
+
self.jobs.append(job_call_back)
|
202
|
+
|
203
|
+
def job_list(self) -> List[Callable[[Any], Awaitable[Any]]]:
|
204
|
+
"""
|
205
|
+
Get the list of registered job callables.
|
206
|
+
|
207
|
+
Returns:
|
208
|
+
List[Callable[[Any], Awaitable[Any]]]: A list of job callables.
|
209
|
+
"""
|
210
|
+
return list(self.jobs)
|
211
|
+
|
212
|
+
def job_active_list(self) -> Dict[str, Job]:
|
213
|
+
"""
|
214
|
+
Get the dictionary of currently active jobs.
|
215
|
+
|
216
|
+
Returns:
|
217
|
+
Dict[str, Job]: A mapping of job IDs to Job instances.
|
218
|
+
"""
|
219
|
+
return self.active_jobs
|
@@ -0,0 +1,44 @@
|
|
1
|
+
from typing import Any, Dict, Optional
|
2
|
+
|
3
|
+
|
4
|
+
class MessageFactory:
    """
    Factory for generating message dictionaries with a specified type and sender.
    """

    def __init__(self, message_type: str, from_id: str, *args: Any, **kwargs: Any) -> None:
        """
        Initialize the MessageFactory.

        Args:
            message_type (str): The type of the message.
            from_id (str): The identifier of the sender.
            *args (Any): Additional positional arguments (stored but unused).
            **kwargs (Any): Additional keyword arguments merged into every
                generated message (they override the `text` field if present).
        """
        self.message_type = message_type
        self.from_id = from_id
        self.args = args
        self.kwargs = kwargs

    def generate_message(self, text: Optional[str] = None) -> Dict[str, Any]:
        """
        Generate a message dictionary.

        Args:
            text (Optional[str]): Optional text to include in the message.

        Returns:
            Dict[str, Any]: A dictionary containing the message data.
        """
        # Start from the mandatory fields: type and sender.
        payload: Dict[str, Any] = {"type": self.message_type, "from_id": self.from_id}
        if text is not None:
            payload["text"] = text
        # Extra keyword arguments are merged last, so they win over `text`.
        payload.update(self.kwargs)
        return payload
|
File without changes
|
@@ -0,0 +1,206 @@
|
|
1
|
+
import asyncio
|
2
|
+
import json
|
3
|
+
import logging
|
4
|
+
import time
|
5
|
+
import websockets
|
6
|
+
from typing import Callable, Optional, Dict, Any, Tuple
|
7
|
+
|
8
|
+
from azure.messaging.webpubsubservice import WebPubSubServiceClient
|
9
|
+
from AIConnector.common.exceptions import ParameterException
|
10
|
+
from AIConnector.discovery.base_discovery_service import BaseDiscoveryService
|
11
|
+
|
12
|
+
logger = logging.getLogger(__name__)
|
13
|
+
|
14
|
+
|
15
|
+
class AzureDiscoveryService(BaseDiscoveryService):
    """
    Service for peer discovery via Azure Web PubSub.

    This service functions similarly to the UDP-based DiscoveryService,
    but uses Azure for sending and receiving DISCOVERY messages.
    """

    def __init__(
        self,
        on_peer_discovered_callback: Optional[Callable[[Dict[str, Any], Tuple[str, int]], None]] = None,
        on_peer_lost: Optional[Callable[[Dict[str, Any], Tuple[str, int]], None]] = None,
        client_id: Optional[str] = None,
        client_name: Optional[str] = None,
        azure_endpoint_url: Optional[str] = None,
        azure_access_key: Optional[str] = None,
        azure_api_version: str = "1.0",
        discovery_hub: str = "discovery_demo",
        discovery_interval: float = 10,
        heartbeat_interval: float = 10,
    ) -> None:
        """
        Initialize the discovery service.

        Args:
            on_peer_discovered_callback: Callback invoked when a peer is discovered.
            on_peer_lost: Callback invoked when a peer is lost.
            client_id: Unique identifier of the client.
            client_name: Display name of the client.
            azure_endpoint_url: Azure Web PubSub endpoint URL (must start with "http://" or "https://").
            azure_access_key: Azure access key.
            azure_api_version: Azure API version.
            discovery_hub: Name of the discovery hub.
            discovery_interval: Interval (in seconds) for sending DISCOVERY messages.
            heartbeat_interval: Interval (in seconds) for cleaning up stale peer entries.

        Raises:
            ParameterException: If azure_endpoint_url is missing or malformed.
        """
        if not azure_endpoint_url or not azure_endpoint_url.startswith(("http://", "https://")):
            raise ParameterException(
                f"Invalid azure_endpoint_url: {azure_endpoint_url}. It must start with 'http://' or 'https://'."
            )
        self.azure_endpoint_url = azure_endpoint_url
        self.azure_access_key = azure_access_key
        self.azure_api_version = azure_api_version
        self.discovery_hub = discovery_hub
        self.discovery_interval = discovery_interval
        self.heartbeat_interval = heartbeat_interval

        self.on_peer_discovered_callback = on_peer_discovered_callback
        self.on_peer_lost_callback = on_peer_lost
        self.client_id = client_id
        self.client_name = client_name

        self.running = False
        self.allow_discovery = False
        self.discovered_peers: Dict[str, Dict[str, Any]] = {}

        # Initialize the service client for the discovery hub.
        self.service_client = WebPubSubServiceClient.from_connection_string(
            f"Endpoint={self.azure_endpoint_url};AccessKey={self.azure_access_key};ApiVersion={self.azure_api_version}",
            hub=self.discovery_hub,
        )
        self.websocket = None
        self._receive_task: Optional[asyncio.Task] = None
        self._announce_task: Optional[asyncio.Task] = None
        self._cleanup_task: Optional[asyncio.Task] = None

    async def start(self, allow_discovery: bool = True) -> None:
        """
        Start the discovery service by establishing a WebSocket connection with the discovery hub
        and starting background tasks for receiving DISCOVERY messages and periodic announcements.

        Args:
            allow_discovery: If True, enables peer discovery.
        """
        self.running = True
        self.allow_discovery = allow_discovery
        try:
            token = self.service_client.get_client_access_token()
            self.websocket = await websockets.connect(token["url"])
            logger.info(f"[AzureDiscoveryService] Connected to Azure Web PubSub discovery hub: {self.discovery_hub}")

            if allow_discovery:
                self._announce_task = asyncio.create_task(self._periodic_announce())
                logger.info("[AzureDiscoveryService] Azure Discovery Service started with peer discovery enabled")
            else:
                logger.info("[AzureDiscoveryService] Azure Discovery Service started with peer discovery disabled")

            self._cleanup_task = asyncio.create_task(self._cleanup_stale_peers())
            self._receive_task = asyncio.create_task(self._receive_messages())
        except Exception as e:
            logger.error(f"[AzureDiscoveryService] Discovery Service connection failed: {e}")

    async def stop(self) -> None:
        """
        Stop the discovery service and clean up resources.
        """
        self.running = False
        if self._receive_task:
            self._receive_task.cancel()
        if self._announce_task:
            self._announce_task.cancel()
        # The cleanup task was previously never cancelled, leaking a background
        # task that kept polling after stop().
        if self._cleanup_task:
            self._cleanup_task.cancel()
        if self.websocket:
            await self.websocket.close()
        self.discovered_peers.clear()
        logger.info("[AzureDiscoveryService] Azure Discovery Service stopped")

    async def _receive_messages(self) -> None:
        """
        Receive incoming DISCOVERY messages and update the list of discovered peers.
        """
        while self.running and self.websocket:
            try:
                message = await self.websocket.recv()
                msg = json.loads(message)
                # Payloads may arrive double-encoded (a JSON string containing JSON).
                if isinstance(msg, str):
                    msg = json.loads(msg)
                peer_id = msg.get("client_id")
                # Process DISCOVERY messages from other peers.
                if msg.get("type") == "DISCOVERY" and peer_id != self.client_id:
                    if peer_id:
                        result_msg = {
                            "timestamp": msg.get("timestamp", 0),
                            "display_name": msg.get("client_name", "Unknown"),
                            "client_id": peer_id,
                            "metadata": msg.get("metadata", {})
                        }
                        self.discovered_peers[peer_id] = result_msg
                        if self.on_peer_discovered_callback:
                            # Use a dummy address since Azure does not provide a peer's IP.
                            dummy_addr = ("0.0.0.0", 0)
                            try:
                                result = self.on_peer_discovered_callback(result_msg, dummy_addr)
                                if asyncio.iscoroutine(result):
                                    await result
                            except Exception as e:
                                logger.error(f"[DiscoveryService] Error in on_peer_discovered_callback: {e}")
            except websockets.exceptions.ConnectionClosed:
                logger.error("[AzureDiscoveryService] Discovery WebSocket connection closed")
                break
            except Exception as e:
                logger.error(f"[AzureDiscoveryService] Error processing discovery message: {e}")

    async def _periodic_announce(self) -> None:
        """
        Periodically send DISCOVERY messages through the discovery hub.
        """
        while self.running:
            try:
                discovery_msg = {
                    "type": "DISCOVERY",
                    "client_id": self.client_id,
                    "client_name": self.client_name or "Unknown",
                    "timestamp": time.time(),
                }
                self.service_client.send_to_all(json.dumps(discovery_msg))
            except Exception as e:
                logger.error(f"[AzureDiscoveryService] Error sending discovery message: {e}")
            else:
                logger.info("[AzureDiscoveryService] Discovery message sent")
            await asyncio.sleep(self.discovery_interval)

    async def _cleanup_stale_peers(self) -> None:
        """
        Periodically clean up stale peer entries.

        A peer is considered stale if its timestamp is older than 3 times the heartbeat_interval.
        """
        while self.running:
            try:
                # Peer timestamps come from time.time() in DISCOVERY messages
                # (wall clock).  The previous code compared them against the
                # event loop's monotonic clock, whose values are tiny by
                # comparison, so the staleness check could never fire and
                # dead peers were kept forever.
                current_time = time.time()
                stale_threshold = current_time - (self.heartbeat_interval * 3)

                stale_peers = [
                    peer_id for peer_id, info in self.discovered_peers.items()
                    if info["timestamp"] < stale_threshold
                ]

                for peer_id in stale_peers:
                    del self.discovered_peers[peer_id]
                    if self.on_peer_lost_callback:
                        try:
                            result = self.on_peer_lost_callback(peer_id)
                            if asyncio.iscoroutine(result):
                                await result
                        except Exception as e:
                            logger.error(f"[DiscoveryService] Error in on_peer_disappear callback: {e}")

                    logger.debug(f"[AzureDiscoveryService] Removed stale peer: {peer_id}")

            except Exception as e:
                logger.error(f"[AzureDiscoveryService] Error in cleanup task: {e}")

            await asyncio.sleep(self.heartbeat_interval)
|
@@ -0,0 +1,27 @@
|
|
1
|
+
from abc import ABC, abstractmethod
|
2
|
+
|
3
|
+
|
4
|
+
class BaseDiscoveryService(ABC):
    """
    Abstract base class for discovery services.

    Concrete backends (UDP broadcast, Azure Web PubSub, ...) implement the
    start/stop lifecycle defined here.
    """

    @abstractmethod
    def start(self, allow_discovery: bool = False) -> None:
        """
        Start the discovery service.

        Args:
            allow_discovery (bool, optional): Flag indicating whether peer discovery is enabled.
                Defaults to False.
        """

    @abstractmethod
    def stop(self) -> None:
        """
        Stop the discovery service.
        """
|