yellowstone-fumarole-client 0.1.0rc2__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,18 +3,21 @@ import logging
3
3
  from yellowstone_fumarole_client.grpc_connectivity import (
4
4
  FumaroleGrpcConnector,
5
5
  )
6
- from typing import Dict, Optional
6
+ from typing import AsyncGenerator, Optional
7
7
  from dataclasses import dataclass
8
- from . import config
8
+ from yellowstone_fumarole_client.config import FumaroleConfig
9
9
  from yellowstone_fumarole_client.runtime.aio import (
10
10
  AsyncioFumeDragonsmouthRuntime,
11
- FumaroleSM,
12
11
  DEFAULT_GC_INTERVAL,
13
12
  DEFAULT_SLOT_MEMORY_RETENTION,
14
13
  GrpcSlotDownloader,
15
14
  )
15
+ from yellowstone_fumarole_client.runtime.state_machine import (
16
+ FumaroleSM,
17
+ FumeOffset,
18
+ )
16
19
  from yellowstone_fumarole_proto.geyser_pb2 import SubscribeRequest, SubscribeUpdate
17
- from yellowstone_fumarole_proto.fumarole_v2_pb2 import (
20
+ from yellowstone_fumarole_proto.fumarole_pb2 import (
18
21
  ControlResponse,
19
22
  VersionRequest,
20
23
  VersionResponse,
@@ -29,9 +32,11 @@ from yellowstone_fumarole_proto.fumarole_v2_pb2 import (
29
32
  CreateConsumerGroupRequest,
30
33
  CreateConsumerGroupResponse,
31
34
  )
32
- from yellowstone_fumarole_proto.fumarole_v2_pb2_grpc import FumaroleStub
35
+ from yellowstone_fumarole_proto.fumarole_pb2_grpc import FumaroleStub
33
36
  import grpc
34
37
 
38
+ from yellowstone_fumarole_client import config
39
+
35
40
  __all__ = [
36
41
  "FumaroleClient",
37
42
  "FumaroleConfig",
@@ -44,7 +49,7 @@ __all__ = [
44
49
  ]
45
50
 
46
51
  # Constants
47
- DEFAULT_DRAGONSMOUTH_CAPACITY = 10000
52
+ DEFAULT_DRAGONSMOUTH_CAPACITY = 100000
48
53
  DEFAULT_COMMIT_INTERVAL = 5.0 # seconds
49
54
  DEFAULT_MAX_SLOT_DOWNLOAD_ATTEMPT = 3
50
55
  DEFAULT_CONCURRENT_DOWNLOAD_LIMIT_PER_TCP = 10
@@ -72,10 +77,23 @@ class FumaroleSubscribeConfig:
72
77
  # The interval at which to perform garbage collection on the slot memory.
73
78
  gc_interval: int = DEFAULT_GC_INTERVAL
74
79
 
75
- # The retention period for slot memory in seconds.
80
+ # How many processed slot numbers to retain in memory to avoid duplication.
76
81
  slot_memory_retention: int = DEFAULT_SLOT_MEMORY_RETENTION
77
82
 
78
83
 
84
+ @dataclass
85
+ class FumaroleSubscribeStats:
86
+ """Commit/slot statistics for the Fumarole subscribe session."""
87
+
88
+ # Last committed log offset in Fumarole -- this is a low-level implementation detail.
89
+ # NOTE: this should not be part of business logic; it can change at any time.
90
+ log_committed_offset: FumeOffset
91
+ # NOTE: this is low-level information; it can change at any time.
92
+ log_committable_offset: FumeOffset
93
+ # Max slot seen in the current session - does not mean it has been processed.
94
+ max_slot_seen: int
95
+
96
+
79
97
  # DragonsmouthAdapterSession
80
98
  @dataclass
81
99
  class DragonsmouthAdapterSession:
@@ -85,10 +103,31 @@ class DragonsmouthAdapterSession:
85
103
  sink: asyncio.Queue
86
104
 
87
105
  # The queue for receiving SubscribeUpdate from the dragonsmouth stream.
88
- source: asyncio.Queue
106
+ source: AsyncGenerator[SubscribeUpdate, None]
89
107
 
90
108
  # The task handle for the fumarole runtime.
91
- fumarole_handle: asyncio.Task
109
+ _fumarole_handle: asyncio.Task
110
+
111
+ _sm: FumaroleSM
112
+
113
+ async def __aenter__(self):
114
+ """Enter the session context."""
115
+ return self
116
+
117
+ async def __aexit__(self, exc_type, exc_value, traceback):
118
+ self.sink.shutdown()
119
+ self._fumarole_handle.cancel()
120
+
121
+ def stats(self) -> FumaroleSubscribeStats:
122
+ """Get low-level statistics of the Fumarole state-machine."""
123
+ commitable = self._sm.committable_offset
124
+ committed = self._sm.last_committed_offset
125
+ max_slot = self._sm.max_slot_detected
126
+ return FumaroleSubscribeStats(
127
+ log_committed_offset=committed,
128
+ log_committable_offset=commitable,
129
+ max_slot_seen=max_slot,
130
+ )
92
131
 
93
132
 
94
133
  # FumaroleClient
@@ -117,7 +156,7 @@ class FumaroleClient:
117
156
  async def version(self) -> VersionResponse:
118
157
  """Get the version of the Fumarole server."""
119
158
  request = VersionRequest()
120
- response = await self.stub.version(request)
159
+ response = await self.stub.Version(request)
121
160
  return response
122
161
 
123
162
  async def dragonsmouth_subscribe(
@@ -162,12 +201,12 @@ class FumaroleClient:
162
201
  try:
163
202
  update = await fume_control_plane_q.get()
164
203
  yield update
165
- except asyncio.QueueShutDown:
204
+ except (asyncio.CancelledError, asyncio.QueueShutDown):
166
205
  break
167
206
 
168
- fume_control_plane_stream_rx: grpc.aio.StreamStreamMultiCallable = (
169
- self.stub.Subscribe(control_plane_sink())
170
- )
207
+ fume_control_plane_stream_rx: grpc.aio.StreamStreamCall = self.stub.Subscribe(
208
+ control_plane_sink()
209
+ ) # it's actually InterceptedStreamStreamCall, but grpc lib doesn't export it
171
210
 
172
211
  control_response: ControlResponse = await fume_control_plane_stream_rx.read()
173
212
  init = control_response.init
@@ -187,8 +226,12 @@ class FumaroleClient:
187
226
  await fume_control_plane_rx_q.put(update)
188
227
  except asyncio.QueueShutDown:
189
228
  break
229
+ except asyncio.CancelledError:
230
+ break
231
+ finally:
232
+ fume_control_plane_rx_q.shutdown()
190
233
 
191
- _cp_src_task = asyncio.create_task(control_plane_source())
234
+ control_plane_src_task = asyncio.create_task(control_plane_source())
192
235
 
193
236
  FumaroleClient.logger.debug(f"Control response: {control_response}")
194
237
 
@@ -219,12 +262,36 @@ class FumaroleClient:
219
262
  max_concurrent_download=config.concurrent_download_limit,
220
263
  )
221
264
 
222
- fumarole_handle = asyncio.create_task(rt.run())
223
- FumaroleClient.logger.debug(f"Fumarole handle created: {fumarole_handle}")
265
+ async def rt_run(rt):
266
+ async with rt as rt:
267
+ await rt.run()
268
+
269
+ rt_task = asyncio.create_task(rt_run(rt))
270
+
271
+ async def fumarole_overseer():
272
+ done, pending = await asyncio.wait(
273
+ [rt_task, control_plane_src_task], return_when=asyncio.FIRST_COMPLETED
274
+ )
275
+ for t in pending:
276
+ t.cancel()
277
+
278
+ fumarole_handle = asyncio.create_task(fumarole_overseer())
279
+
280
+ async def source_gen() -> AsyncGenerator[SubscribeUpdate, None]:
281
+ try:
282
+ while True:
283
+ update = await dragonsmouth_outlet.get()
284
+ yield update
285
+ except (asyncio.CancelledError, asyncio.QueueShutDown):
286
+ pass
287
+ finally:
288
+ dragonsmouth_outlet.shutdown()
289
+
224
290
  return DragonsmouthAdapterSession(
225
291
  sink=subscribe_request_queue,
226
- source=dragonsmouth_outlet,
227
- fumarole_handle=fumarole_handle,
292
+ source=source_gen(),
293
+ _fumarole_handle=fumarole_handle,
294
+ _sm=sm,
228
295
  )
229
296
 
230
297
  async def list_consumer_groups(
@@ -1,14 +1,19 @@
1
1
  from dataclasses import dataclass
2
- from typing import Dict, Optional
2
+ from typing import Dict, Literal, Optional
3
3
  import yaml
4
4
 
5
5
 
6
+ SUPPORTED_COMPRESSION = ["gzip"]
7
+ SupportedCompression = Literal["gzip"]
8
+
9
+
6
10
  @dataclass
7
11
  class FumaroleConfig:
8
12
  endpoint: str
9
13
  x_token: Optional[str] = None
10
14
  max_decoding_message_size_bytes: int = 512_000_000
11
15
  x_metadata: Dict[str, str] = None
16
+ response_compression: Optional[SupportedCompression] = None
12
17
 
13
18
  def __post_init__(self):
14
19
  self.x_metadata = self.x_metadata or {}
@@ -16,6 +21,14 @@ class FumaroleConfig:
16
21
  @classmethod
17
22
  def from_yaml(cls, fileobj) -> "FumaroleConfig":
18
23
  data = yaml.safe_load(fileobj)
24
+ response_compression = data.get(
25
+ "response_compression", cls.response_compression
26
+ )
27
+ if (
28
+ response_compression is not None
29
+ and response_compression not in SUPPORTED_COMPRESSION
30
+ ):
31
+ raise ValueError(f"response_compression must be in {SUPPORTED_COMPRESSION}")
19
32
  return cls(
20
33
  endpoint=data["endpoint"],
21
34
  x_token=data.get("x-token") or data.get("x_token"),
@@ -23,4 +36,7 @@ class FumaroleConfig:
23
36
  "max_decoding_message_size_bytes", cls.max_decoding_message_size_bytes
24
37
  ),
25
38
  x_metadata=data.get("x-metadata", {}),
39
+ response_compression=data.get(
40
+ "response_compression", cls.response_compression
41
+ ),
26
42
  )
@@ -2,7 +2,7 @@ import logging
2
2
  from typing import Optional
3
3
  import grpc
4
4
  from yellowstone_fumarole_client.config import FumaroleConfig
5
- from yellowstone_fumarole_proto.fumarole_v2_pb2_grpc import FumaroleStub
5
+ from yellowstone_fumarole_proto.fumarole_pb2_grpc import FumaroleStub
6
6
 
7
7
  X_TOKEN_HEADER = "x-token"
8
8
 
@@ -31,34 +31,8 @@ class TritonAuthMetadataPlugin(grpc.AuthMetadataPlugin):
31
31
  return _triton_sign_request(callback, self.x_token, None)
32
32
 
33
33
 
34
- def grpc_channel(endpoint: str, x_token=None, compression=None, *grpc_options):
35
- options = [("grpc.max_receive_message_length", 111111110), *grpc_options]
36
- if x_token is not None:
37
- auth = TritonAuthMetadataPlugin(x_token)
38
- # ssl_creds allow you to use our https endpoint
39
- # grpc.ssl_channel_credentials with no arguments will look through your CA trust store.
40
- ssl_creds = grpc.ssl_channel_credentials()
41
-
42
- # call credentials will be sent on each request if setup with composite_channel_credentials.
43
- call_creds: grpc.CallCredentials = grpc.metadata_call_credentials(auth)
44
-
45
- # Combined creds will store the channel creds aswell as the call credentials
46
- combined_creds = grpc.composite_channel_credentials(ssl_creds, call_creds)
47
-
48
- return grpc.secure_channel(
49
- endpoint,
50
- credentials=combined_creds,
51
- compression=compression,
52
- options=options,
53
- )
54
- else:
55
- return grpc.insecure_channel(endpoint, compression=compression, options=options)
56
-
57
-
58
34
  # Because of a bug in grpcio library, multiple inheritance of ClientInterceptor subclasses does not work.
59
35
  # You have to create a new class for each type of interceptor you want to use.
60
-
61
-
62
36
  class MetadataInterceptor(
63
37
  grpc.aio.UnaryStreamClientInterceptor,
64
38
  grpc.aio.StreamUnaryClientInterceptor,
@@ -166,6 +140,11 @@ class FumaroleGrpcConnector:
166
140
  async def connect(self, *grpc_options) -> FumaroleStub:
167
141
  options = [("grpc.max_receive_message_length", 111111110), *grpc_options]
168
142
  interceptors = MetadataInterceptor(self.config.x_metadata).interceptors()
143
+ compression = (
144
+ grpc.Compression.Gzip
145
+ if self.config.response_compression == "gzip"
146
+ else None
147
+ )
169
148
  if self.config.x_token is not None:
170
149
  auth = TritonAuthMetadataPlugin(self.config.x_token)
171
150
  # ssl_creds allow you to use our https endpoint
@@ -184,6 +163,7 @@ class FumaroleGrpcConnector:
184
163
  self.endpoint,
185
164
  credentials=combined_creds,
186
165
  options=options,
166
+ compression=compression,
187
167
  interceptors=interceptors,
188
168
  )
189
169
  else:
@@ -191,7 +171,10 @@ class FumaroleGrpcConnector:
191
171
  "Using insecure channel without authentication"
192
172
  )
193
173
  channel = grpc.aio.insecure_channel(
194
- self.endpoint, options=options, interceptors=interceptors
174
+ self.endpoint,
175
+ options=options,
176
+ interceptors=interceptors,
177
+ compression=compression,
195
178
  )
196
179
 
197
180
  return FumaroleStub(channel)
@@ -1,10 +1,9 @@
1
1
  # DataPlaneConn
2
2
  from abc import abstractmethod, ABC
3
3
  import asyncio
4
- import uuid
5
4
  import grpc
6
- from typing import Optional, List
7
- from collections import abc, deque
5
+ from typing import Optional
6
+ from collections import deque
8
7
  from dataclasses import dataclass
9
8
  import time
10
9
  from yellowstone_fumarole_client.runtime.state_machine import (
@@ -19,7 +18,7 @@ from yellowstone_fumarole_proto.geyser_pb2 import (
19
18
  SubscribeUpdateSlot,
20
19
  CommitmentLevel as ProtoCommitmentLevel,
21
20
  )
22
- from yellowstone_fumarole_proto.fumarole_v2_pb2 import (
21
+ from yellowstone_fumarole_proto.fumarole_pb2 import (
23
22
  ControlCommand,
24
23
  PollBlockchainHistory,
25
24
  CommitOffset,
@@ -27,8 +26,8 @@ from yellowstone_fumarole_proto.fumarole_v2_pb2 import (
27
26
  DownloadBlockShard,
28
27
  BlockFilters,
29
28
  )
30
- from yellowstone_fumarole_proto.fumarole_v2_pb2_grpc import (
31
- Fumarole as GrpcFumaroleClient,
29
+ from yellowstone_fumarole_proto.fumarole_pb2_grpc import (
30
+ FumaroleStub,
32
31
  )
33
32
  from yellowstone_fumarole_client.utils.aio import Interval
34
33
  from yellowstone_fumarole_client.grpc_connectivity import FumaroleGrpcConnector
@@ -84,6 +83,12 @@ class AsyncSlotDownloader(ABC):
84
83
  pass
85
84
 
86
85
 
86
+ SUBSCRIBE_REQ_UPDATE_TYPE_MARKER: int = 1
87
+ CONTROL_PLANE_RESP_TYPE_MARKER: int = 2
88
+ COMMIT_TICK_TYPE_MARKER: int = 3
89
+ DOWNLOAD_TASK_TYPE_MARKER: int = 4
90
+
91
+
87
92
  # TokioFumeDragonsmouthRuntime
88
93
  class AsyncioFumeDragonsmouthRuntime:
89
94
  """Asynchronous runtime for Fumarole with Dragonsmouth-like stream support."""
@@ -119,17 +124,32 @@ class AsyncioFumeDragonsmouthRuntime:
119
124
  """
120
125
  self.sm = sm
121
126
  self.slot_downloader: AsyncSlotDownloader = slot_downloader
122
- self.subscribe_request_update_q = subscribe_request_update_q
127
+ self.subscribe_request_update_rx: asyncio.Queue = subscribe_request_update_q
123
128
  self.subscribe_request = subscribe_request
124
129
  self.consumer_group_name = consumer_group_name
125
- self.control_plane_tx = control_plane_tx_q
126
- self.control_plane_rx = control_plane_rx_q
127
- self.dragonsmouth_outlet = dragonsmouth_outlet
130
+ self.control_plane_tx: asyncio.Queue = control_plane_tx_q
131
+ self.control_plane_rx: asyncio.Queue = control_plane_rx_q
132
+ self.dragonsmouth_outlet: asyncio.Queue = dragonsmouth_outlet
128
133
  self.commit_interval = commit_interval
129
134
  self.gc_interval = gc_interval
130
135
  self.max_concurrent_download = max_concurrent_download
136
+
137
+ # holds metadata about the download task
131
138
  self.download_tasks = dict()
132
- self.inner_runtime_channel: asyncio.Queue = asyncio.Queue()
139
+ self.inflight_tasks = dict()
140
+
141
+ async def __aenter__(self):
142
+ return self
143
+
144
+ async def __aexit__(self, exc_type, exc_value, traceback):
145
+ await self.aclose()
146
+
147
+ async def aclose(self):
148
+ self.control_plane_tx.shutdown()
149
+ self.dragonsmouth_outlet.shutdown()
150
+ for t, kind in self.inflight_tasks.items():
151
+ LOGGER.debug(f"closing {kind} task")
152
+ t.cancel()
133
153
 
134
154
  def _build_poll_history_cmd(
135
155
  self, from_offset: Optional[FumeOffset]
@@ -195,8 +215,9 @@ class AsyncioFumeDragonsmouthRuntime:
195
215
  coro = self.slot_downloader.run_download(
196
216
  self.subscribe_request, download_task_args
197
217
  )
198
- donwload_task = asyncio.create_task(coro)
199
- self.download_tasks[donwload_task] = download_request
218
+ download_task = asyncio.create_task(coro)
219
+ self.download_tasks[download_task] = download_request
220
+ self.inflight_tasks[download_task] = DOWNLOAD_TASK_TYPE_MARKER
200
221
  LOGGER.debug(f"Scheduling download task for slot {download_request.slot}")
201
222
 
202
223
  def _handle_download_result(self, download_result: DownloadTaskResult):
@@ -219,10 +240,10 @@ class AsyncioFumeDragonsmouthRuntime:
219
240
  )
220
241
 
221
242
  async def _commit_offset(self):
243
+ self.last_commit = time.time()
222
244
  if self.sm.last_committed_offset < self.sm.committable_offset:
223
245
  LOGGER.debug(f"Committing offset {self.sm.committable_offset}")
224
246
  await self._force_commit_offset()
225
- self.last_commit = time.time()
226
247
 
227
248
  async def _drain_slot_status(self):
228
249
  """Drains the slot status from the state machine and sends updates to the Dragonsmouth outlet."""
@@ -245,8 +266,10 @@ class AsyncioFumeDragonsmouthRuntime:
245
266
  matched_filters.append(filter_name)
246
267
  elif not filter.filter_by_commitment:
247
268
  matched_filters.append(filter_name)
248
-
249
269
  if matched_filters:
270
+ LOGGER.debug(
271
+ f"Matched {len(matched_filters)} filters for SlotStatus Update"
272
+ )
250
273
  update = SubscribeUpdate(
251
274
  filters=matched_filters,
252
275
  created_at=None,
@@ -257,10 +280,7 @@ class AsyncioFumeDragonsmouthRuntime:
257
280
  dead_error=slot_status.dead_error,
258
281
  ),
259
282
  )
260
- try:
261
- await self.dragonsmouth_outlet.put(update)
262
- except asyncio.QueueFull:
263
- return
283
+ await self.dragonsmouth_outlet.put(update)
264
284
 
265
285
  self.sm.mark_event_as_processed(slot_status.session_sequence)
266
286
 
@@ -286,16 +306,19 @@ class AsyncioFumeDragonsmouthRuntime:
286
306
  LOGGER.debug("Initial commit offset command sent")
287
307
  ticks = 0
288
308
 
289
- task_map = {
309
+ self.inflight_tasks = {
310
+ asyncio.create_task(
311
+ self.subscribe_request_update_rx.get()
312
+ ): SUBSCRIBE_REQ_UPDATE_TYPE_MARKER,
313
+ asyncio.create_task(
314
+ self.control_plane_rx.get()
315
+ ): CONTROL_PLANE_RESP_TYPE_MARKER,
290
316
  asyncio.create_task(
291
- self.subscribe_request_update_q.get()
292
- ): "dragonsmouth_bidi",
293
- asyncio.create_task(self.control_plane_rx.get()): "control_plane_rx",
294
- asyncio.create_task(Interval(self.commit_interval).tick()): "commit_tick",
317
+ Interval(self.commit_interval).tick()
318
+ ): COMMIT_TICK_TYPE_MARKER,
295
319
  }
296
320
 
297
- pending = set(task_map.keys())
298
- while pending:
321
+ while self.inflight_tasks:
299
322
  ticks += 1
300
323
  LOGGER.debug(f"Runtime loop tick")
301
324
  if ticks % self.gc_interval == 0:
@@ -306,58 +329,51 @@ class AsyncioFumeDragonsmouthRuntime:
306
329
  await self.poll_history_if_needed()
307
330
  LOGGER.debug("Scheduling download tasks if any")
308
331
  self._schedule_download_task_if_any()
309
- for t in self.download_tasks.keys():
310
- pending.add(t)
311
- task_map[t] = "download_task"
312
-
313
332
  download_task_inflight = len(self.download_tasks)
314
333
  LOGGER.debug(
315
334
  f"Current download tasks in flight: {download_task_inflight} / {self.max_concurrent_download}"
316
335
  )
317
- done, pending = await asyncio.wait(
318
- pending, return_when=asyncio.FIRST_COMPLETED
336
+ done, _pending = await asyncio.wait(
337
+ self.inflight_tasks.keys(), return_when=asyncio.FIRST_COMPLETED
319
338
  )
320
339
  for t in done:
321
340
  result = t.result()
322
- name = task_map.pop(t)
323
- match name:
324
- case "dragonsmouth_bidi":
325
- LOGGER.debug("Dragonsmouth subscribe request received")
326
- assert isinstance(
327
- result, SubscribeRequest
328
- ), "Expected SubscribeRequest"
329
- self.handle_new_subscribe_request(result)
330
- new_task = asyncio.create_task(
331
- self.subscribe_request_update_q.get()
332
- )
333
- task_map[new_task] = "dragonsmouth_bidi"
334
- pending.add(new_task)
335
- pass
336
- case "control_plane_rx":
337
- LOGGER.debug("Control plane response received")
338
- if not await self._handle_control_plane_resp(result):
339
- LOGGER.debug("Control plane error")
340
- return
341
- new_task = asyncio.create_task(self.control_plane_rx.get())
342
- task_map[new_task] = "control_plane_rx"
343
- pending.add(new_task)
344
- case "download_task":
345
- LOGGER.debug("Download task result received")
346
- assert self.download_tasks.pop(t)
347
- self._handle_download_result(result)
348
- case "commit_tick":
349
- LOGGER.debug("Commit tick reached")
350
- await self._commit_offset()
351
- new_task = asyncio.create_task(
352
- Interval(self.commit_interval).tick()
353
- )
354
- task_map[new_task] = "commit_tick"
355
- pending.add(new_task)
356
- case unknown:
357
- raise RuntimeError(f"Unexpected task name: {unknown}")
341
+ sigcode = self.inflight_tasks.pop(t)
342
+ if sigcode == SUBSCRIBE_REQ_UPDATE_TYPE_MARKER:
343
+ LOGGER.debug("Dragonsmouth subscribe request received")
344
+ assert isinstance(
345
+ result, SubscribeRequest
346
+ ), "Expected SubscribeRequest"
347
+ self.handle_new_subscribe_request(result)
348
+ new_task = asyncio.create_task(
349
+ self.subscribe_request_update_rx.get()
350
+ )
351
+ self.inflight_tasks[new_task] = SUBSCRIBE_REQ_UPDATE_TYPE_MARKER
352
+ pass
353
+ elif sigcode == CONTROL_PLANE_RESP_TYPE_MARKER:
354
+ LOGGER.debug("Control plane response received")
355
+ if not await self._handle_control_plane_resp(result):
356
+ LOGGER.debug("Control plane error")
357
+ return
358
+ new_task = asyncio.create_task(self.control_plane_rx.get())
359
+ self.inflight_tasks[new_task] = CONTROL_PLANE_RESP_TYPE_MARKER
360
+ elif sigcode == DOWNLOAD_TASK_TYPE_MARKER:
361
+ LOGGER.debug("Download task result received")
362
+ assert self.download_tasks.pop(t)
363
+ self._handle_download_result(result)
364
+ elif sigcode == COMMIT_TICK_TYPE_MARKER:
365
+ LOGGER.debug("Commit tick reached")
366
+ await self._commit_offset()
367
+ new_task = asyncio.create_task(
368
+ Interval(self.commit_interval).tick()
369
+ )
370
+ self.inflight_tasks[new_task] = COMMIT_TICK_TYPE_MARKER
371
+ else:
372
+ raise RuntimeError(f"Unexpected task name: {sigcode}")
358
373
 
359
374
  await self._drain_slot_status()
360
375
 
376
+ await self.aclose()
361
377
  LOGGER.debug("Fumarole runtime exiting")
362
378
 
363
379
 
@@ -391,7 +407,7 @@ class GrpcSlotDownloader(AsyncSlotDownloader):
391
407
 
392
408
  def __init__(
393
409
  self,
394
- client: GrpcFumaroleClient,
410
+ client: FumaroleStub,
395
411
  ):
396
412
  self.client = client
397
413
 
@@ -420,7 +436,7 @@ class GrpcDownloadBlockTaskRun:
420
436
  def __init__(
421
437
  self,
422
438
  download_request: FumeDownloadRequest,
423
- client: GrpcFumaroleClient,
439
+ client: FumaroleStub,
424
440
  filters: Optional[BlockFilters],
425
441
  dragonsmouth_oulet: asyncio.Queue,
426
442
  ):
@@ -1,11 +1,11 @@
1
- from typing import Optional, List, Dict, Set, Deque, Tuple, Any
1
+ from typing import Optional, Set, Deque, Sequence
2
2
  from collections import deque, defaultdict
3
- from yellowstone_fumarole_proto.fumarole_v2_pb2 import (
3
+ from yellowstone_fumarole_proto.fumarole_pb2 import (
4
4
  CommitmentLevel,
5
5
  BlockchainEvent,
6
6
  )
7
+ from yellowstone_fumarole_client.utils.collections import OrderedSet
7
8
  import heapq
8
- import uuid
9
9
  from enum import Enum
10
10
 
11
11
  __all__ = [
@@ -107,7 +107,7 @@ class FumaroleSM:
107
107
  def __init__(self, last_committed_offset: FumeOffset, slot_memory_retention: int):
108
108
  self.last_committed_offset = last_committed_offset
109
109
  self.slot_commitment_progression = dict() # Slot -> SlotCommitmentProgression
110
- self.downloaded_slot = set() # Set of downloaded slots
110
+ self.downloaded_slot = OrderedSet() # Set of downloaded slots
111
111
  self.inflight_slot_shard_download = {} # Slot -> SlotDownloadProgress
112
112
  self.blocked_slot_status_update = defaultdict(
113
113
  deque
@@ -121,7 +121,9 @@ class FumaroleSM:
121
121
  ] = deque()
122
122
  self.sequence = 1
123
123
  self.last_processed_fume_sequence = 0
124
- self.sequence_to_offset = {} # FumeSessionSequence -> FumeOffset
124
+ self.sequence_to_offset: dict[FumeSessionSequence, FumeOffset] = (
125
+ {}
126
+ ) # FumeSessionSequence -> FumeOffset
125
127
  self.slot_memory_retention = slot_memory_retention
126
128
 
127
129
  def update_committed_offset(self, offset: FumeOffset) -> None:
@@ -138,14 +140,14 @@ class FumaroleSM:
138
140
  def gc(self) -> None:
139
141
  """Garbage collect old slots to respect memory retention limit."""
140
142
  while len(self.downloaded_slot) > self.slot_memory_retention:
141
- slot = self.downloaded_slot.pop(0) if self.downloaded_slot else None
143
+ slot = self.downloaded_slot.popfirst() if self.downloaded_slot else None
142
144
  if slot is None:
143
145
  break
144
146
  self.slot_commitment_progression.pop(slot, None)
145
147
  self.inflight_slot_shard_download.pop(slot, None)
146
148
  self.blocked_slot_status_update.pop(slot, None)
147
149
 
148
- def queue_blockchain_event(self, events: List[BlockchainEvent]) -> None:
150
+ def queue_blockchain_event(self, events: Sequence[BlockchainEvent]) -> None:
149
151
  """Queue blockchain events for processing."""
150
152
  for event in events:
151
153
 
@@ -232,7 +234,7 @@ class FumaroleSM:
232
234
  blockchain_event: BlockchainEvent = blockchain_event
233
235
  event_cl = blockchain_event.commitment_level
234
236
 
235
- if event_cl < min_commitment:
237
+ if event_cl != min_commitment:
236
238
  self.slot_status_update_queue.append(
237
239
  FumeSlotStatus(
238
240
  session_sequence=session_sequence,
@@ -266,6 +268,18 @@ class FumaroleSM:
266
268
  else:
267
269
  blockchain_id = bytes(blockchain_event.blockchain_id)
268
270
  block_uid = bytes(blockchain_event.block_uid)
271
+
272
+ self.blocked_slot_status_update[blockchain_event.slot].append(
273
+ FumeSlotStatus(
274
+ session_sequence=session_sequence,
275
+ offset=blockchain_event.offset,
276
+ slot=blockchain_event.slot,
277
+ parent_slot=blockchain_event.parent_slot,
278
+ commitment_level=event_cl,
279
+ dead_error=blockchain_event.dead_error,
280
+ )
281
+ )
282
+
269
283
  if blockchain_event.slot not in self.inflight_slot_shard_download:
270
284
  download_request = FumeDownloadRequest(
271
285
  slot=blockchain_event.slot,
@@ -280,16 +294,6 @@ class FumaroleSM:
280
294
  self.inflight_slot_shard_download[blockchain_event.slot] = (
281
295
  download_progress
282
296
  )
283
- self.blocked_slot_status_update[blockchain_event.slot].append(
284
- FumeSlotStatus(
285
- session_sequence=session_sequence,
286
- offset=blockchain_event.offset,
287
- slot=blockchain_event.slot,
288
- parent_slot=blockchain_event.parent_slot,
289
- commitment_level=event_cl,
290
- dead_error=blockchain_event.dead_error,
291
- )
292
- )
293
297
  return download_request
294
298
  return None
295
299
 
@@ -298,13 +302,10 @@ class FumaroleSM:
298
302
  fume_offset = self.sequence_to_offset.pop(event_seq_number, None)
299
303
  if fume_offset is None:
300
304
  raise ValueError("Event sequence number not found")
301
- heapq.heappush(
302
- self.processed_offset, (-event_seq_number, -fume_offset)
303
- ) # Negate for min-heap
305
+ heapq.heappush(self.processed_offset, (event_seq_number, fume_offset))
304
306
 
305
307
  while self.processed_offset:
306
308
  seq, offset = self.processed_offset[0]
307
- seq, offset = -seq, -offset # Convert back to positive
308
309
  if seq != self.last_processed_fume_sequence + 1:
309
310
  break
310
311
  heapq.heappop(self.processed_offset)