jararaca-0.3.11a7-py3-none-any.whl → jararaca-0.3.11a9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -95,39 +95,44 @@ class AioPikaMicroserviceConsumer:
         self.lock = asyncio.Lock()
         self.tasks: set[asyncio.Task[Any]] = set()
 
-    async def consume(self, passive_declare: bool) -> None:
-
+    async def consume(self) -> None:
         connection = await aio_pika.connect(self.config.url)
-
         channel = await connection.channel()
-
         await channel.set_qos(prefetch_count=self.config.prefetch_count)
 
-        main_ex = await RabbitmqUtils.declare_main_exchange(
-            channel=channel,
-            exchange_name=self.config.exchange,
-            passive=passive_declare,
-        )
-
-        dlx, dlq = await RabbitmqUtils.declare_dl_kit(channel=channel)
+        # Get existing exchange and DL kit
+        try:
+            main_ex = await RabbitmqUtils.get_main_exchange(
+                channel=channel,
+                exchange_name=self.config.exchange,
+            )
+            dlx, dlq = await RabbitmqUtils.get_dl_kit(channel=channel)
+        except Exception as e:
+            logger.error(
+                f"Required exchange or queue infrastructure not found. "
+                f"Please use the declare command first to create the required infrastructure. Error: {e}"
+            )
+            self.shutdown_event.set()
+            return
 
         for handler in self.message_handler_set:
-
             queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
             routing_key = f"{handler.message_type.MESSAGE_TOPIC}.#"
 
             self.incoming_map[queue_name] = handler
 
-            queue: aio_pika.abc.AbstractQueue = await channel.declare_queue(
-                passive=passive_declare,
-                name=queue_name,
-                arguments={
-                    "x-dead-letter-exchange": dlx.name,
-                    "x-dead-letter-routing-key": dlq.name,
-                },
-            )
-
-            await queue.bind(exchange=main_ex, routing_key=routing_key)
+            # Get existing queue
+            try:
+                queue = await RabbitmqUtils.get_worker_v1_queue(
+                    channel=channel,
+                    queue_name=queue_name,
+                )
+            except Exception as e:
+                logger.error(
+                    f"Worker queue '{queue_name}' not found. "
+                    f"Please use the declare command first to create the queue. Error: {e}"
+                )
+                continue
 
             await queue.consume(
                 callback=MessageHandlerCallback(
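
The `RabbitmqUtils.get_*` helpers referenced above are not included in this diff. As a rough sketch of the lookup-without-declare pattern they appear to wrap, here are illustrative helpers built on aio_pika's documented `get_exchange`/`get_queue` API (names are assumptions, not the package's actual implementation):

    import aio_pika
    import aio_pika.abc

    async def get_existing_exchange(
        channel: aio_pika.abc.AbstractChannel, name: str
    ) -> aio_pika.abc.AbstractExchange:
        # Passive lookup: raises if the exchange was never declared,
        # instead of silently creating it.
        return await channel.get_exchange(name, ensure=True)

    async def get_existing_queue(
        channel: aio_pika.abc.AbstractChannel, name: str
    ) -> aio_pika.abc.AbstractQueue:
        # Same idea for queues: fail fast so the operator is told to run
        # the declare command first.
        return await channel.get_queue(name, ensure=True)
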
@@ -337,7 +342,7 @@ class MessageBusWorker:
             raise RuntimeError("Consumer not started")
         return self._consumer
 
-    async def start_async(self, passive_declare: bool) -> None:
+    async def start_async(self, handler_names: set[str] | None = None) -> None:
         all_message_handlers_set: MESSAGE_HANDLER_DATA_SET = set()
         async with self.lifecycle():
             for instance_type in self.app.controllers:
@@ -356,6 +361,15 @@ class MessageBusWorker:
                for handler_data in handlers:
                    message_type = handler_data.spec.message_type
                    topic = message_type.MESSAGE_TOPIC
+
+                   # Filter handlers by name if specified
+                   if handler_names is not None and handler_data.spec.name is not None:
+                       if handler_data.spec.name not in handler_names:
+                           continue
+                   elif handler_names is not None and handler_data.spec.name is None:
+                       # Skip handlers without names when filtering is requested
+                       continue
+
                    if (
                        topic in message_handler_data_map
                        and message_type.MESSAGE_TYPE == "task"
@@ -374,9 +388,9 @@ class MessageBusWorker:
                 uow_context_provider=self.uow_context_provider,
             )
 
-            await consumer.consume(passive_declare=passive_declare)
+            await consumer.consume()
 
-    def start_sync(self, passive_declare: bool) -> None:
+    def start_sync(self, handler_names: set[str] | None = None) -> None:
 
         def on_shutdown(loop: asyncio.AbstractEventLoop) -> None:
             logger.info("Shutting down")
@@ -386,7 +400,7 @@ class MessageBusWorker:
            runner.get_loop().add_signal_handler(
                signal.SIGINT, on_shutdown, runner.get_loop()
            )
-           runner.run(self.start_async(passive_declare=passive_declare))
+           runner.run(self.start_async(handler_names=handler_names))
 
 
 class AioPikaMessageBusController(BusMessageController):
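
A hypothetical call into the changed entry points (constructor arguments omitted; handler names are placeholders):

    # Run only the handlers registered under these names; passing None (the
    # default) keeps the previous behavior of consuming every handler.
    worker.start_sync(handler_names={"send_email", "sync_invoices"})

    # or, from inside an already running event loop:
    await worker.start_async(handler_names={"send_email"})
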
@@ -12,6 +12,7 @@ from urllib.parse import parse_qs, urlparse
 import aio_pika
 import aio_pika.abc
 import uvloop
+from aio_pika.exceptions import AMQPError, ChannelClosed, ChannelNotFoundEntity
 from pydantic import BaseModel
 
 from jararaca.broker_backend import MessageBrokerBackend
@@ -105,7 +106,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         message_handler_set: MESSAGE_HANDLER_DATA_SET,
         scheduled_actions: SCHEDULED_ACTION_DATA_SET,
         uow_context_provider: UnitOfWorkContextProvider,
-        passive_declare: bool = False,
     ):
 
         self.broker_backend = broker_backend
@@ -117,7 +117,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         self.shutdown_event = asyncio.Event()
         self.lock = asyncio.Lock()
         self.tasks: set[asyncio.Task[Any]] = set()
-        self.passive_declare = passive_declare
 
     async def consume(self) -> None:
 
@@ -127,21 +126,22 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
 
         await channel.set_qos(prefetch_count=self.config.prefetch_count)
 
-        await RabbitmqUtils.declare_main_exchange(
-            channel=channel,
-            exchange_name=self.config.exchange,
-            passive=self.passive_declare,
-        )
-
-        dlx = await RabbitmqUtils.declare_dl_exchange(
-            channel=channel, passive=self.passive_declare
-        )
-
-        dlq = await RabbitmqUtils.declare_dl_queue(
-            channel=channel, passive=self.passive_declare
-        )
-
-        await dlq.bind(dlx, routing_key=RabbitmqUtils.DEAD_LETTER_EXCHANGE)
+        # Get existing exchange and queues
+        try:
+            exchange = await RabbitmqUtils.get_main_exchange(
+                channel=channel,
+                exchange_name=self.config.exchange,
+            )
+
+            dlx = await RabbitmqUtils.get_dl_exchange(channel=channel)
+            dlq = await RabbitmqUtils.get_dl_queue(channel=channel)
+        except (ChannelNotFoundEntity, ChannelClosed, AMQPError) as e:
+            logger.critical(
+                f"Required exchange or queue infrastructure not found and passive mode is enabled. "
+                f"Please use the declare command first to create the required infrastructure. Error: {e}"
+            )
+            self.shutdown_event.set()
+            return
 
         for handler in self.message_handler_set:
 
@@ -150,11 +150,16 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
 
             self.incoming_map[queue_name] = handler
 
-            queue = await RabbitmqUtils.declare_queue(
-                channel=channel, queue_name=queue_name, passive=self.passive_declare
-            )
-
-            await queue.bind(exchange=self.config.exchange, routing_key=routing_key)
+            try:
+                queue = await RabbitmqUtils.get_queue(
+                    channel=channel, queue_name=queue_name
+                )
+            except (ChannelNotFoundEntity, ChannelClosed, AMQPError) as e:
+                logger.error(
+                    f"Queue '{queue_name}' not found and passive mode is enabled. "
+                    f"Please use the declare command first to create the queue. Error: {e}"
+                )
+                continue
 
             await queue.consume(
                 callback=MessageHandlerCallback(
@@ -174,11 +179,16 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
 
             routing_key = queue_name
 
-            queue = await RabbitmqUtils.declare_queue(
-                channel=channel, queue_name=queue_name, passive=self.passive_declare
-            )
-
-            await queue.bind(exchange=self.config.exchange, routing_key=routing_key)
+            try:
+                queue = await RabbitmqUtils.get_queue(
+                    channel=channel, queue_name=queue_name
+                )
+            except (ChannelNotFoundEntity, ChannelClosed, AMQPError) as e:
+                logger.error(
+                    f"Scheduler queue '{queue_name}' not found and passive mode is enabled. "
+                    f"Please use the declare command first to create the queue. Error: {e}"
+                )
+                continue
 
             await queue.consume(
                 callback=ScheduledMessageHandlerCallback(
@@ -515,10 +525,17 @@ async def none_context() -> AsyncGenerator[None, None]:
 
 
 class MessageBusWorker:
-    def __init__(self, app: Microservice, broker_url: str, backend_url: str) -> None:
+    def __init__(
+        self,
+        app: Microservice,
+        broker_url: str,
+        backend_url: str,
+        handler_names: set[str] | None = None,
+    ) -> None:
         self.app = app
         self.backend_url = backend_url
         self.broker_url = broker_url
+        self.handler_names = handler_names
 
         self.container = Container(app)
         self.lifecycle = AppLifecycle(app, self.container)
@@ -555,6 +572,21 @@ class MessageBusWorker:
                for handler_data in handlers:
                    message_type = handler_data.spec.message_type
                    topic = message_type.MESSAGE_TOPIC
+
+                   # Filter handlers by name if specified
+                   if (
+                       self.handler_names is not None
+                       and handler_data.spec.name is not None
+                   ):
+                       if handler_data.spec.name not in self.handler_names:
+                           continue
+                   elif (
+                       self.handler_names is not None
+                       and handler_data.spec.name is None
+                   ):
+                       # Skip handlers without names when filtering is requested
+                       continue
+
                    if (
                        topic in message_handler_data_map
                        and message_type.MESSAGE_TYPE == "task"
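
In this variant the filter is supplied when the worker is constructed rather than when it is started. A minimal sketch, assuming `app` is a configured `Microservice` and using placeholder URLs:

    worker = MessageBusWorker(
        app=app,
        broker_url="amqp://guest:guest@localhost/",
        backend_url="redis://localhost:6379",
        # Handlers whose spec carries no name are skipped whenever a filter is set.
        handler_names={"send_email"},
    )
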
jararaca/microservice.py CHANGED
@@ -1,4 +1,5 @@
 import inspect
+import logging
 from contextlib import contextmanager, suppress
 from contextvars import ContextVar
 from dataclasses import dataclass, field
@@ -25,6 +26,8 @@ from jararaca.messagebus import MessageOf
 from jararaca.messagebus.message import Message
 from jararaca.reflect.controller_inspect import ControllerMemberReflect
 
+logger = logging.getLogger(__name__)
+
 if TYPE_CHECKING:
     from typing_extensions import TypeIs
 
@@ -145,6 +148,49 @@ class Microservice:
         )
 
 
+@dataclass
+class InstantiationNode:
+    property_name: str
+    parent: "InstantiationNode | None" = None
+    source_type: Any | None = None
+    target_type: Any | None = None
+
+
+instantiation_vector_ctxvar = ContextVar[list[InstantiationNode]](
+    "instantiation_vector", default=[]
+)
+
+
+def print_instantiation_vector(
+    instantiation_vector: list[InstantiationNode],
+) -> None:
+    """
+    Prints the instantiation vector for debugging purposes.
+    """
+    for node in instantiation_vector:
+        print(
+            f"Property: {node.property_name}, Source: {node.source_type}, Target: {node.target_type}"
+        )
+
+
+@contextmanager
+def span_instantiation_vector(
+    instantiation_node: InstantiationNode,
+) -> Generator[None, None, None]:
+    """
+    Context manager to track instantiation nodes in a vector.
+    This is useful for debugging and tracing instantiation paths.
+    """
+    current_vector = list(instantiation_vector_ctxvar.get())
+    current_vector.append(instantiation_node)
+    token = instantiation_vector_ctxvar.set(current_vector)
+    try:
+        yield
+    finally:
+        with suppress(ValueError):
+            instantiation_vector_ctxvar.reset(token)
+
+
 class Container:
 
     def __init__(self, app: Microservice) -> None:
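
The tracing helpers above record one `InstantiationNode` per dependency edge while the container resolves a graph, and unwind the path as each resolution finishes. A standalone sketch of the same context-manager pattern, with simplified names that are not the package API:

    from contextlib import contextmanager
    from contextvars import ContextVar
    from typing import Generator, List

    _trace: ContextVar[List[str]] = ContextVar("trace", default=[])

    @contextmanager
    def tracing(step: str) -> Generator[None, None, None]:
        token = _trace.set([*_trace.get(), step])  # copy, then extend
        try:
            yield
        finally:
            _trace.reset(token)  # restore the parent path on exit

    with tracing("UserController"):
        with tracing("user_service"):
            print(" -> ".join(_trace.get()))  # UserController -> user_service
    print(_trace.get())  # [] -- the path unwinds as each block exits
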
@@ -161,40 +207,54 @@ class Container:
             if provider.use_value:
                 self.instances_map[provider.provide] = provider.use_value
             elif provider.use_class:
-                self.get_and_register(provider.use_class, provider.provide)
+                self._get_and_register(provider.use_class, provider.provide)
             elif provider.use_factory:
-                self.get_and_register(provider.use_factory, provider.provide)
+                self._get_and_register(provider.use_factory, provider.provide)
             else:
-                self.get_and_register(provider, provider)
+                self._get_and_register(provider, provider)
 
-    def instantiate(self, type_: type[Any] | Callable[..., Any]) -> Any:
+    def _instantiate(self, type_: type[Any] | Callable[..., Any]) -> Any:
 
-        dependencies = self.parse_dependencies(type_)
+        dependencies = self._parse_dependencies(type_)
 
-        evaluated_dependencies = {
-            name: self.get_or_register_token_or_type(dependency)
-            for name, dependency in dependencies.items()
-        }
+        evaluated_dependencies: dict[str, Any] = {}
+        for name, dependency in dependencies.items():
+            with span_instantiation_vector(
+                InstantiationNode(
+                    property_name=name,
+                    source_type=type_,
+                    target_type=dependency,
+                )
+            ):
+                evaluated_dependencies[name] = self.get_or_register_token_or_type(
+                    dependency
+                )
 
         instance = type_(**evaluated_dependencies)
 
         return instance
 
-    def parse_dependencies(
+    def _parse_dependencies(
         self, provider: type[Any] | Callable[..., Any]
     ) -> dict[str, type[Any]]:
 
-        signature = inspect.signature(provider)
+        vector = instantiation_vector_ctxvar.get()
+        try:
+            signature = inspect.signature(provider)
+        except ValueError:
+            print("VECTOR:", vector)
+            print_instantiation_vector(vector)
+            raise
 
         parameters = signature.parameters
 
         return {
-            name: self.lookup_parameter_type(parameter)
+            name: self._lookup_parameter_type(parameter)
            for name, parameter in parameters.items()
            if parameter.annotation != inspect.Parameter.empty
        }
 
-    def lookup_parameter_type(self, parameter: inspect.Parameter) -> Any:
+    def _lookup_parameter_type(self, parameter: inspect.Parameter) -> Any:
         if parameter.annotation == inspect.Parameter.empty:
             raise Exception(f"Parameter {parameter.name} has no type annotation")
 
@@ -227,14 +287,14 @@ class Container:
             item_type = bind_to = token_or_type
 
         if token_or_type not in self.instances_map:
-            return self.get_and_register(item_type, bind_to)
+            return self._get_and_register(item_type, bind_to)
 
         return cast(T, self.instances_map[bind_to])
 
-    def get_and_register(
+    def _get_and_register(
         self, item_type: Type[T] | Callable[..., T], bind_to: Any
     ) -> T:
-        instance = self.instantiate(item_type)
+        instance = self._instantiate(item_type)
         self.register(instance, bind_to)
         return cast(T, instance)
 
@@ -20,6 +20,7 @@ class ScheduledAction:
         exclusive: bool = True,
         timeout: int | None = None,
         exception_handler: Callable[[BaseException], None] | None = None,
+        name: str | None = None,
     ) -> None:
         """
         :param cron: A string representing the cron expression for the scheduled action.
@@ -27,6 +28,7 @@ class ScheduledAction:
         :param exclusive: A boolean indicating if the scheduled action should be executed in one instance of the application. (Requires a distributed lock provided by a backend)
         :param exception_handler: A callable that will be called when an exception is raised during the execution of the scheduled action.
         :param timeout: An integer representing the timeout for the scheduled action in seconds. If the scheduled action takes longer than this time, it will be terminated.
+        :param name: An optional name for the scheduled action, used for filtering which actions to run.
         """
         self.cron = cron
         """
@@ -55,6 +57,11 @@ class ScheduledAction:
         If the scheduled action takes longer than this time, it will be terminated.
         """
 
+        self.name = name
+        """
+        An optional name for the scheduled action, used for filtering which actions to run.
+        """
+
     def __call__(self, func: DECORATED_FUNC) -> DECORATED_FUNC:
         ScheduledAction.register(func, self)
         return func
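
A hypothetical controller using the new `name` parameter, paired with a scheduler restricted to that name (cron expression, class, and method names are illustrative; `Scheduler` accepts `scheduler_names` per the hunks later in this diff):

    class ReportsController:
        @ScheduledAction(cron="0 3 * * *", name="nightly_report")
        async def build_nightly_report(self) -> None:
            ...

    # Only actions whose name is in this set are extracted and run;
    # unnamed actions are skipped whenever a filter is provided.
    scheduler = Scheduler(app=app, interval=10, scheduler_names={"nightly_report"})
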
@@ -34,7 +34,7 @@ class SchedulerConfig:
 
 
 def extract_scheduled_actions(
-    app: Microservice, container: Container
+    app: Microservice, container: Container, scheduler_names: set[str] | None = None
 ) -> list[ScheduledActionData]:
     scheduled_actions: list[ScheduledActionData] = []
     for controllers in app.controllers:
@@ -42,6 +42,17 @@ def extract_scheduled_actions(
         controller_instance: Any = container.get_by_type(controllers)
 
         controller_scheduled_actions = get_type_scheduled_actions(controller_instance)
+
+        # Filter scheduled actions by name if scheduler_names is provided
+        if scheduler_names is not None:
+            filtered_actions = []
+            for action in controller_scheduled_actions:
+                # Include actions that have a name and it's in the provided set
+                if action.spec.name and action.spec.name in scheduler_names:
+                    filtered_actions.append(action)
+            # Skip actions without names when filtering is active
+            controller_scheduled_actions = filtered_actions
+
         scheduled_actions.extend(controller_scheduled_actions)
 
     return scheduled_actions
@@ -59,10 +70,12 @@ class Scheduler:
         self,
         app: Microservice,
         interval: int,
+        scheduler_names: set[str] | None = None,
     ) -> None:
         self.app = app
 
         self.interval = interval
+        self.scheduler_names = scheduler_names
         self.container = Container(self.app)
         self.uow_provider = UnitOfWorkContextProvider(app, self.container)
 
@@ -146,7 +159,9 @@ class Scheduler:
         async def run_scheduled_actions() -> None:
 
             async with self.lifceycle():
-                scheduled_actions = extract_scheduled_actions(self.app, self.container)
+                scheduled_actions = extract_scheduled_actions(
+                    self.app, self.container, self.scheduler_names
+                )
 
                 while True:
                     for action in scheduled_actions:
@@ -31,12 +31,13 @@ from jararaca.scheduler.decorators import (
     get_type_scheduled_actions,
 )
 from jararaca.scheduler.types import DelayedMessageData
+from jararaca.utils.rabbitmq_utils import RabbitmqUtils
 
 logger = logging.getLogger(__name__)
 
 
 def extract_scheduled_actions(
-    app: Microservice, container: Container
+    app: Microservice, container: Container, scheduler_names: set[str] | None = None
 ) -> list[ScheduledActionData]:
     scheduled_actions: list[ScheduledActionData] = []
     for controllers in app.controllers:
@@ -44,6 +45,17 @@ def extract_scheduled_actions(
         controller_instance: Any = container.get_by_type(controllers)
 
         controller_scheduled_actions = get_type_scheduled_actions(controller_instance)
+
+        # Filter scheduled actions by name if scheduler_names is provided
+        if scheduler_names is not None:
+            filtered_actions = []
+            for action in controller_scheduled_actions:
+                # Include actions that have a name and it's in the provided set
+                if action.spec.name and action.spec.name in scheduler_names:
+                    filtered_actions.append(action)
+            # Skip actions without names when filtering is active
+            controller_scheduled_actions = filtered_actions
+
         scheduled_actions.extend(controller_scheduled_actions)
 
     return scheduled_actions
@@ -145,7 +157,7 @@ class RabbitMQBrokerDispatcher(MessageBrokerDispatcher):
 
         logger.info(f"Dispatching message to {action_id} at {timestamp}")
         async with self.channel_pool.acquire() as channel:
-            exchange = await channel.get_exchange(self.exchange)
+            exchange = await RabbitmqUtils.get_main_exchange(channel, self.exchange)
 
             await exchange.publish(
                 aio_pika.Message(body=str(timestamp).encode()),
@@ -163,7 +175,7 @@ class RabbitMQBrokerDispatcher(MessageBrokerDispatcher):
         """
         async with self.channel_pool.acquire() as channel:
 
-            exchange = await channel.get_exchange(self.exchange)
+            exchange = await RabbitmqUtils.get_main_exchange(channel, self.exchange)
             await exchange.publish(
                 aio_pika.Message(
                     body=delayed_message.payload,
@@ -178,25 +190,15 @@ class RabbitMQBrokerDispatcher(MessageBrokerDispatcher):
         """
 
         async with self.channel_pool.acquire() as channel:
-
-            await channel.declare_exchange(
-                name=self.exchange,
-                type="topic",
-                durable=True,
-                auto_delete=False,
-            )
+            await RabbitmqUtils.get_main_exchange(channel, self.exchange)
 
             for sched_act_data in scheduled_actions:
-                queue = await channel.declare_queue(
-                    name=ScheduledAction.get_function_id(sched_act_data.callable),
-                    durable=True,
-                )
+                queue_name = ScheduledAction.get_function_id(sched_act_data.callable)
 
-                await queue.bind(
-                    exchange=self.exchange,
-                    routing_key=ScheduledAction.get_function_id(
-                        sched_act_data.callable
-                    ),
+                # Try to get existing queue
+                await RabbitmqUtils.get_scheduler_queue(
+                    channel=channel,
+                    queue_name=queue_name,
                 )
 
     async def dispose(self) -> None:
@@ -226,6 +228,7 @@ class SchedulerV2:
         interval: int,
         broker_url: str,
         backend_url: str,
+        scheduler_names: set[str] | None = None,
     ) -> None:
         self.app = app
 
@@ -237,6 +240,7 @@ class SchedulerV2:
         )
 
         self.interval = interval
+        self.scheduler_names = scheduler_names
         self.container = Container(self.app)
         self.uow_provider = UnitOfWorkContextProvider(app, self.container)
 
@@ -262,7 +266,9 @@ class SchedulerV2:
         """
         async with self.lifecycle():
 
-            scheduled_actions = extract_scheduled_actions(self.app, self.container)
+            scheduled_actions = extract_scheduled_actions(
+                self.app, self.container, self.scheduler_names
+            )
 
             await self.broker.initialize(scheduled_actions)