annet-0.11-py3-none-any.whl → annet-0.12-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of annet might be problematic. Click here for more details.

annet/cli_args.py CHANGED
@@ -3,6 +3,7 @@
3
3
  import abc
4
4
  import argparse
5
5
  import enum
6
+ import logging
6
7
  import os
7
8
 
8
9
  from valkit.common import valid_string_list
@@ -360,15 +361,14 @@ class QueryOptionsBase(CacheOptions):
360
361
  def __init__(self, *args, **kwargs):
361
362
  super().__init__(*args, **kwargs)
362
363
  if not isinstance(self.query, Query):
363
- query_type = storage_connector.get().query()
364
- self.query = query_type.new(self.query, hosts_range=self.hosts_range)
365
-
366
- def validate_stdin(self, arg, val, **kwargs):
367
- if "storage" in kwargs and arg == "config":
368
- storage = kwargs["storage"]
369
- if len(storage.resolve_object_ids_by_query(self.query)) > 1:
370
- raise ValueError("stdin config can not be used with multiple devices")
371
- super().validate_stdin(arg, val, **kwargs)
364
+ connectors = storage_connector.get_all()
365
+ if not connectors:
366
+ pass
367
+ elif len(connectors) == 1:
368
+ query_type = connectors[0].query()
369
+ self.query = query_type.new(self.query, hosts_range=self.hosts_range)
370
+ else:
371
+ logging.warning("Multiple connectors found, skip parsing query")
372
372
 
373
373
 
374
374
  class QueryOptions(QueryOptionsBase):
annet/connectors.py CHANGED
@@ -2,8 +2,7 @@ import sys
2
2
  from abc import ABC
3
3
  from functools import cached_property
4
4
  from importlib.metadata import entry_points
5
- from typing import Generic, Optional, Type, TypeVar
6
-
5
+ from typing import Generic, Optional, Type, TypeVar, List
7
6
 
8
7
  T = TypeVar("T")
9
8
 
@@ -12,29 +11,46 @@ class Connector(ABC, Generic[T]):
12
11
  name: str
13
12
  ep_name: str
14
13
  ep_group: str = "annet.connectors"
15
- _cls: Optional[Type[T]] = None
14
+ _classes: Optional[List[Type[T]]] = None
16
15
 
17
16
  def _get_default(self) -> Type[T]:
18
17
  raise RuntimeError(f"{self.name} is not set")
19
18
 
20
19
  @cached_property
21
- def _entry_point(self) -> Optional[Type[T]]:
20
+ def _entry_point(self) -> List[Type[T]]:
22
21
  return load_entry_point(self.ep_group, self.ep_name)
23
22
 
24
23
  def get(self, *args, **kwargs) -> T:
25
- if self._cls is not None:
26
- res = self._cls
27
- else:
28
- res = self._entry_point or self._get_default()
24
+ if self._classes is None:
25
+ self._classes = self._entry_point or [self._get_default()]
26
+ if len(self._classes) > 1:
27
+ raise RuntimeError(
28
+ f"Multiple classes are registered with the same "
29
+ f"group={self.ep_group} and name={self.ep_name}: "
30
+ f"{[cls for cls in self._classes]}",
31
+ )
32
+
33
+ res = self._classes[0]
29
34
  return res(*args, **kwargs)
30
35
 
36
+ def get_all(self, *args, **kwargs) -> T:
37
+ if self._classes is None:
38
+ self._classes = self._entry_point or [self._get_default()]
39
+
40
+ return [cls(*args, **kwargs) for cls in self._classes]
41
+
31
42
  def set(self, cls: Type[T]):
32
- if self._cls is not None:
43
+ if self._classes is not None:
44
+ raise RuntimeError(f"Cannot reinitialize value of {self.name}")
45
+ self._classes = [cls]
46
+
47
+ def set_all(self, classes: List[Type[T]]):
48
+ if self._classes is not None:
33
49
  raise RuntimeError(f"Cannot reinitialize value of {self.name}")
34
- self._cls = cls
50
+ self._classes = list(classes)
35
51
 
36
52
  def is_default(self) -> bool:
37
- return self._cls is self._entry_point is None
53
+ return self._classes is self._entry_point is None
38
54
 
39
55
 
40
56
  class CachedConnector(Connector[T], ABC):
@@ -58,7 +74,4 @@ def load_entry_point(group: str, name: str):
58
74
  ep = entry_points(group=group, name=name) # pylint: disable=unexpected-keyword-arg
59
75
  if not ep:
60
76
  return None
61
- if len(ep) > 1:
62
- raise RuntimeError(f"Multiple entry points with the same {group=} and {name=}: {[item.value for item in ep]}")
63
- for item in ep:
64
- return item.load()
77
+ return [item.load() for item in ep]
annet/gen.py CHANGED
@@ -39,6 +39,7 @@ from annet.generators import (
39
39
  JSONFragment,
40
40
  NotSupportedDevice,
41
41
  PartialGenerator,
42
+ RefGenerator,
42
43
  )
43
44
  from annet.lib import merge_dicts, percentile
44
45
  from annet.output import output_driver_connector
@@ -60,13 +61,16 @@ class DeviceGenerators:
60
61
  """Collections of various types of generators found for devices."""
61
62
 
62
63
  # map device fqdn to found partial generators
63
- partial: Dict[str, List[PartialGenerator]] = dataclasses.field(default_factory=dict)
64
+ partial: Dict[Any, List[PartialGenerator]] = dataclasses.field(default_factory=dict)
65
+
66
+ # ref generators
67
+ ref: Dict[Any, List[RefGenerator]] = dataclasses.field(default_factory=dict)
64
68
 
65
69
  # map device fqdn to found entire generators
66
- entire: Dict[str, List[Entire]] = dataclasses.field(default_factory=dict)
70
+ entire: Dict[Any, List[Entire]] = dataclasses.field(default_factory=dict)
67
71
 
68
72
  # map device fqdn to found json fragment generators
69
- json_fragment: Dict[str, List[JSONFragment]] = dataclasses.field(default_factory=dict)
73
+ json_fragment: Dict[Any, List[JSONFragment]] = dataclasses.field(default_factory=dict)
70
74
 
71
75
  def iter_gens(self) -> Iterator[BaseGenerator]:
72
76
  """Iterate over generators."""
@@ -75,19 +79,24 @@ class DeviceGenerators:
75
79
  for gen in gen_list:
76
80
  yield gen
77
81
 
78
- def file_gens(self, device_fqdn: str) -> Iterator[Union[Entire, JSONFragment]]:
82
+ def file_gens(self, device: Any) -> Iterator[Union[Entire, JSONFragment]]:
79
83
  """Iterate over generators that generate files or file parts."""
80
84
  yield from itertools.chain(
81
- self.entire.get(device_fqdn, []),
82
- self.json_fragment.get(device_fqdn, []),
85
+ self.entire.get(device, []),
86
+ self.json_fragment.get(device, []),
83
87
  )
84
88
 
89
+ def update(self, other: "DeviceGenerators") -> None:
90
+ self.partial.update(other.partial)
91
+ self.ref.update(other.ref)
92
+ self.entire.update(other.entire)
93
+ self.json_fragment.update(other.json_fragment)
94
+
85
95
 
86
96
  @dataclasses.dataclass
87
97
  class OldNewDeviceContext:
88
98
  config: str
89
99
  args: GenOptions
90
- storage: Storage
91
100
  downloaded_files: Dict[Device, DeviceDownloadedFiles]
92
101
  failed_files: Dict[Device, Exception]
93
102
  running: Dict[Device, Dict[str, str]]
@@ -143,21 +152,24 @@ def _old_new_per_device(ctx: OldNewDeviceContext, device: Device, filterer: Filt
143
152
  splitter=tabparser.make_formatter(device.hw).split,
144
153
  )
145
154
  if not old:
146
- res = generators.run_partial_initial(device, ctx.storage)
155
+ res = generators.run_partial_initial(device)
147
156
  old = res.config_tree()
148
157
  perf = res.perf_mesures()
149
158
  if ctx.args.profile and ctx.do_print_perf:
150
159
  _print_perf("INITIAL", perf)
151
160
  run_args = generators.GeneratorPartialRunArgs(
152
161
  device=device,
153
- storage=ctx.storage,
154
162
  use_acl=not ctx.args.no_acl,
155
163
  use_acl_safe=ctx.args.acl_safe,
156
164
  annotate=ctx.add_annotations,
157
165
  generators_context=ctx.args.generators_context,
158
166
  no_new=ctx.no_new,
159
167
  )
160
- res = generators.run_partial_generators(ctx.gens.partial[device.fqdn], run_args)
168
+ res = generators.run_partial_generators(
169
+ ctx.gens.partial[device],
170
+ ctx.gens.ref[device],
171
+ run_args,
172
+ )
161
173
  partial_results = res.partial_results
162
174
  perf = res.perf_mesures()
163
175
  if ctx.no_new:
@@ -227,9 +239,8 @@ def _old_new_per_device(ctx: OldNewDeviceContext, device: Device, filterer: Filt
227
239
  get_logger(host=device.hostname).error(error_msg)
228
240
  return OldNewResult(device=device, err=Exception(error_msg))
229
241
  res = generators.run_file_generators(
230
- ctx.gens.file_gens(device.fqdn),
242
+ ctx.gens.file_gens(device),
231
243
  device,
232
- ctx.storage,
233
244
  )
234
245
 
235
246
  entire_results = res.entire_results
@@ -324,7 +335,7 @@ def split_downloaded_files(
324
335
  """Split downloaded files per generator type: entire/json_fragment."""
325
336
  ret = DeviceDownloadedFiles()
326
337
 
327
- for gen in gens.file_gens(device.fqdn):
338
+ for gen in gens.file_gens(device):
328
339
  filepath = gen.path(device)
329
340
  if filepath in device_flat_files:
330
341
  if isinstance(gen, Entire):
@@ -355,7 +366,6 @@ def split_downloaded_files_multi_device(
355
366
  @tracing.function
356
367
  def old_new(
357
368
  args: GenOptions,
358
- storage: Storage,
359
369
  config: str,
360
370
  loader: "Loader",
361
371
  filterer: Filterer,
@@ -367,13 +377,13 @@ def old_new(
367
377
  do_files_download=False,
368
378
  do_print_perf=True,
369
379
  ):
370
- devices = loader.resolve_devices(storage, device_ids)
371
- gens = loader.resolve_gens(storage, devices)
380
+ devices = loader.devices
381
+ gens = loader.resolve_gens(devices)
372
382
  running, failed_running = _old_resolve_running(config, devices)
373
383
  downloaded_files, failed_files = _old_resolve_files(config, devices, gens, do_files_download)
374
384
 
375
385
  if stdin is None:
376
- stdin = args.stdin(storage=storage, filter_acl=args.filter_acl, config=config)
386
+ stdin = args.stdin(filter_acl=args.filter_acl, config=config)
377
387
 
378
388
  fetched_packages, failed_packages = {}, {}
379
389
  if do_files_download and config == "running":
@@ -385,7 +395,6 @@ def old_new(
385
395
  ctx = OldNewDeviceContext(
386
396
  config=config,
387
397
  args=args,
388
- storage=storage,
389
398
  downloaded_files=split_downloaded_files_multi_device(downloaded_files, gens, devices),
390
399
  failed_files=failed_files,
391
400
  running=running,
@@ -413,26 +422,25 @@ def old_new(
413
422
 
414
423
 
415
424
  @tracing.function
416
- def old_raw(args: GenOptions, storage, config, stdin=None,
417
- do_files_download=False, use_mesh=True,
418
- ) -> Iterable[Tuple[Device, Union[str, Dict[str, str]]]]:
419
- devices = storage.make_devices(args.query, preload_neighbors=True, use_mesh=use_mesh)
420
- device_gens = _old_resolve_gens(args, storage, devices)
421
- running, failed_running = _old_resolve_running(config, devices)
422
- downloaded_files, failed_files = _old_resolve_files(config, devices, device_gens, do_files_download)
425
+ def old_raw(
426
+ args: GenOptions, loader: Loader, config, stdin=None,
427
+ do_files_download=False, use_mesh=True,
428
+ ) -> Iterable[Tuple[Device, Union[str, Dict[str, str]]]]:
429
+ device_gens = loader.resolve_gens(loader.devices)
430
+ running, failed_running = _old_resolve_running(config, loader.devices)
431
+ downloaded_files, failed_files = _old_resolve_files(config, loader.devices, device_gens, do_files_download)
423
432
  if stdin is None:
424
- stdin = args.stdin(storage=storage, filter_acl=args.filter_acl, config=config)
433
+ stdin = args.stdin(filter_acl=args.filter_acl, config=config)
425
434
  ctx = OldNewDeviceContext(
426
435
  config=config,
427
436
  args=args,
428
- storage=storage,
429
- downloaded_files=split_downloaded_files_multi_device(downloaded_files, device_gens, devices),
437
+ downloaded_files=split_downloaded_files_multi_device(downloaded_files, device_gens, loader.devices),
430
438
  failed_files=failed_files,
431
439
  running=running,
432
440
  failed_running=failed_running,
433
441
  stdin=stdin,
434
442
  do_files_download=do_files_download,
435
- device_count=len(devices),
443
+ device_count=len(loader.devices),
436
444
  no_new=True,
437
445
  add_annotations=False,
438
446
  add_implicit=False,
@@ -441,7 +449,7 @@ def old_raw(args: GenOptions, storage, config, stdin=None,
441
449
  failed_packages={},
442
450
  do_print_perf=True,
443
451
  )
444
- for device in devices:
452
+ for device in loader.devices:
445
453
  if not device.is_pc():
446
454
  config = _old_new_get_config_cli(ctx, device)
447
455
  config = scrub_config(config, device.breed)
@@ -463,68 +471,59 @@ def worker(device_id, args: ShowGenOptions, stdin, loader: "Loader", filterer: F
463
471
  if span:
464
472
  span.set_attribute("device.id", device_id)
465
473
 
466
- connector = storage_connector.get()
467
- storage_opts = connector.opts().from_cli_opts(args)
468
- with connector.storage()(storage_opts) as storage:
469
- for res in old_new(
470
- args,
471
- storage,
472
- config="/dev/null",
473
- loader=loader,
474
- filterer=filterer,
475
- add_implicit=False,
476
- add_annotations=args.annotate,
477
- stdin=stdin,
478
- device_ids=[device_id],
479
- ):
480
- new = res.get_new(args.acl_safe)
481
- new_files = res.get_new_files(args.acl_safe)
482
- new_file_fragments = res.get_new_file_fragments(args.acl_safe)
483
- output_driver = output_driver_connector.get()
484
- device = res.device
485
- if new is None:
486
- continue
487
- for (entire_path, (entire_data, _)) in sorted(new_files.items(), key=itemgetter(0)):
488
- yield (output_driver.entire_config_dest_path(device, entire_path), entire_data, False)
489
-
490
- for (path, (data, _)) in sorted(new_file_fragments.items(), key=itemgetter(0)):
491
- dumped_data = json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False)
492
- yield (output_driver.entire_config_dest_path(device, path), dumped_data, False)
493
-
494
- has_file_result = new_files or new_file_fragments
495
- has_partial_result = new or not has_file_result
496
- if device.hw.vendor in platform.VENDOR_REVERSES and has_partial_result:
497
- orderer = patching.Orderer.from_hw(device.hw)
498
- yield (output_driver.cfg_file_names(device)[0],
499
- format_config_blocks(
500
- orderer.order_config(new),
501
- device.hw,
502
- args.indent
503
- ),
504
- False)
474
+ for res in old_new(
475
+ args,
476
+ config="/dev/null",
477
+ loader=loader,
478
+ filterer=filterer,
479
+ add_implicit=False,
480
+ add_annotations=args.annotate,
481
+ stdin=stdin,
482
+ device_ids=[device_id],
483
+ ):
484
+ new = res.get_new(args.acl_safe)
485
+ new_files = res.get_new_files(args.acl_safe)
486
+ new_file_fragments = res.get_new_file_fragments(args.acl_safe)
487
+ output_driver = output_driver_connector.get()
488
+ device = res.device
489
+ if new is None:
490
+ continue
491
+ for (entire_path, (entire_data, _)) in sorted(new_files.items(), key=itemgetter(0)):
492
+ yield (output_driver.entire_config_dest_path(device, entire_path), entire_data, False)
493
+
494
+ for (path, (data, _)) in sorted(new_file_fragments.items(), key=itemgetter(0)):
495
+ dumped_data = json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False)
496
+ yield (output_driver.entire_config_dest_path(device, path), dumped_data, False)
497
+
498
+ has_file_result = new_files or new_file_fragments
499
+ has_partial_result = new or not has_file_result
500
+ if device.hw.vendor in platform.VENDOR_REVERSES and has_partial_result:
501
+ orderer = patching.Orderer.from_hw(device.hw)
502
+ yield (output_driver.cfg_file_names(device)[0],
503
+ format_config_blocks(
504
+ orderer.order_config(new),
505
+ device.hw,
506
+ args.indent
507
+ ),
508
+ False)
505
509
 
506
510
 
507
511
  def old_new_worker(device_id, args: DeployOptions, config, stdin, loader: "Loader", filterer: Filterer):
508
- connector = storage_connector.get()
509
- storage_opts = connector.opts().from_cli_opts(args)
510
- with connector.storage()(storage_opts) as storage:
511
- yield from old_new(
512
- args,
513
- storage,
514
- config=config,
515
- loader=loader,
516
- filterer=filterer,
517
- stdin=stdin,
518
- device_ids=[device_id],
519
- no_new=args.clear,
520
- do_files_download=True,
521
- )
512
+ yield from old_new(
513
+ args,
514
+ config=config,
515
+ loader=loader,
516
+ filterer=filterer,
517
+ stdin=stdin,
518
+ device_ids=[device_id],
519
+ no_new=args.clear,
520
+ do_files_download=True,
521
+ )
522
522
 
523
523
 
524
524
  class OldNewParallel(Parallel):
525
- def __init__(self, storage: Storage, args: DeployOptions, loader: "Loader", filterer: Filterer):
526
- self.storage = storage
527
- stdin = args.stdin(storage=storage, filter_acl=args.filter_acl, config=args.config)
525
+ def __init__(self, args: DeployOptions, loader: "Loader", filterer: Filterer):
526
+ stdin = args.stdin(filter_acl=args.filter_acl, config=args.config)
528
527
  super().__init__(
529
528
  old_new_worker,
530
529
  args,
@@ -535,20 +534,19 @@ class OldNewParallel(Parallel):
535
534
  )
536
535
  self.tune_args(args)
537
536
 
538
- def generated_configs(self, device_ids: List[int]) -> Generator[OldNewResult, None, None]:
539
- skipped = set(device_ids)
537
+ def generated_configs(self, devices: List[Device]) -> Generator[OldNewResult, None, None]:
538
+ devices_by_id = {device.id: device for device in devices}
539
+ device_ids = list(devices_by_id)
540
540
 
541
541
  for task_result in self.irun(device_ids):
542
542
  if task_result.exc is not None:
543
- device = self.storage.get_device(task_result.device_id, use_mesh=False, preload_neighbors=False)
543
+ device = devices_by_id.pop(task_result.device_id)
544
544
  yield OldNewResult(device=device, err=task_result.exc)
545
- skipped.discard(task_result.device_id)
546
545
  elif task_result.result is not None:
547
546
  yield from task_result.result
548
- skipped.discard(task_result.device_id)
547
+ devices_by_id.pop(task_result.device_id)
549
548
 
550
- for device_id in skipped:
551
- device = self.storage.get_device(device_id, use_mesh=False, preload_neighbors=False)
549
+ for device in devices_by_id.values():
552
550
  yield OldNewResult(device=device, err=Exception(f"No config returned for {device.hostname}"))
553
551
 
554
552
 
@@ -564,7 +562,7 @@ def _get_files_to_download(devices: List[Device], gens: DeviceGenerators) -> Dic
564
562
  for device in devices:
565
563
  paths = set()
566
564
  try:
567
- for generator in gens.file_gens(device.fqdn):
565
+ for generator in gens.file_gens(device):
568
566
  try:
569
567
  path = generator.path(device)
570
568
  if path:
@@ -713,6 +711,8 @@ def _old_new_get_config_cli(ctx: OldNewDeviceContext, device: Device) -> str:
713
711
  raise exc
714
712
  elif ctx.config == "-":
715
713
  text = ctx.stdin["config"]
714
+ if ctx.device_count > 1:
715
+ raise ValueError("stdin config can not be used with multiple devices")
716
716
  else:
717
717
  if os.path.isdir(ctx.config):
718
718
  filename = _existing_cfg_file_name(ctx.config, device)
@@ -766,13 +766,15 @@ def _old_new_get_config_files(ctx: OldNewDeviceContext, device: Device) -> Devic
766
766
 
767
767
 
768
768
  @tracing.function
769
- def _old_resolve_gens(args: GenOptions, storage: Storage, devices: List[Device]) -> DeviceGenerators:
769
+ def _old_resolve_gens(args: GenOptions, storage: Storage, devices: Iterable[Device]) -> DeviceGenerators:
770
770
  per_device_gens = DeviceGenerators()
771
+ devices = devices or [None] # get all generators if no devices provided
771
772
  for device in devices:
772
773
  gens = generators.build_generators(storage, gens=args, device=device)
773
- per_device_gens.partial[device.fqdn] = gens.partial
774
- per_device_gens.entire[device.fqdn] = gens.entire
775
- per_device_gens.json_fragment[device.fqdn] = gens.json_fragment
774
+ per_device_gens.partial[device] = gens.partial
775
+ per_device_gens.entire[device] = gens.entire
776
+ per_device_gens.json_fragment[device] = gens.json_fragment
777
+ per_device_gens.ref[device] = gens.ref
776
778
  return per_device_gens
777
779
 
778
780
 
@@ -808,37 +810,46 @@ def _old_resolve_files(config: str,
808
810
 
809
811
 
810
812
  class Loader:
811
- def __init__(self, storage: Storage, args: GenOptions, no_empty_warning: bool = False) -> None:
813
+ def __init__(
814
+ self, *storages: Storage,
815
+ args: GenOptions,
816
+ no_empty_warning: bool = False,
817
+ ) -> None:
812
818
  self._args = args
813
- self._storage = storage
819
+ self._storages = storages
814
820
  self._no_empty_warning = no_empty_warning
815
- self._devices_map: Optional[Dict[int, Device]] = None
816
- self._gens: Optional[DeviceGenerators] = None
821
+ self._devices_map: Dict[int, Device] = {}
822
+ self._gens: DeviceGenerators = DeviceGenerators()
823
+ self._counter = itertools.count()
817
824
 
818
825
  self._preload()
819
826
 
820
827
  def _preload(self) -> None:
821
828
  with tracing_connector.get().start_as_current_span("Resolve devices"):
822
- devices = self._storage.make_devices(
823
- self._args.query,
824
- preload_neighbors=True,
825
- use_mesh=not self._args.no_mesh,
826
- preload_extra_fields=True,
827
- )
829
+ for storage in self._storages:
830
+ devices = storage.make_devices(
831
+ self._args.query,
832
+ preload_neighbors=True,
833
+ use_mesh=not self._args.no_mesh,
834
+ preload_extra_fields=True,
835
+ )
836
+ for device in devices:
837
+ self._devices_map[next(self._counter)] = device
838
+ self._gens.update(_old_resolve_gens(self._args, storage, devices))
828
839
  if not devices and not self._no_empty_warning:
829
840
  get_logger().error("No devices found for %s", self._args.query)
830
841
  return
831
842
 
832
- self._devices_map = {d.id: d for d in devices}
833
- self._gens = _old_resolve_gens(self._args, self._storage, devices)
834
-
835
843
  @property
836
844
  def device_fqdns(self):
837
- return {d.id: d.fqdn for d in self._devices_map.values()} if self._devices_map else {}
845
+ return {
846
+ device_id: d.fqdn
847
+ for device_id, d in self._devices_map.items()
848
+ }
838
849
 
839
850
  @property
840
851
  def device_ids(self):
841
- return list(self.device_fqdns)
852
+ return list(self._devices_map)
842
853
 
843
854
  @property
844
855
  def devices(self) -> List[Device]:
@@ -846,23 +857,9 @@ class Loader:
846
857
  return list(self._devices_map.values())
847
858
  return []
848
859
 
849
- def resolve_devices(self, storage: Storage, device_ids: Iterable[int]) -> List[Device]:
850
- devices = []
851
- for device_id in device_ids:
852
- device = self._devices_map[device_id]
853
-
854
- # can not use self._storage here, we can be in another process
855
- device.storage = storage
856
-
857
- devices.append(device)
858
- return devices
859
-
860
- def resolve_gens(self, storage: Storage, devices: Iterable[Device]) -> DeviceGenerators:
860
+ def resolve_gens(self, devices: Iterable[Device]) -> DeviceGenerators:
861
861
  if self._gens is not None:
862
- for gen in self._gens.iter_gens():
863
- # can not use self._storage here, we can be in another process
864
- gen.storage = storage
865
862
  return self._gens
866
863
 
867
864
  with tracing_connector.get().start_as_current_span("Resolve gens"):
868
- return _old_resolve_gens(self._args, storage, devices)
865
+ return _old_resolve_gens(self._args, self._storage, devices)