annet-0.10-py3-none-any.whl → annet-0.12-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of annet might be problematic.
- annet/adapters/netbox/common/models.py +10 -3
- annet/adapters/netbox/common/status_client.py +2 -1
- annet/adapters/netbox/v24/api_models.py +4 -3
- annet/adapters/netbox/v24/storage.py +12 -6
- annet/adapters/netbox/v37/api_models.py +3 -2
- annet/adapters/netbox/v37/storage.py +11 -5
- annet/annlib/jsontools.py +6 -2
- annet/api/__init__.py +167 -159
- annet/cli.py +96 -55
- annet/cli_args.py +9 -9
- annet/connectors.py +28 -15
- annet/gen.py +140 -144
- annet/generators/__init__.py +41 -627
- annet/generators/base.py +136 -0
- annet/generators/common/initial.py +1 -1
- annet/generators/entire.py +97 -0
- annet/generators/exceptions.py +10 -0
- annet/generators/jsonfragment.py +125 -0
- annet/generators/partial.py +119 -0
- annet/generators/perf.py +79 -0
- annet/generators/ref.py +15 -0
- annet/generators/result.py +127 -0
- annet/output.py +4 -9
- annet/storage.py +7 -3
- annet/types.py +0 -2
- {annet-0.10.dist-info → annet-0.12.dist-info}/METADATA +1 -1
- {annet-0.10.dist-info → annet-0.12.dist-info}/RECORD +32 -24
- {annet-0.10.dist-info → annet-0.12.dist-info}/AUTHORS +0 -0
- {annet-0.10.dist-info → annet-0.12.dist-info}/LICENSE +0 -0
- {annet-0.10.dist-info → annet-0.12.dist-info}/WHEEL +0 -0
- {annet-0.10.dist-info → annet-0.12.dist-info}/entry_points.txt +0 -0
- {annet-0.10.dist-info → annet-0.12.dist-info}/top_level.txt +0 -0
annet/cli_args.py
CHANGED
@@ -3,6 +3,7 @@
 import abc
 import argparse
 import enum
+import logging
 import os
 
 from valkit.common import valid_string_list
@@ -360,15 +361,14 @@ class QueryOptionsBase(CacheOptions):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         if not isinstance(self.query, Query):
-            …
-            super().validate_stdin(arg, val, **kwargs)
+            connectors = storage_connector.get_all()
+            if not connectors:
+                pass
+            elif len(connectors) == 1:
+                query_type = connectors[0].query()
+                self.query = query_type.new(self.query, hosts_range=self.hosts_range)
+            else:
+                logging.warning("Multiple connectors found, skip parsing query")
 
 
 class QueryOptions(QueryOptionsBase):
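
The new branch above only converts a raw query into a storage-specific Query object when exactly one storage connector is registered; with none it leaves the value alone, and with several it logs a warning and skips parsing. A minimal self-contained sketch of that dispatch (the Query and connector classes below are illustrative stand-ins, not annet's real ones):

import logging
from typing import Any, List, Optional

class Query:
    """Stand-in for annet's storage-specific Query base class."""

    def __init__(self, raw: Any, hosts_range: Optional[Any] = None) -> None:
        self.raw = raw
        self.hosts_range = hosts_range

    @classmethod
    def new(cls, raw: Any, hosts_range: Optional[Any] = None) -> "Query":
        return cls(raw, hosts_range)

class FakeStorageConnector:
    """Stand-in connector; annet's real connectors expose query() similarly."""

    def query(self) -> type:
        return Query

def parse_query(raw_query: Any, connectors: List[Any], hosts_range: Any = None) -> Any:
    # Mirrors the branching added to QueryOptionsBase.__init__: no connectors
    # leaves the raw value, exactly one parses it, several warn and skip.
    if isinstance(raw_query, Query) or not connectors:
        return raw_query
    if len(connectors) == 1:
        query_type = connectors[0].query()
        return query_type.new(raw_query, hosts_range=hosts_range)
    logging.warning("Multiple connectors found, skip parsing query")
    return raw_query

print(parse_query(["host1", "host2"], [FakeStorageConnector()]).raw)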
annet/connectors.py
CHANGED
@@ -2,8 +2,7 @@ import sys
 from abc import ABC
 from functools import cached_property
 from importlib.metadata import entry_points
-from typing import Generic, Optional, Type, TypeVar
-
+from typing import Generic, Optional, Type, TypeVar, List
 
 T = TypeVar("T")
 
@@ -12,29 +11,46 @@ class Connector(ABC, Generic[T]):
     name: str
     ep_name: str
     ep_group: str = "annet.connectors"
-    …
+    _classes: Optional[List[Type[T]]] = None
 
     def _get_default(self) -> Type[T]:
         raise RuntimeError(f"{self.name} is not set")
 
     @cached_property
-    def _entry_point(self) -> …
+    def _entry_point(self) -> List[Type[T]]:
         return load_entry_point(self.ep_group, self.ep_name)
 
     def get(self, *args, **kwargs) -> T:
-        if self.…
-        …
+        if self._classes is None:
+            self._classes = self._entry_point or [self._get_default()]
+        if len(self._classes) > 1:
+            raise RuntimeError(
+                f"Multiple classes are registered with the same "
+                f"group={self.ep_group} and name={self.ep_name}: "
+                f"{[cls for cls in self._classes]}",
+            )
+
+        res = self._classes[0]
         return res(*args, **kwargs)
 
+    def get_all(self, *args, **kwargs) -> T:
+        if self._classes is None:
+            self._classes = self._entry_point or [self._get_default()]
+
+        return [cls(*args, **kwargs) for cls in self._classes]
+
     def set(self, cls: Type[T]):
-        if self.…
+        if self._classes is not None:
+            raise RuntimeError(f"Cannot reinitialize value of {self.name}")
+        self._classes = [cls]
+
+    def set_all(self, classes: List[Type[T]]):
+        if self._classes is not None:
             raise RuntimeError(f"Cannot reinitialize value of {self.name}")
-        self.…
+        self._classes = list(classes)
 
     def is_default(self) -> bool:
-        return self.…
+        return self._classes is self._entry_point is None
 
 
 class CachedConnector(Connector[T], ABC):
@@ -58,7 +74,4 @@ def load_entry_point(group: str, name: str):
     ep = entry_points(group=group, name=name)  # pylint: disable=unexpected-keyword-arg
     if not ep:
         return None
-    …
-        raise RuntimeError(f"Multiple entry points with the same {group=} and {name=}: {[item.value for item in ep]}")
-    for item in ep:
-        return item.load()
+    return [item.load() for item in ep]
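
The net effect of this change is that a connector slot now holds a list of classes instead of a single class: get() keeps the old single-implementation contract (and now fails loudly on ambiguity), while get_all() instantiates every registered implementation. A condensed, self-contained sketch of the pattern, without the importlib.metadata entry-point loading (class names below are stand-ins):

from typing import Generic, List, Optional, Type, TypeVar

T = TypeVar("T")

class Registry(Generic[T]):
    """Toy analogue of Connector: a named slot holding implementation classes."""

    def __init__(self, name: str) -> None:
        self.name = name
        self._classes: Optional[List[Type[T]]] = None

    def set_all(self, classes: List[Type[T]]) -> None:
        if self._classes is not None:
            raise RuntimeError(f"Cannot reinitialize value of {self.name}")
        self._classes = list(classes)

    def get(self, *args, **kwargs) -> T:
        # Old single-implementation contract: ambiguity is now a hard error.
        if not self._classes:
            raise RuntimeError(f"{self.name} is not set")
        if len(self._classes) > 1:
            raise RuntimeError(f"Multiple classes registered for {self.name}")
        return self._classes[0](*args, **kwargs)

    def get_all(self, *args, **kwargs) -> List[T]:
        # New contract: instantiate every registered implementation.
        if not self._classes:
            raise RuntimeError(f"{self.name} is not set")
        return [cls(*args, **kwargs) for cls in self._classes]

class MemoryStorage: ...
class NetboxStorage: ...

storage = Registry("storage")
storage.set_all([MemoryStorage, NetboxStorage])
print(storage.get_all())   # one instance of each registered class
# storage.get() would raise RuntimeError: two implementations are registered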
annet/gen.py
CHANGED
@@ -39,6 +39,7 @@ from annet.generators import (
     JSONFragment,
     NotSupportedDevice,
     PartialGenerator,
+    RefGenerator,
 )
 from annet.lib import merge_dicts, percentile
 from annet.output import output_driver_connector
@@ -60,13 +61,16 @@ class DeviceGenerators:
     """Collections of various types of generators found for devices."""
 
     # map device fqdn to found partial generators
-    partial: Dict[…
+    partial: Dict[Any, List[PartialGenerator]] = dataclasses.field(default_factory=dict)
+
+    # ref generators
+    ref: Dict[Any, List[RefGenerator]] = dataclasses.field(default_factory=dict)
 
     # map device fqdn to found entire generators
-    entire: Dict[…
+    entire: Dict[Any, List[Entire]] = dataclasses.field(default_factory=dict)
 
     # map device fqdn to found json fragment generators
-    json_fragment: Dict[…
+    json_fragment: Dict[Any, List[JSONFragment]] = dataclasses.field(default_factory=dict)
 
     def iter_gens(self) -> Iterator[BaseGenerator]:
         """Iterate over generators."""
@@ -75,19 +79,24 @@ class DeviceGenerators:
         for gen in gen_list:
             yield gen
 
-    def file_gens(self, …
+    def file_gens(self, device: Any) -> Iterator[Union[Entire, JSONFragment]]:
         """Iterate over generators that generate files or file parts."""
         yield from itertools.chain(
-            self.entire.get(…
-            self.json_fragment.get(…
+            self.entire.get(device, []),
+            self.json_fragment.get(device, []),
         )
 
+    def update(self, other: "DeviceGenerators") -> None:
+        self.partial.update(other.partial)
+        self.ref.update(other.ref)
+        self.entire.update(other.entire)
+        self.json_fragment.update(other.json_fragment)
+
 
 @dataclasses.dataclass
 class OldNewDeviceContext:
     config: str
     args: GenOptions
-    storage: Storage
     downloaded_files: Dict[Device, DeviceDownloadedFiles]
     failed_files: Dict[Device, Exception]
     running: Dict[Device, Dict[str, str]]
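
DeviceGenerators.update merges the per-device generator maps with plain dict.update semantics, which matters now that Loader accumulates generators across several storages: a later storage's entry for the same device key replaces the earlier one rather than extending it. A small illustration of that behavior (plain dicts standing in for the generator maps):

import dataclasses
from typing import Any, Dict, List

@dataclasses.dataclass
class Gens:
    """Stand-in for DeviceGenerators with one map per generator kind."""

    partial: Dict[Any, List[str]] = dataclasses.field(default_factory=dict)
    entire: Dict[Any, List[str]] = dataclasses.field(default_factory=dict)

    def update(self, other: "Gens") -> None:
        # Plain dict.update: an entry for the same device key is replaced,
        # not extended, when two storages report the same device.
        self.partial.update(other.partial)
        self.entire.update(other.entire)

merged = Gens(partial={"dev1": ["gen_a"]})
merged.update(Gens(partial={"dev1": ["gen_b"], "dev2": ["gen_c"]}))
print(merged.partial)   # {'dev1': ['gen_b'], 'dev2': ['gen_c']}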
@@ -143,21 +152,24 @@ def _old_new_per_device(ctx: OldNewDeviceContext, device: Device, filterer: Filterer):
         splitter=tabparser.make_formatter(device.hw).split,
     )
     if not old:
-        res = generators.run_partial_initial(device…
+        res = generators.run_partial_initial(device)
         old = res.config_tree()
         perf = res.perf_mesures()
         if ctx.args.profile and ctx.do_print_perf:
             _print_perf("INITIAL", perf)
     run_args = generators.GeneratorPartialRunArgs(
         device=device,
-        storage=ctx.storage,
         use_acl=not ctx.args.no_acl,
         use_acl_safe=ctx.args.acl_safe,
         annotate=ctx.add_annotations,
         generators_context=ctx.args.generators_context,
         no_new=ctx.no_new,
     )
-    res = generators.run_partial_generators(…
+    res = generators.run_partial_generators(
+        ctx.gens.partial[device],
+        ctx.gens.ref[device],
+        run_args,
+    )
     partial_results = res.partial_results
     perf = res.perf_mesures()
     if ctx.no_new:
@@ -227,9 +239,8 @@ def _old_new_per_device(ctx: OldNewDeviceContext, device: Device, filterer: Filterer):
         get_logger(host=device.hostname).error(error_msg)
         return OldNewResult(device=device, err=Exception(error_msg))
     res = generators.run_file_generators(
-        ctx.gens.file_gens(device…
+        ctx.gens.file_gens(device),
         device,
-        ctx.storage,
     )
 
     entire_results = res.entire_results
@@ -245,28 +256,27 @@ def _old_new_per_device(ctx: OldNewDeviceContext, device: Device, filterer: Filterer):
         filters = filters_text.split("\n")
 
         for file_name in new_json_fragment_files:
-            new_json_fragment_files…
-            new_json_fragment_files…
-            …
+            if new_json_fragment_files.get(file_name) is not None:
+                new_json_fragment_files = _update_json_config(
+                    new_json_fragment_files,
+                    file_name,
+                    jsontools.apply_acl_filters(new_json_fragment_files[file_name][0], filters)
+                )
         for file_name in old_json_fragment_files:
-            old_json_fragment_files…
-                old_json_fragment_files,
-                file_name,
-                jsontools.apply_acl_filters(old_json_fragment_files[file_name][0], filters)
-            )
+            if old_json_fragment_files.get(file_name) is not None:
+                old_json_fragment_files[file_name] = jsontools.apply_acl_filters(old_json_fragment_files[file_name], filters)
 
     if ctx.args.acl_safe:
         safe_new_files = res.new_files(safe=True)
         safe_new_json_fragment_files = res.new_json_fragment_files(old_json_fragment_files, safe=True)
         if filters:
             for file_name in safe_new_json_fragment_files:
-                safe_new_json_fragment_files…
-                safe_new_json_fragment_files…
-                …
+                if safe_new_json_fragment_files.get(file_name):
+                    safe_new_json_fragment_files = _update_json_config(
+                        safe_new_json_fragment_files,
+                        file_name,
+                        jsontools.apply_acl_filters(safe_new_json_fragment_files[file_name][0], filters)
+                    )
 
     if ctx.args.profile:
         perf = res.perf_mesures()
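
In this hunk, jsontools.apply_acl_filters prunes a generated JSON document down to the paths permitted by the newline-separated ACL filters before old and new fragments are compared. As a rough illustration only, here is a toy path-prefix filter; it is not annet's jsontools implementation, which supports richer filter expressions over nested documents:

from typing import Any, Dict, List

def filter_by_paths(doc: Dict[str, Any], filters: List[str]) -> Dict[str, Any]:
    """Keep only top-level keys related to one of the filter paths (toy)."""
    kept: Dict[str, Any] = {}
    for key, value in doc.items():
        for f in filters:
            if f and (f == key or f.startswith(key + ".") or key.startswith(f + ".")):
                kept[key] = value
                break
    return kept

config = {"interfaces": {"eth0": {}}, "bgp": {"asn": 65000}, "snmp": {}}
print(filter_by_paths(config, "interfaces\nbgp.asn".split("\n")))
# {'interfaces': {'eth0': {}}, 'bgp': {'asn': 65000}}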
@@ -325,7 +335,7 @@ def split_downloaded_files(
     """Split downloaded files per generator type: entire/json_fragment."""
     ret = DeviceDownloadedFiles()
 
-    for gen in gens.file_gens(device…
+    for gen in gens.file_gens(device):
         filepath = gen.path(device)
         if filepath in device_flat_files:
             if isinstance(gen, Entire):
@@ -356,7 +366,6 @@ def split_downloaded_files_multi_device(
 @tracing.function
 def old_new(
     args: GenOptions,
-    storage: Storage,
     config: str,
     loader: "Loader",
     filterer: Filterer,
@@ -368,13 +377,13 @@ def old_new(
     do_files_download=False,
     do_print_perf=True,
 ):
-    devices = loader.…
-    gens = loader.resolve_gens(…
+    devices = loader.devices
+    gens = loader.resolve_gens(devices)
     running, failed_running = _old_resolve_running(config, devices)
     downloaded_files, failed_files = _old_resolve_files(config, devices, gens, do_files_download)
 
     if stdin is None:
-        stdin = args.stdin(…
+        stdin = args.stdin(filter_acl=args.filter_acl, config=config)
 
     fetched_packages, failed_packages = {}, {}
     if do_files_download and config == "running":
@@ -386,7 +395,6 @@ def old_new(
     ctx = OldNewDeviceContext(
         config=config,
         args=args,
-        storage=storage,
         downloaded_files=split_downloaded_files_multi_device(downloaded_files, gens, devices),
         failed_files=failed_files,
         running=running,
@@ -414,26 +422,25 @@
 
 
 @tracing.function
-def old_raw(
-    …
-    device_gens = …
-    running, failed_running = _old_resolve_running(config, devices)
-    downloaded_files, failed_files = _old_resolve_files(config, devices, device_gens, do_files_download)
+def old_raw(
+    args: GenOptions, loader: Loader, config, stdin=None,
+    do_files_download=False, use_mesh=True,
+) -> Iterable[Tuple[Device, Union[str, Dict[str, str]]]]:
+    device_gens = loader.resolve_gens(loader.devices)
+    running, failed_running = _old_resolve_running(config, loader.devices)
+    downloaded_files, failed_files = _old_resolve_files(config, loader.devices, device_gens, do_files_download)
     if stdin is None:
-        stdin = args.stdin(…
+        stdin = args.stdin(filter_acl=args.filter_acl, config=config)
     ctx = OldNewDeviceContext(
         config=config,
         args=args,
-        …
-        downloaded_files=split_downloaded_files_multi_device(downloaded_files, device_gens, devices),
+        downloaded_files=split_downloaded_files_multi_device(downloaded_files, device_gens, loader.devices),
         failed_files=failed_files,
         running=running,
         failed_running=failed_running,
         stdin=stdin,
         do_files_download=do_files_download,
-        device_count=len(devices),
+        device_count=len(loader.devices),
         no_new=True,
         add_annotations=False,
         add_implicit=False,
@@ -442,7 +449,7 @@ def old_raw(args: GenOptions, storage, config, stdin=None,
         failed_packages={},
         do_print_perf=True,
     )
-    for device in devices:
+    for device in loader.devices:
         if not device.is_pc():
             config = _old_new_get_config_cli(ctx, device)
             config = scrub_config(config, device.breed)
@@ -464,68 +471,59 @@ def worker(device_id, args: ShowGenOptions, stdin, loader: "Loader", filterer: Filterer):
     if span:
         span.set_attribute("device.id", device_id)
 
-    …
-    )
-    device…
-    …
-    device.hw,
-    args.indent
-    ),
-    False)
+    for res in old_new(
+        args,
+        config="/dev/null",
+        loader=loader,
+        filterer=filterer,
+        add_implicit=False,
+        add_annotations=args.annotate,
+        stdin=stdin,
+        device_ids=[device_id],
+    ):
+        new = res.get_new(args.acl_safe)
+        new_files = res.get_new_files(args.acl_safe)
+        new_file_fragments = res.get_new_file_fragments(args.acl_safe)
+        output_driver = output_driver_connector.get()
+        device = res.device
+        if new is None:
+            continue
+        for (entire_path, (entire_data, _)) in sorted(new_files.items(), key=itemgetter(0)):
+            yield (output_driver.entire_config_dest_path(device, entire_path), entire_data, False)
+
+        for (path, (data, _)) in sorted(new_file_fragments.items(), key=itemgetter(0)):
+            dumped_data = json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False)
+            yield (output_driver.entire_config_dest_path(device, path), dumped_data, False)
+
+        has_file_result = new_files or new_file_fragments
+        has_partial_result = new or not has_file_result
+        if device.hw.vendor in platform.VENDOR_REVERSES and has_partial_result:
+            orderer = patching.Orderer.from_hw(device.hw)
+            yield (output_driver.cfg_file_names(device)[0],
+                   format_config_blocks(
+                       orderer.order_config(new),
+                       device.hw,
+                       args.indent
+                   ),
+                   False)
 
 
 def old_new_worker(device_id, args: DeployOptions, config, stdin, loader: "Loader", filterer: Filterer):
-    …
-        device_ids=[device_id],
-        no_new=args.clear,
-        do_files_download=True,
-    )
+    yield from old_new(
+        args,
+        config=config,
+        loader=loader,
+        filterer=filterer,
+        stdin=stdin,
+        device_ids=[device_id],
+        no_new=args.clear,
+        do_files_download=True,
+    )
 
 
 class OldNewParallel(Parallel):
-    def __init__(self, …
-        stdin = args.stdin(storage=storage, filter_acl=args.filter_acl, config=args.config)
+    def __init__(self, args: DeployOptions, loader: "Loader", filterer: Filterer):
+        stdin = args.stdin(filter_acl=args.filter_acl, config=args.config)
         super().__init__(
             old_new_worker,
             args,
@@ -536,20 +534,19 @@ class OldNewParallel(Parallel):
         )
         self.tune_args(args)
 
-    def generated_configs(self, …
-    …
+    def generated_configs(self, devices: List[Device]) -> Generator[OldNewResult, None, None]:
+        devices_by_id = {device.id: device for device in devices}
+        device_ids = list(devices_by_id)
 
         for task_result in self.irun(device_ids):
             if task_result.exc is not None:
-                device = …
+                device = devices_by_id.pop(task_result.device_id)
                 yield OldNewResult(device=device, err=task_result.exc)
-                skipped.discard(task_result.device_id)
             elif task_result.result is not None:
                 yield from task_result.result
-                …
+                devices_by_id.pop(task_result.device_id)
 
-        for …
-            device = self.storage.get_device(device_id, use_mesh=False, preload_neighbors=False)
+        for device in devices_by_id.values():
             yield OldNewResult(device=device, err=Exception(f"No config returned for {device.hostname}"))
 
@@ -565,7 +562,7 @@ def _get_files_to_download(devices: List[Device], gens: DeviceGenerators) -> Dict…
     for device in devices:
         paths = set()
         try:
-            for generator in gens.file_gens(device…
+            for generator in gens.file_gens(device):
                 try:
                     path = generator.path(device)
                     if path:
@@ -714,6 +711,8 @@ def _old_new_get_config_cli(ctx: OldNewDeviceContext, device: Device) -> str:
         raise exc
     elif ctx.config == "-":
         text = ctx.stdin["config"]
+        if ctx.device_count > 1:
+            raise ValueError("stdin config can not be used with multiple devices")
     else:
         if os.path.isdir(ctx.config):
             filename = _existing_cfg_file_name(ctx.config, device)
@@ -767,13 +766,15 @@ def _old_new_get_config_files(ctx: OldNewDeviceContext, device: Device) -> Devic…
 
 
 @tracing.function
-def _old_resolve_gens(args: GenOptions, storage: Storage, devices: …
+def _old_resolve_gens(args: GenOptions, storage: Storage, devices: Iterable[Device]) -> DeviceGenerators:
     per_device_gens = DeviceGenerators()
+    devices = devices or [None]  # get all generators if no devices provided
     for device in devices:
         gens = generators.build_generators(storage, gens=args, device=device)
-        per_device_gens.partial[device…
-        per_device_gens.entire[device…
-        per_device_gens.json_fragment[device…
+        per_device_gens.partial[device] = gens.partial
+        per_device_gens.entire[device] = gens.entire
+        per_device_gens.json_fragment[device] = gens.json_fragment
+        per_device_gens.ref[device] = gens.ref
     return per_device_gens
 
@@ -809,37 +810,46 @@ def _old_resolve_files(config: str, …
 
 
 class Loader:
-    def __init__(…
+    def __init__(
+        self, *storages: Storage,
+        args: GenOptions,
+        no_empty_warning: bool = False,
+    ) -> None:
         self._args = args
-        self.…
+        self._storages = storages
         self._no_empty_warning = no_empty_warning
-        self._devices_map: …
-        self._gens: …
+        self._devices_map: Dict[int, Device] = {}
+        self._gens: DeviceGenerators = DeviceGenerators()
+        self._counter = itertools.count()
 
         self._preload()
 
     def _preload(self) -> None:
         with tracing_connector.get().start_as_current_span("Resolve devices"):
-            …
+            for storage in self._storages:
+                devices = storage.make_devices(
+                    self._args.query,
+                    preload_neighbors=True,
+                    use_mesh=not self._args.no_mesh,
+                    preload_extra_fields=True,
+                )
+                for device in devices:
+                    self._devices_map[next(self._counter)] = device
+                self._gens.update(_old_resolve_gens(self._args, storage, devices))
             if not devices and not self._no_empty_warning:
                 get_logger().error("No devices found for %s", self._args.query)
                 return
 
-        self._devices_map = {d.id: d for d in devices}
-        self._gens = _old_resolve_gens(self._args, self._storage, devices)
-
     @property
     def device_fqdns(self):
-        return {…
+        return {
+            device_id: d.fqdn
+            for device_id, d in self._devices_map.items()
+        }
 
     @property
     def device_ids(self):
-        return list(self.…
+        return list(self._devices_map)
 
     @property
     def devices(self) -> List[Device]:
@@ -847,23 +857,9 @@ class Loader:
             return list(self._devices_map.values())
         return []
 
-    def …
-        devices = []
-        for device_id in device_ids:
-            device = self._devices_map[device_id]
-
-            # can not use self._storage here, we can be in another process
-            device.storage = storage
-
-            devices.append(device)
-        return devices
-
-    def resolve_gens(self, storage: Storage, devices: Iterable[Device]) -> DeviceGenerators:
+    def resolve_gens(self, devices: Iterable[Device]) -> DeviceGenerators:
         if self._gens is not None:
-            for gen in self._gens.iter_gens():
-                # can not use self._storage here, we can be in another process
-                gen.storage = storage
             return self._gens
 
         with tracing_connector.get().start_as_current_span("Resolve gens"):
-            return _old_resolve_gens(self._args, …
+            return _old_resolve_gens(self._args, self._storage, devices)