annet 0.11-py3-none-any.whl → 0.12.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of annet has been flagged as potentially problematic.

annet/api/__init__.py CHANGED
@@ -4,6 +4,7 @@ import os
  import re
  import sys
  import time
+ import warnings
  from collections import OrderedDict as odict
  from itertools import groupby
  from operator import itemgetter
@@ -17,23 +18,24 @@ from typing import (
  Optional,
  Set,
  Tuple,
- Union,
- cast,
+ Union, cast,
  )

  import colorama
+ import annet.lib
  from annet.annlib import jsontools
  from annet.annlib.netdev.views.hardware import HardwareView
  from annet.annlib.rbparser.platform import VENDOR_REVERSES
  from annet.annlib.types import GeneratorType
  from contextlog import get_logger

- import annet.deploy
+ from annet.deploy import Fetcher, DeployDriver
  from annet import cli_args
  from annet import diff as ann_diff
  from annet import filtering
  from annet import gen as ann_gen
  from annet import patching, rulebook, tabparser, tracing
+ from annet.filtering import Filterer
  from annet.hardware import hardware_connector
  from annet.output import (
  LABEL_NEW_PREFIX,
@@ -43,7 +45,7 @@ from annet.output import (
  )
  from annet.parallel import Parallel, TaskResult
  from annet.reference import RefTracker
- from annet.storage import Device, Storage, storage_connector
+ from annet.storage import Device, storage_connector
  from annet.types import Diff, ExitCode, OldNewResult, Op, PCDiff, PCDiffFile


@@ -185,43 +187,54 @@ def _print_pre_as_diff(pre, show_rules, indent, file=None, _level=0):
  rule_printed = False


+ class PoolProgressLogger:
+ def __init__(self, device_fqdns: Dict[int, str]):
+ self.device_fqdns = device_fqdns
+
+ def __call__(self, pool: Parallel, task_result: TaskResult):
+ progress_logger = get_logger("progress")
+ perc = int(pool.tasks_done / len(self.device_fqdns) * 100)
+
+ fqdn = self.device_fqdns[task_result.device_id]
+ elapsed_time = "%dsec" % int(time.monotonic() - task_result.extra["start_time"])
+ if task_result.extra.get("regression", False):
+ status = task_result.extra["status"]
+ status_color = task_result.extra["status_color"]
+ message = task_result.extra["message"]
+ else:
+ status = "OK" if task_result.exc is None else "FAIL"
+ status_color = colorama.Fore.GREEN if status == "OK" else colorama.Fore.RED
+ message = "" if status == "OK" else str(task_result.exc)
+ progress_logger.info(message,
+ perc=perc, fqdn=fqdn, status=status, status_color=status_color,
+ worker=task_result.worker_name, task_time=elapsed_time)
+ return task_result
+
+
  def log_host_progress_cb(pool: Parallel, task_result: TaskResult):
- progress_logger = get_logger("progress")
+ warnings.warn(
+ "log_host_progress_cb is deprecated, use PoolProgressLogger",
+ DeprecationWarning,
+ stacklevel=2,
+ )
  args = cast(cli_args.QueryOptions, pool.args[0])
  connector = storage_connector.get()
  storage_opts = connector.opts().from_cli_opts(args)
  with connector.storage()(storage_opts) as storage:
  hosts = storage.resolve_fdnds_by_query(args.query)
- perc = int(pool.tasks_done / len(hosts) * 100)
  fqdn = hosts[task_result.device_id]
- elapsed_time = "%dsec" % int(time.monotonic() - task_result.extra["start_time"])
- if task_result.extra.get("regression", False):
- status = task_result.extra["status"]
- status_color = task_result.extra["status_color"]
- message = task_result.extra["message"]
- else:
- status = "OK" if task_result.exc is None else "FAIL"
- status_color = colorama.Fore.GREEN if status == "OK" else colorama.Fore.RED
- message = "" if status == "OK" else str(task_result.exc)
- progress_logger.info(message,
- perc=perc, fqdn=fqdn, status=status, status_color=status_color,
- worker=task_result.worker_name, task_time=elapsed_time)
- return task_result
+ PoolProgressLogger(device_fqdns=fqdn)(pool, task_result)


  # =====
- def gen(args: cli_args.ShowGenOptions):
+ def gen(args: cli_args.ShowGenOptions, loader: ann_gen.Loader):
  """ Сгенерировать конфиг для устройств """
- connector = storage_connector.get()
- storage_opts = connector.opts().from_cli_opts(args)
- with connector.storage()(storage_opts) as storage:
- loader = ann_gen.Loader(storage, args)
- stdin = args.stdin(storage=storage, filter_acl=args.filter_acl, config=None)
+ stdin = args.stdin(filter_acl=args.filter_acl, config=None)

  filterer = filtering.filterer_connector.get()
  pool = Parallel(ann_gen.worker, args, stdin, loader, filterer).tune_args(args)
  if args.show_hosts_progress:
- pool.add_callback(log_host_progress_cb)
+ pool.add_callback(PoolProgressLogger(loader.device_fqdns))

  return pool.run(loader.device_ids, args.tolerate_fails, args.strict_exit_code)

@@ -244,22 +257,18 @@ def _diff_files(old_files, new_files, context=3):
  return ret


- def patch(args: cli_args.ShowPatchOptions):
+ def patch(args: cli_args.ShowPatchOptions, loader: ann_gen.Loader):
  """ Сгенерировать патч для устройств """
  global live_configs # pylint: disable=global-statement
- connector = storage_connector.get()
- storage_opts = connector.opts().from_cli_opts(args)
- with connector.storage()(storage_opts) as storage:
- loader = ann_gen.Loader(storage, args)
- if args.config == "running":
- fetcher = annet.deploy.fetcher_connector.get()
- live_configs = fetcher.fetch(loader.devices, processes=args.parallel)
- stdin = args.stdin(storage=storage, filter_acl=args.filter_acl, config=args.config)
+ if args.config == "running":
+ fetcher = annet.deploy.fetcher_connector.get()
+ live_configs = fetcher.fetch(loader.devices, processes=args.parallel)
+ stdin = args.stdin(filter_acl=args.filter_acl, config=args.config)

  filterer = filtering.filterer_connector.get()
  pool = Parallel(_patch_worker, args, stdin, loader, filterer).tune_args(args)
  if args.show_hosts_progress:
- pool.add_callback(log_host_progress_cb)
+ pool.add_callback(PoolProgressLogger(loader.device_fqdns))
  return pool.run(loader.device_ids, args.tolerate_fails, args.strict_exit_code)


@@ -290,71 +299,70 @@ def _patch_worker(device_id, args: cli_args.ShowPatchOptions, stdin, loader: ann


  # =====
- def res_diff_patch(device_id, args: cli_args.ShowPatchOptions, stdin, loader: ann_gen.Loader, filterer: filtering.Filterer) -> Iterable[
- Tuple[OldNewResult, Dict, Dict]]:
- connector = storage_connector.get()
- storage_opts = connector.opts().from_cli_opts(args)
- with connector.storage()(storage_opts) as storage:
- for res in ann_gen.old_new(
- args,
- storage,
- config=args.config,
- loader=loader,
- filterer=filterer,
- stdin=stdin,
- device_ids=[device_id],
- no_new=args.clear,
- do_files_download=True,
- ):
- old = res.get_old(args.acl_safe)
- new = res.get_new(args.acl_safe)
- new_json_fragment_files = res.get_new_file_fragments(args.acl_safe)
-
- device = res.device
- acl_rules = res.get_acl_rules(args.acl_safe)
- if res.old_json_fragment_files or new_json_fragment_files:
- yield res, None, None
- elif old is not None:
- (diff_tree, patch_tree) = _diff_and_patch(device, old, new, acl_rules, res.filter_acl_rules,
- args.add_comments)
- yield res, diff_tree, patch_tree
-
-
- def diff(args: cli_args.DiffOptions, loader: ann_gen.Loader, filterer: filtering.Filterer) -> Mapping[Device, Union[Diff, PCDiff]]:
+ def res_diff_patch(
+ device_id, args: cli_args.ShowPatchOptions, stdin, loader: ann_gen.Loader, filterer: filtering.Filterer,
+ ) -> Iterable[Tuple[OldNewResult, Dict, Dict]]:
+ for res in ann_gen.old_new(
+ args,
+ config=args.config,
+ loader=loader,
+ filterer=filterer,
+ stdin=stdin,
+ device_ids=[device_id],
+ no_new=args.clear,
+ do_files_download=True,
+ ):
+ old = res.get_old(args.acl_safe)
+ new = res.get_new(args.acl_safe)
+ new_json_fragment_files = res.get_new_file_fragments(args.acl_safe)
+
+ device = res.device
+ acl_rules = res.get_acl_rules(args.acl_safe)
+ if res.old_json_fragment_files or new_json_fragment_files:
+ yield res, None, None
+ elif old is not None:
+ (diff_tree, patch_tree) = _diff_and_patch(device, old, new, acl_rules, res.filter_acl_rules,
+ args.add_comments)
+ yield res, diff_tree, patch_tree
+
+
+ def diff(
+ args: cli_args.DiffOptions,
+ loader: ann_gen.Loader,
+ device_ids: List[int],
+ filterer: filtering.Filterer,
+ ) -> Mapping[Device, Union[Diff, PCDiff]]:
  ret = {}
- connector = storage_connector.get()
- storage_opts = connector.opts().from_cli_opts(args)
- with connector.storage()(storage_opts) as storage:
- for res in ann_gen.old_new(
- args,
- storage,
- config=args.config,
- loader=loader,
- no_new=args.clear,
- do_files_download=True,
- device_ids=loader.device_ids,
- filterer=filterer,
- ):
- old = res.get_old(args.acl_safe)
- new = res.get_new(args.acl_safe)
- device = res.device
- acl_rules = res.get_acl_rules(args.acl_safe)
- new_files = res.get_new_files(args.acl_safe)
- new_json_fragment_files = res.get_new_file_fragments()
- pc_diff_files = []
- if res.old_files or new_files:
- pc_diff_files.extend(_pc_diff(device.hostname, res.old_files, new_files))
- if res.old_json_fragment_files or new_json_fragment_files:
- pc_diff_files.extend(_json_fragment_diff(device.hostname, res.old_json_fragment_files, new_json_fragment_files))
-
- if pc_diff_files:
- pc_diff_files.sort(key=lambda f: f.label)
- ret[device] = PCDiff(hostname=device.hostname, diff_files=pc_diff_files)
- elif old is not None:
- rb = rulebook.get_rulebook(device.hw)
- diff_tree = patching.make_diff(old, new, rb, [acl_rules, res.filter_acl_rules])
- diff_tree = patching.strip_unchanged(diff_tree)
- ret[device] = diff_tree
+ for res in ann_gen.old_new(
+ args,
+ config=args.config,
+ loader=loader,
+ no_new=args.clear,
+ do_files_download=True,
+ device_ids=device_ids,
+ filterer=filterer,
+ ):
+ old = res.get_old(args.acl_safe)
+ new = res.get_new(args.acl_safe)
+ device = res.device
+ acl_rules = res.get_acl_rules(args.acl_safe)
+ new_files = res.get_new_files(args.acl_safe)
+ new_json_fragment_files = res.get_new_file_fragments()
+ if res.old_files or new_files:
+ ret[device] = PCDiff(
+ hostname=device.hostname,
+ diff_files=list(_pc_diff(device.hostname, res.old_files, new_files)),
+ )
+ elif res.old_json_fragment_files or new_json_fragment_files:
+ ret[device] = PCDiff(
+ hostname=device.hostname,
+ diff_files=list(_json_fragment_diff(device.hostname, res.old_json_fragment_files, new_json_fragment_files)),
+ )
+ elif old is not None:
+ rb = rulebook.get_rulebook(device.hw)
+ diff_tree = patching.make_diff(old, new, rb, [acl_rules, res.filter_acl_rules])
+ diff_tree = patching.strip_unchanged(diff_tree)
+ ret[device] = diff_tree
  return ret


@@ -592,24 +600,24 @@ class Deployer:
  ans = ask.loop()
  return ans

- def check_diff(self, result: annet.deploy.DeployResult, storage: Storage):
+ def check_diff(self, result: annet.deploy.DeployResult, loader: ann_gen.Loader):
  global live_configs # pylint: disable=global-statement
- success_hosts = [
- host.split(".", 1)[0] for (host, hres) in result.results.items()
- if (not isinstance(hres, Exception) and
+ success_device_ids = []
+ for host, hres in result.results.items():
+ device = self.fqdn_to_device[host]
+ if (
+ not isinstance(hres, Exception) and
  host not in self.empty_diff_hostnames and
- not self.fqdn_to_device[host].is_pc())
- ]
+ device.is_pc()
+ ):
+ success_device_ids.append(device.id)
  diff_args = self.args.copy_from(
  self.args,
  config="running",
- query=success_hosts,
  )
  if diff_args.query:
  live_configs = None
- loader = ann_gen.Loader(storage, diff_args, no_empty_warning=True)
-
- diffs = diff(diff_args, loader, self._filterer)
+ diffs = diff(diff_args, loader, success_device_ids, self._filterer)
  non_pc_diffs = {dev: diff for dev, diff in diffs.items() if not isinstance(diff, PCDiff)}
  devices_to_diff = ann_diff.collapse_diffs(non_pc_diffs)
  devices_to_diff.update({(dev,): diff for dev, diff in diffs.items() if isinstance(diff, PCDiff)})
@@ -630,57 +638,57 @@ class Deployer:
  _print_pre_as_diff(patching.make_pre(diff_obj), diff_args.show_rules, diff_args.indent)


- def deploy(args: cli_args.DeployOptions) -> ExitCode:
+ def deploy(
+ args: cli_args.DeployOptions,
+ loader: ann_gen.Loader,
+ deployer: Deployer,
+ filterer: Filterer,
+ fetcher: Fetcher,
+ deploy_driver: DeployDriver,
+ ) -> ExitCode:
  """ Сгенерировать конфиг для устройств и задеплоить его """
  ret: ExitCode = 0
- deployer = Deployer(args)
- connector = storage_connector.get()
- storage_opts = connector.opts().from_cli_opts(args)
- with connector.storage()(storage_opts) as storage:
- global live_configs # pylint: disable=global-statement
- loader = ann_gen.Loader(storage, args)
- filterer = filtering.filterer_connector.get()
- fetcher = annet.deploy.fetcher_connector.get()
- deploy_driver = annet.deploy.driver_connector.get()
- live_configs = fetcher.fetch(devices=loader.devices, processes=args.parallel)
- pool = ann_gen.OldNewParallel(storage, args, loader, filterer)
-
- for res in pool.generated_configs(loader.device_ids):
- # Меняем exit code если хоть один device ловил exception
- if res.err is not None:
- ret |= 2 ** 3
- job = DeployerJob.from_device(res.device, args)
- deployer.parse_result(job, res)
-
- deploy_cmds = deployer.deploy_cmds
- result = annet.deploy.DeployResult(hostnames=[], results={}, durations={}, original_states={})
- if deploy_cmds:
- ans = deployer.ask_deploy()
- if ans != "y":
- return 2 ** 2
- result = annet.lib.do_async(deploy_driver.bulk_deploy(deploy_cmds, args))
-
- rolled_back = False
- rollback_cmds = {deployer.fqdn_to_device[x]: cc for x, cc in result.original_states.items() if cc}
- if args.rollback and rollback_cmds:
- ans = deployer.ask_rollback()
- if rollback_cmds and ans == "y":
- rolled_back = True
- annet.lib.do_async(deploy_driver.bulk_deploy(rollback_cmds, args))
-
- if not args.no_check_diff and not rolled_back:
- deployer.check_diff(result, storage)
-
- if deployer.failed_configs:
- result.add_results(deployer.failed_configs)
- ret |= 2 ** 1
-
- annet.deploy.show_bulk_report(result.hostnames, result.results, result.durations, log_dir=None)
- for host_result in result.results.values():
- if isinstance(host_result, Exception):
- ret |= 2 ** 0
- break
- return ret
+ global live_configs # pylint: disable=global-statement
+ live_configs = fetcher.fetch(devices=loader.devices, processes=args.parallel)
+ pool = ann_gen.OldNewParallel(args, loader, filterer)
+
+ for res in pool.generated_configs(loader.devices):
+ # Меняем exit code если хоть один device ловил exception
+ if res.err is not None:
+ get_logger(res.device.hostname).error("error generating configs", exc_info=res.err)
+ ret |= 2 ** 3
+ job = DeployerJob.from_device(res.device, args)
+ deployer.parse_result(job, res)
+
+ deploy_cmds = deployer.deploy_cmds
+ result = annet.deploy.DeployResult(hostnames=[], results={}, durations={}, original_states={})
+ if deploy_cmds:
+ ans = deployer.ask_deploy()
+ if ans != "y":
+ return 2 ** 2
+ result = annet.lib.do_async(deploy_driver.bulk_deploy(deploy_cmds, args))
+
+ rolled_back = False
+ rollback_cmds = {deployer.fqdn_to_device[x]: cc for x, cc in result.original_states.items() if cc}
+ if args.rollback and rollback_cmds:
+ ans = deployer.ask_rollback()
+ if rollback_cmds and ans == "y":
+ rolled_back = True
+ annet.lib.do_async(deploy_driver.bulk_deploy(rollback_cmds, args))
+
+ if not args.no_check_diff and not rolled_back:
+ deployer.check_diff(result, loader)
+
+ if deployer.failed_configs:
+ result.add_results(deployer.failed_configs)
+ ret |= 2 ** 1
+
+ annet.deploy.show_bulk_report(result.hostnames, result.results, result.durations, log_dir=None)
+ for host_result in result.results.values():
+ if isinstance(host_result, Exception):
+ ret |= 2 ** 0
+ break
+ return ret


  def file_diff(args: cli_args.FileDiffOptions):
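Taken together, the hunks above change the calling convention of annet.api: gen() and patch() now receive a ready ann_gen.Loader instead of opening a storage themselves, diff() additionally takes an explicit list of device ids, and per-host progress reporting moves from the deprecated log_host_progress_cb to the new PoolProgressLogger callback. A minimal sketch of the new convention, mirroring the gen() hunk above (the wrapper name run_gen_with_progress is illustrative, not part of the package):

    # Illustrative sketch, not package code: register the new PoolProgressLogger
    # callback instead of the deprecated log_host_progress_cb.
    from annet import cli_args, filtering, gen as ann_gen
    from annet.api import PoolProgressLogger
    from annet.parallel import Parallel

    def run_gen_with_progress(args: cli_args.ShowGenOptions, loader: ann_gen.Loader):
        stdin = args.stdin(filter_acl=args.filter_acl, config=None)
        filterer = filtering.filterer_connector.get()
        pool = Parallel(ann_gen.worker, args, stdin, loader, filterer).tune_args(args)
        if args.show_hosts_progress:
            # loader.device_fqdns maps device id -> FQDN, as used in the hunks above
            pool.add_callback(PoolProgressLogger(loader.device_fqdns))
        return pool.run(loader.device_ids, args.tolerate_fails, args.strict_exit_code)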
annet/cli.py CHANGED
@@ -4,20 +4,22 @@ import os
  import platform
  import subprocess
  import shutil
- from typing import Generator, Tuple
+ from contextlib import ExitStack, contextmanager
+ from typing import Tuple, Iterable

  import yaml
  from contextlog import get_logger
  from valkit.python import valid_logging_level

+ from annet.deploy import driver_connector, fetcher_connector
  from annet import api, cli_args, filtering
- from annet.api import collapse_texts
+ from annet.api import collapse_texts, Deployer
  from annet.argparse import ArgParser, subcommand
  from annet.diff import gen_sort_diff
  from annet.gen import Loader, old_raw
  from annet.lib import get_context_path, repair_context_file
- from annet.output import output_driver_connector
- from annet.storage import Storage, storage_connector
+ from annet.output import output_driver_connector, OutputDriver
+ from annet.storage import storage_connector


  def fill_base_args(parser: ArgParser, pkg_name: str, logging_config: str):
@@ -31,65 +33,91 @@ def list_subcommands():
  return globals().copy()


+ def _gen_current_items(
+ config,
+ stdin,
+ loader: Loader,
+ output_driver: OutputDriver,
+ gen_args: cli_args.GenOptions,
+ ) -> Iterable[Tuple[str, str, bool]]:
+ for device, result in old_raw(
+ args=gen_args,
+ loader=loader,
+ config=config,
+ stdin=stdin,
+ do_files_download=True,
+ use_mesh=False,
+ ):
+ if device.hw.vendor != "pc":
+ destname = output_driver.cfg_file_names(device)[0]
+ yield (destname, result, False)
+ else:
+ for entire_path, entire_data in sorted(result.items(), key=operator.itemgetter(0)):
+ if entire_data is None:
+ entire_data = ""
+ destname = output_driver.entire_config_dest_path(device, entire_path)
+ yield (destname, entire_data, False)
+
+
+ @contextmanager
+ def get_loader(gen_args: cli_args.GenOptions, args: cli_args.QueryOptions):
+ exit_stack = ExitStack()
+ connectors = storage_connector.get_all()
+ storages = []
+ with exit_stack:
+ for connector in connectors:
+ storage_opts = connector.opts().from_cli_opts(args)
+ storages.append(exit_stack.enter_context(connector.storage()(storage_opts)))
+ yield Loader(*storages, args=gen_args)
+
+
  @subcommand(cli_args.QueryOptions, cli_args.opt_config, cli_args.FileOutOptions)
  def show_current(args: cli_args.QueryOptions, config, arg_out: cli_args.FileOutOptions) -> None:
  """ Показать текущий конфиг устройств """
-
- def _gen_items(storage: Storage) -> Generator[Tuple[str, str, bool], None, None]:
- for device, result in old_raw(
- cli_args.GenOptions(args, no_acl=True),
- storage,
- config,
- stdin=args.stdin(storage=storage, config=config),
- do_files_download=True,
- use_mesh=False,
- ):
- output_driver = output_driver_connector.get()
- destname = output_driver.cfg_file_names(device)[0]
- if device.hw.vendor != "pc":
- yield (destname, result, False)
- else:
- for entire_path, entire_data in sorted(result.items(), key=operator.itemgetter(0)):
- if entire_data is None:
- entire_data = ""
- yield (output_driver.entire_config_dest_path(device, entire_path), entire_data, False)
-
- connector = storage_connector.get()
- storage_opts = connector.opts().from_cli_opts(args)
- with connector.storage()(storage_opts) as storage:
- ids = storage.resolve_object_ids_by_query(args.query)
- if not ids:
+ gen_args = cli_args.GenOptions(args, no_acl=True)
+ output_driver = output_driver_connector.get()
+ with get_loader(gen_args, args) as loader:
+ if not loader.devices:
  get_logger().error("No devices found for %s", args.query)
- output_driver_connector.get().write_output(arg_out, _gen_items(storage), len(ids))
+
+ items = _gen_current_items(
+ loader=loader,
+ output_driver=output_driver,
+ gen_args=gen_args,
+ stdin=args.stdin(config=config),
+ config=config,
+ )
+ output_driver.write_output(arg_out, items, len(loader.devices))


  @subcommand(cli_args.ShowGenOptions)
  def gen(args: cli_args.ShowGenOptions):
  """ Сгенерировать конфиг для устройств """
- (success, fail) = api.gen(args)
- out = [item for items in success.values() for item in items]
- output_driver = output_driver_connector.get()
- if args.dest is None:
- text_mapping = {item[0]: item[1] for item in out}
- out = [(",".join(key), value, False) for key, value in collapse_texts(text_mapping).items()]
- out.extend(output_driver.format_fails(fail, args))
- total = len(success) + len(fail)
- if not total:
- get_logger().error("No devices found for %s", args.query)
- output_driver.write_output(args, out, total)
+ with get_loader(args, args) as loader:
+ (success, fail) = api.gen(args, loader)
+
+ out = [item for items in success.values() for item in items]
+ output_driver = output_driver_connector.get()
+ if args.dest is None:
+ text_mapping = {item[0]: item[1] for item in out}
+ out = [(",".join(key), value, False) for key, value in collapse_texts(text_mapping).items()]
+
+ out.extend(output_driver.format_fails(fail, loader.device_fqdns))
+ total = len(success) + len(fail)
+ if not total:
+ get_logger().error("No devices found for %s", args.query)
+ output_driver.write_output(args, out, total)


  @subcommand(cli_args.ShowDiffOptions)
  def diff(args: cli_args.ShowDiffOptions):
  """ Сгенерировать конфиг для устройств и показать дифф по рулбуку с текущим """
- connector = storage_connector.get()
- storage_opts = connector.opts().from_cli_opts(args)
- with connector.storage()(storage_opts) as storage:
+ with get_loader(args, args) as loader:
  filterer = filtering.filterer_connector.get()
- loader = Loader(storage, args)
+ device_ids = loader.device_ids
  output_driver_connector.get().write_output(
  args,
- gen_sort_diff(api.diff(args, loader, filterer), args),
+ gen_sort_diff(api.diff(args, loader, device_ids, filterer), args),
  len(loader.device_ids)
  )

@@ -97,20 +125,33 @@ def diff(args: cli_args.ShowDiffOptions):
  @subcommand(cli_args.ShowPatchOptions)
  def patch(args: cli_args.ShowPatchOptions):
  """ Сгенерировать конфиг для устройств и сформировать патч """
- (success, fail) = api.patch(args)
- out = [item for items in success.values() for item in items]
- output_driver = output_driver_connector.get()
- out.extend(output_driver.format_fails(fail, args))
- total = len(success) + len(fail)
- if not total:
- get_logger().error("No devices found for %s", args.query)
- output_driver.write_output(args, out, total)
+ with get_loader(args, args) as loader:
+ (success, fail) = api.patch(args, loader)
+
+ out = [item for items in success.values() for item in items]
+ output_driver = output_driver_connector.get()
+ out.extend(output_driver.format_fails(fail, loader.device_fqdns))
+ total = len(success) + len(fail)
+ if not total:
+ get_logger().error("No devices found for %s", args.query)
+ output_driver.write_output(args, out, total)


  @subcommand(cli_args.DeployOptions)
  def deploy(args: cli_args.DeployOptions):
  """ Сгенерировать конфиг для устройств и задеплоить его """
- return api.deploy(args)
+
+ deployer = Deployer(args)
+ filterer = filtering.filterer_connector.get()
+ fetcher = fetcher_connector.get()
+ deploy_driver = driver_connector.get()
+
+ with get_loader(args, args) as loader:
+ return api.deploy(
+ args=args, loader=loader, deployer=deployer,
+ deploy_driver=deploy_driver, filterer=filterer,
+ fetcher=fetcher,
+ )


  @subcommand(cli_args.FileDiffOptions)
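On the CLI side, the Loader is now built through the new get_loader() context manager, which opens a storage for every registered storage connector on an ExitStack and yields a single Loader over all of them, and the Deployer, Filterer, Fetcher and DeployDriver are passed into annet.api explicitly. A minimal sketch of driving api.deploy() the same way from outside the built-in subcommands (run_custom_deploy is an illustrative name, and importing get_loader from annet.cli is an assumption based on this diff):

    # Illustrative sketch, not package code: wire api.deploy() with the
    # dependencies the CLI now passes explicitly.
    from annet import api, cli_args, filtering
    from annet.api import Deployer
    from annet.cli import get_loader
    from annet.deploy import driver_connector, fetcher_connector

    def run_custom_deploy(args: cli_args.DeployOptions):
        deployer = Deployer(args)
        filterer = filtering.filterer_connector.get()
        fetcher = fetcher_connector.get()
        deploy_driver = driver_connector.get()
        # get_loader() opens every registered storage and yields one Loader over them
        with get_loader(args, args) as loader:
            return api.deploy(
                args=args, loader=loader, deployer=deployer,
                filterer=filterer, fetcher=fetcher, deploy_driver=deploy_driver,
            )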