annet-0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of annet might be problematic.

Files changed (137)
  1. annet/__init__.py +61 -0
  2. annet/adapters/__init__.py +0 -0
  3. annet/adapters/netbox/__init__.py +0 -0
  4. annet/adapters/netbox/common/__init__.py +0 -0
  5. annet/adapters/netbox/common/client.py +87 -0
  6. annet/adapters/netbox/common/manufacturer.py +62 -0
  7. annet/adapters/netbox/common/models.py +105 -0
  8. annet/adapters/netbox/common/query.py +23 -0
  9. annet/adapters/netbox/common/status_client.py +25 -0
  10. annet/adapters/netbox/common/storage_opts.py +14 -0
  11. annet/adapters/netbox/provider.py +34 -0
  12. annet/adapters/netbox/v24/__init__.py +0 -0
  13. annet/adapters/netbox/v24/api_models.py +73 -0
  14. annet/adapters/netbox/v24/client.py +59 -0
  15. annet/adapters/netbox/v24/storage.py +196 -0
  16. annet/adapters/netbox/v37/__init__.py +0 -0
  17. annet/adapters/netbox/v37/api_models.py +38 -0
  18. annet/adapters/netbox/v37/client.py +62 -0
  19. annet/adapters/netbox/v37/storage.py +149 -0
  20. annet/annet.py +25 -0
  21. annet/annlib/__init__.py +7 -0
  22. annet/annlib/command.py +49 -0
  23. annet/annlib/diff.py +158 -0
  24. annet/annlib/errors.py +8 -0
  25. annet/annlib/filter_acl.py +196 -0
  26. annet/annlib/jsontools.py +116 -0
  27. annet/annlib/lib.py +495 -0
  28. annet/annlib/netdev/__init__.py +0 -0
  29. annet/annlib/netdev/db.py +62 -0
  30. annet/annlib/netdev/devdb/__init__.py +28 -0
  31. annet/annlib/netdev/devdb/data/devdb.json +137 -0
  32. annet/annlib/netdev/views/__init__.py +0 -0
  33. annet/annlib/netdev/views/dump.py +121 -0
  34. annet/annlib/netdev/views/hardware.py +112 -0
  35. annet/annlib/output.py +246 -0
  36. annet/annlib/patching.py +533 -0
  37. annet/annlib/rbparser/__init__.py +0 -0
  38. annet/annlib/rbparser/acl.py +120 -0
  39. annet/annlib/rbparser/deploying.py +55 -0
  40. annet/annlib/rbparser/ordering.py +52 -0
  41. annet/annlib/rbparser/platform.py +51 -0
  42. annet/annlib/rbparser/syntax.py +115 -0
  43. annet/annlib/rulebook/__init__.py +0 -0
  44. annet/annlib/rulebook/common.py +350 -0
  45. annet/annlib/tabparser.py +648 -0
  46. annet/annlib/types.py +35 -0
  47. annet/api/__init__.py +826 -0
  48. annet/argparse.py +415 -0
  49. annet/cli.py +237 -0
  50. annet/cli_args.py +503 -0
  51. annet/configs/context.yml +18 -0
  52. annet/configs/logging.yaml +39 -0
  53. annet/connectors.py +77 -0
  54. annet/deploy.py +536 -0
  55. annet/diff.py +84 -0
  56. annet/executor.py +551 -0
  57. annet/filtering.py +40 -0
  58. annet/gen.py +865 -0
  59. annet/generators/__init__.py +435 -0
  60. annet/generators/base.py +136 -0
  61. annet/generators/common/__init__.py +0 -0
  62. annet/generators/common/initial.py +33 -0
  63. annet/generators/entire.py +97 -0
  64. annet/generators/exceptions.py +10 -0
  65. annet/generators/jsonfragment.py +125 -0
  66. annet/generators/partial.py +119 -0
  67. annet/generators/perf.py +79 -0
  68. annet/generators/ref.py +15 -0
  69. annet/generators/result.py +127 -0
  70. annet/hardware.py +45 -0
  71. annet/implicit.py +139 -0
  72. annet/lib.py +128 -0
  73. annet/output.py +167 -0
  74. annet/parallel.py +448 -0
  75. annet/patching.py +25 -0
  76. annet/reference.py +148 -0
  77. annet/rulebook/__init__.py +114 -0
  78. annet/rulebook/arista/__init__.py +0 -0
  79. annet/rulebook/arista/iface.py +16 -0
  80. annet/rulebook/aruba/__init__.py +16 -0
  81. annet/rulebook/aruba/ap_env.py +146 -0
  82. annet/rulebook/aruba/misc.py +8 -0
  83. annet/rulebook/cisco/__init__.py +0 -0
  84. annet/rulebook/cisco/iface.py +68 -0
  85. annet/rulebook/cisco/misc.py +57 -0
  86. annet/rulebook/cisco/vlandb.py +90 -0
  87. annet/rulebook/common.py +19 -0
  88. annet/rulebook/deploying.py +87 -0
  89. annet/rulebook/huawei/__init__.py +0 -0
  90. annet/rulebook/huawei/aaa.py +75 -0
  91. annet/rulebook/huawei/bgp.py +97 -0
  92. annet/rulebook/huawei/iface.py +33 -0
  93. annet/rulebook/huawei/misc.py +337 -0
  94. annet/rulebook/huawei/vlandb.py +115 -0
  95. annet/rulebook/juniper/__init__.py +107 -0
  96. annet/rulebook/nexus/__init__.py +0 -0
  97. annet/rulebook/nexus/iface.py +92 -0
  98. annet/rulebook/patching.py +143 -0
  99. annet/rulebook/ribbon/__init__.py +12 -0
  100. annet/rulebook/texts/arista.deploy +20 -0
  101. annet/rulebook/texts/arista.order +125 -0
  102. annet/rulebook/texts/arista.rul +59 -0
  103. annet/rulebook/texts/aruba.deploy +20 -0
  104. annet/rulebook/texts/aruba.order +83 -0
  105. annet/rulebook/texts/aruba.rul +87 -0
  106. annet/rulebook/texts/cisco.deploy +27 -0
  107. annet/rulebook/texts/cisco.order +82 -0
  108. annet/rulebook/texts/cisco.rul +105 -0
  109. annet/rulebook/texts/huawei.deploy +188 -0
  110. annet/rulebook/texts/huawei.order +388 -0
  111. annet/rulebook/texts/huawei.rul +471 -0
  112. annet/rulebook/texts/juniper.rul +120 -0
  113. annet/rulebook/texts/nexus.deploy +24 -0
  114. annet/rulebook/texts/nexus.order +85 -0
  115. annet/rulebook/texts/nexus.rul +83 -0
  116. annet/rulebook/texts/nokia.rul +31 -0
  117. annet/rulebook/texts/pc.order +5 -0
  118. annet/rulebook/texts/pc.rul +9 -0
  119. annet/rulebook/texts/ribbon.deploy +22 -0
  120. annet/rulebook/texts/ribbon.rul +77 -0
  121. annet/rulebook/texts/routeros.order +38 -0
  122. annet/rulebook/texts/routeros.rul +45 -0
  123. annet/storage.py +125 -0
  124. annet/tabparser.py +36 -0
  125. annet/text_term_format.py +95 -0
  126. annet/tracing.py +170 -0
  127. annet/types.py +227 -0
  128. annet-0.0.dist-info/AUTHORS +21 -0
  129. annet-0.0.dist-info/LICENSE +21 -0
  130. annet-0.0.dist-info/METADATA +26 -0
  131. annet-0.0.dist-info/RECORD +137 -0
  132. annet-0.0.dist-info/WHEEL +5 -0
  133. annet-0.0.dist-info/entry_points.txt +5 -0
  134. annet-0.0.dist-info/top_level.txt +2 -0
  135. annet_generators/__init__.py +0 -0
  136. annet_generators/example/__init__.py +12 -0
  137. annet_generators/example/lldp.py +53 -0
annet/api/__init__.py ADDED
@@ -0,0 +1,826 @@
+ import abc
+ import difflib
+ import os
+ import re
+ import sys
+ import time
+ import warnings
+ from collections import OrderedDict as odict
+ from itertools import groupby
+ from operator import itemgetter
+ from typing import (
+     Any,
+     Dict,
+     Generator,
+     Iterable,
+     List,
+     Mapping,
+     Optional,
+     Set,
+     Tuple,
+     Union, cast,
+ )
+
+ import colorama
+ import annet.lib
+ from annet.annlib import jsontools
+ from annet.annlib.netdev.views.hardware import HardwareView
+ from annet.annlib.rbparser.platform import VENDOR_REVERSES
+ from annet.annlib.types import GeneratorType
+ from contextlog import get_logger
+
+ from annet.deploy import Fetcher, DeployDriver
+ from annet import cli_args
+ from annet import diff as ann_diff
+ from annet import filtering
+ from annet import gen as ann_gen
+ from annet import patching, rulebook, tabparser, tracing
+ from annet.filtering import Filterer
+ from annet.hardware import hardware_connector
+ from annet.output import (
+     LABEL_NEW_PREFIX,
+     format_file_diff,
+     output_driver_connector,
+     print_err_label,
+ )
+ from annet.parallel import Parallel, TaskResult
+ from annet.reference import RefTracker
+ from annet.storage import Device, storage_connector
+ from annet.types import Diff, ExitCode, OldNewResult, Op, PCDiff, PCDiffFile
+
+
+ live_configs = ann_gen.live_configs
+
+ DEFAULT_INDENT = " "
+
+
+ def patch_from_pre(pre, hw, rb, add_comments, ref_track=None, do_commit=True):
+     if not ref_track:
+         ref_track = RefTracker()
+     orderer = patching.Orderer(rb["ordering"], hw.vendor)
+     orderer.ref_insert(ref_track)
+     return patching.make_patch(
+         pre=pre,
+         rb=rb,
+         hw=hw,
+         add_comments=add_comments,
+         orderer=orderer,
+         do_commit=do_commit,
+     )
+
+
+ def _diff_and_patch(
+     device, old, new, acl_rules, filter_acl_rules,
+     add_comments, ref_track=None, do_commit=True, rb=None
+ ) -> Tuple[Diff, Dict]:
+     if rb is None:
+         rb = rulebook.get_rulebook(device.hw)
+     # [NOCDEV-5532] Pass only the parts of the config that are relevant to the logic into the diff
+     if acl_rules is not None:
+         old = patching.apply_acl(old, acl_rules)
+         new = patching.apply_acl(new, acl_rules, with_annotations=add_comments)
+
+     diff_tree = patching.make_diff(old, new, rb, [acl_rules, filter_acl_rules])
+     pre = patching.make_pre(diff_tree)
+     patch_tree = patch_from_pre(pre, device.hw, rb, add_comments, ref_track, do_commit)
+     diff_tree = patching.strip_unchanged(diff_tree)
+
+     return (diff_tree, patch_tree)
+
+
+ # =====
+ def _read_old_new_diff_patch(old: Dict[str, Dict], new: Dict[str, Dict], hw: HardwareView, add_comments: bool):
+     rb = rulebook.get_rulebook(hw)
+     diff_obj = patching.make_diff(old, new, rb, [])
+     diff_obj = patching.strip_unchanged(diff_obj)
+     pre = patching.make_pre(diff_obj)
+     patchtree = patch_from_pre(pre, hw, rb, add_comments)
+     return rb, diff_obj, pre, patchtree
+
+
+ def _read_old_new_hw(old_path: str, new_path: str, args: cli_args.FileInputOptions):
+     _logger = get_logger()
+     hw = args.hw
+     if isinstance(args.hw, str):
+         hw = HardwareView(args.hw, "")
+
+     old, old_hw, old_score = _read_device_config(old_path, hw)
+     new, new_hw, new_score = _read_device_config(new_path, hw)
+     hw = new_hw
+     if old_score > new_score:
+         hw = old_hw
+     if old_hw != new_hw:
+         _logger.debug("Old and new detected hw differs, assume %r", hw)
+     dest_name = os.path.basename(new_path)
+     return dest_name, old, new, hw
+
+
+ @tracing.function
+ def _read_old_new_cfgdumps(args: cli_args.FileInputOptions):
+     _logger = get_logger()
+     old_path, new_path = os.path.normpath(args.old), os.path.normpath(args.new)
+     if not os.path.isdir(old_path):
+         yield (old_path, new_path)
+         return
+     _logger.info("Scanning cfgdumps: %s/*.cfg ...", old_path)
+     cfgdump_reg = re.compile(r"^[^\s]+\.cfg$")
+     if os.path.isdir(old_path) and os.path.isdir(new_path):
+         if cfgdump_reg.match(os.path.basename(old_path)) and cfgdump_reg.match(os.path.basename(new_path)):
+             yield (old_path, new_path)
+     for name in os.listdir(old_path):
+         old_path_name = os.path.join(old_path, name)
+         new_path_name = os.path.join(new_path, name)
+         if not os.path.exists(new_path_name):
+             _logger.debug("Ignoring file %s: not exist %s", name, new_path_name)
+             continue
+         yield (old_path_name, new_path_name)
+
+
+ def _read_device_config(path, hw):
+     _logger = get_logger()
+     _logger.debug("Reading %r ...", path)
+     score = 1
+
+     with open(path) as cfgdump_file:
+         text = cfgdump_file.read()
+     try:
+         if not hw:
+             hw, score = guess_hw(text)
+         config = tabparser.parse_to_tree(
+             text=text,
+             splitter=tabparser.make_formatter(hw).split,
+         )
+         return config, hw, score
+     except tabparser.ParserError:
+         _logger.exception("Parser error: %r", path)
+         raise
+
+
+ # =====
+ def _format_patch_blocks(patch_tree, hw, indent):
+     formatter = tabparser.make_formatter(hw, indent=indent)
+     return formatter.patch(patch_tree)
+
+
+ # =====
+ def _print_pre_as_diff(pre, show_rules, indent, file=None, _level=0):
+     for (raw_rule, content) in sorted(pre.items(), key=itemgetter(0)):
+         rule_printed = False
+         for (op, sign) in [ # FIXME: Not very effective
+             (Op.REMOVED, colorama.Fore.RED + "-"),
+             (Op.ADDED, colorama.Fore.GREEN + "+"),
+             (Op.AFFECTED, colorama.Fore.CYAN + " "),
+         ]:
+             items = content["items"].items()
+             if not content["attrs"]["multiline"]:
+                 items = sorted(items, key=itemgetter(0))
+             for (_, diff) in items: # pylint: disable=redefined-outer-name
+                 if show_rules and not rule_printed and not raw_rule == "__MULTILINE_BODY__":
+                     print("%s%s# %s%s%s" % (colorama.Style.BRIGHT, colorama.Fore.BLACK, (indent * _level),
+                                             raw_rule, colorama.Style.RESET_ALL), file=file)
+                     rule_printed = True
+                 for item in sorted(diff[op], key=itemgetter("row")):
+                     print("%s%s%s %s%s" % (colorama.Style.BRIGHT, sign, (indent * _level),
+                                            item["row"], colorama.Style.RESET_ALL), file=file)
+                     if len(item["children"]) != 0:
+                         _print_pre_as_diff(item["children"], show_rules, indent, file, _level + 1)
+                         rule_printed = False
+
+
+ class PoolProgressLogger:
+     def __init__(self, device_fqdns: Dict[int, str]):
+         self.device_fqdns = device_fqdns
+
+     def __call__(self, pool: Parallel, task_result: TaskResult):
+         progress_logger = get_logger("progress")
+         perc = int(pool.tasks_done / len(self.device_fqdns) * 100)
+
+         fqdn = self.device_fqdns[task_result.device_id]
+         elapsed_time = "%dsec" % int(time.monotonic() - task_result.extra["start_time"])
+         if task_result.extra.get("regression", False):
+             status = task_result.extra["status"]
+             status_color = task_result.extra["status_color"]
+             message = task_result.extra["message"]
+         else:
+             status = "OK" if task_result.exc is None else "FAIL"
+             status_color = colorama.Fore.GREEN if status == "OK" else colorama.Fore.RED
+             message = "" if status == "OK" else str(task_result.exc)
+         progress_logger.info(message,
+                              perc=perc, fqdn=fqdn, status=status, status_color=status_color,
+                              worker=task_result.worker_name, task_time=elapsed_time)
+         return task_result
+
+
+ def log_host_progress_cb(pool: Parallel, task_result: TaskResult):
+     warnings.warn(
+         "log_host_progress_cb is deprecated, use PoolProgressLogger",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+     args = cast(cli_args.QueryOptions, pool.args[0])
+     connector = storage_connector.get()
+     storage_opts = connector.opts().from_cli_opts(args)
+     with connector.storage()(storage_opts) as storage:
+         hosts = storage.resolve_fdnds_by_query(args.query)
+         fqdn = hosts[task_result.device_id]
+     PoolProgressLogger(device_fqdns=fqdn)(pool, task_result)
+
+
+ # =====
+ def gen(args: cli_args.ShowGenOptions, loader: ann_gen.Loader):
+     """ Generate configs for devices """
+     stdin = args.stdin(filter_acl=args.filter_acl, config=None)
+
+     filterer = filtering.filterer_connector.get()
+     pool = Parallel(ann_gen.worker, args, stdin, loader, filterer).tune_args(args)
+     if args.show_hosts_progress:
+         pool.add_callback(PoolProgressLogger(loader.device_fqdns))
+
+     return pool.run(loader.device_ids, args.tolerate_fails, args.strict_exit_code)
+
+
+ # =====
+ def _diff_file(old_text: Optional[str], new_text: Optional[str], context=3):
+     old_lines = old_text.splitlines() if old_text else []
+     new_lines = new_text.splitlines() if new_text else []
+     context = max(len(old_lines), len(new_lines)) if context is None else context
+     return list(difflib.unified_diff(old_lines, new_lines, n=context, lineterm=""))
+
+
+ def _diff_files(old_files, new_files, context=3):
+     ret = {}
+     for (path, (new_text, reload_data)) in new_files.items():
+         old_text = old_files.get(path)
+         is_new = old_text is None
+         diff_lines = _diff_file(old_text, new_text, context=context)
+         ret[path] = (diff_lines, reload_data, is_new)
+     return ret
+
+
+ def patch(args: cli_args.ShowPatchOptions, loader: ann_gen.Loader):
+     """ Generate patches for devices """
+     global live_configs # pylint: disable=global-statement
+     if args.config == "running":
+         fetcher = annet.deploy.fetcher_connector.get()
+         live_configs = fetcher.fetch(loader.devices, processes=args.parallel)
+     stdin = args.stdin(filter_acl=args.filter_acl, config=args.config)
+
+     filterer = filtering.filterer_connector.get()
+     pool = Parallel(_patch_worker, args, stdin, loader, filterer).tune_args(args)
+     if args.show_hosts_progress:
+         pool.add_callback(PoolProgressLogger(loader.device_fqdns))
+     return pool.run(loader.device_ids, args.tolerate_fails, args.strict_exit_code)
+
+
+ def _patch_worker(device_id, args: cli_args.ShowPatchOptions, stdin, loader: ann_gen.Loader, filterer: filtering.Filterer):
+     for res, _, patch_tree in res_diff_patch(device_id, args, stdin, loader, filterer):
+         new_files = res.get_new_files(args.acl_safe)
+         new_json_fragment_files = res.get_new_file_fragments(args.acl_safe)
+         if new_files:
+             for path, (cfg_text, _cmds) in new_files.items():
+                 label = res.device.hostname + os.sep + path
+                 yield label, cfg_text, False
+         elif res.old_json_fragment_files or new_json_fragment_files:
+             for path, (new_json_cfg, _cmds) in new_json_fragment_files.items():
+                 label = res.device.hostname + os.sep + path
+                 old_json_cfg = res.old_json_fragment_files[path]
+                 json_patch = jsontools.make_patch(old_json_cfg, new_json_cfg)
+                 yield (
+                     label,
+                     jsontools.format_json(json_patch),
+                     False,
+                 )
+         elif patch_tree:
+             yield (
+                 "%s.patch" % res.device.hostname,
+                 _format_patch_blocks(patch_tree, res.device.hw, args.indent),
+                 False,
+             )
+
+
+ # =====
+ def res_diff_patch(
+     device_id, args: cli_args.ShowPatchOptions, stdin, loader: ann_gen.Loader, filterer: filtering.Filterer,
+ ) -> Iterable[Tuple[OldNewResult, Dict, Dict]]:
+     for res in ann_gen.old_new(
+         args,
+         config=args.config,
+         loader=loader,
+         filterer=filterer,
+         stdin=stdin,
+         device_ids=[device_id],
+         no_new=args.clear,
+         do_files_download=True,
+     ):
+         old = res.get_old(args.acl_safe)
+         new = res.get_new(args.acl_safe)
+         new_json_fragment_files = res.get_new_file_fragments(args.acl_safe)
+
+         device = res.device
+         acl_rules = res.get_acl_rules(args.acl_safe)
+         if res.old_json_fragment_files or new_json_fragment_files:
+             yield res, None, None
+         elif old is not None:
+             (diff_tree, patch_tree) = _diff_and_patch(device, old, new, acl_rules, res.filter_acl_rules,
+                                                       args.add_comments)
+             yield res, diff_tree, patch_tree
+
+
+ def diff(
+     args: cli_args.DiffOptions,
+     loader: ann_gen.Loader,
+     device_ids: List[int],
+     filterer: filtering.Filterer,
+ ) -> Mapping[Device, Union[Diff, PCDiff]]:
+     ret = {}
+     for res in ann_gen.old_new(
+         args,
+         config=args.config,
+         loader=loader,
+         no_new=args.clear,
+         do_files_download=True,
+         device_ids=device_ids,
+         filterer=filterer,
+     ):
+         old = res.get_old(args.acl_safe)
+         new = res.get_new(args.acl_safe)
+         device = res.device
+         acl_rules = res.get_acl_rules(args.acl_safe)
+         new_files = res.get_new_files(args.acl_safe)
+         new_json_fragment_files = res.get_new_file_fragments()
+         if res.old_files or new_files:
+             ret[device] = PCDiff(
+                 hostname=device.hostname,
+                 diff_files=list(_pc_diff(device.hostname, res.old_files, new_files)),
+             )
+         elif res.old_json_fragment_files or new_json_fragment_files:
+             ret[device] = PCDiff(
+                 hostname=device.hostname,
+                 diff_files=list(_json_fragment_diff(device.hostname, res.old_json_fragment_files, new_json_fragment_files)),
+             )
+         elif old is not None:
+             rb = rulebook.get_rulebook(device.hw)
+             diff_tree = patching.make_diff(old, new, rb, [acl_rules, res.filter_acl_rules])
+             diff_tree = patching.strip_unchanged(diff_tree)
+             ret[device] = diff_tree
+     return ret
+
+
+ def collapse_texts(texts: Mapping[str, str]) -> Mapping[Tuple[str, ...], str]:
+     """
+     Group identical texts.
+     :param texts:
+     :return: a dict keyed by tuples of several hostnames.
+     """
+     diffs_with_orig = {key: [value, value.splitlines()] for key, value in texts.items()}
+     res = {}
+     for _, collapsed_diff_iter in groupby(sorted(diffs_with_orig.items(), key=lambda x: (x[0], x[1][1])),
+                                           key=lambda x: x[1][1]):
+         collapsed_diff = list(collapsed_diff_iter)
+         res[tuple(x[0] for x in collapsed_diff)] = collapsed_diff[0][1][0]
+
+     return res
+
+
+ class DeployerJob(abc.ABC):
+     def __init__(self, device, args: cli_args.DeployOptions):
+         self.args = args
+         self.device = device
+         self.add_comments = False
+         self.diff_lines = []
+         self.cmd_lines: List[str] = []
+         self.deploy_cmds = odict()
+         self.diffs = {}
+         self.failed_configs = {}
+         self._has_diff = False
+
+     @abc.abstractmethod
+     def parse_result(self, res):
+         pass
+
+     def collapseable_diffs(self):
+         return {}
+
+     def has_diff(self):
+         return self._has_diff
+
+     @staticmethod
+     def from_device(device, args: cli_args.DeployOptions):
+         if device.hw.vendor == "pc":
+             return PCDeployerJob(device, args)
+         return CliDeployerJob(device, args)
+
+
+ class CliDeployerJob(DeployerJob):
+     def parse_result(self, res: OldNewResult):
+         device = res.device
+         old = res.get_old(self.args.acl_safe)
+         new = res.get_new(self.args.acl_safe)
+         acl_rules = res.get_acl_rules(self.args.acl_safe)
+         err = res.err
+
+         if err:
+             self.failed_configs[device.fqdn] = err
+             return
+
+         (diff_obj, patch_tree) = _diff_and_patch(device, old, new, acl_rules,
+                                                  res.filter_acl_rules, self.add_comments,
+                                                  do_commit=not self.args.dont_commit)
+         cmds = tabparser.make_formatter(device.hw, indent="").cmd_paths(patch_tree)
+         if not cmds:
+             return
+         self._has_diff = True
+         self.diffs[device] = diff_obj
+         self.cmd_lines.extend(["= %s " % device.hostname, ""])
+         self.cmd_lines.extend(map(itemgetter(-1), cmds))
+         self.cmd_lines.append("")
+         deployer_driver = annet.deploy.driver_connector.get()
+         self.deploy_cmds[device] = deployer_driver.apply_deploy_rulebook(
+             device.hw, cmds,
+             do_commit=not self.args.dont_commit
+         )
+         for cmd in deployer_driver.build_exit_cmdlist(device.hw):
+             self.deploy_cmds[device].add_cmd(cmd)
+
+     def collapseable_diffs(self):
+         return self.diffs
+
+
+ class PCDeployerJob(DeployerJob):
+     def parse_result(self, res: ann_gen.OldNewResult):
+         device = res.device
+         old_files = res.old_files
+         new_files = res.get_new_files(self.args.acl_safe)
+         old_json_fragment_files = res.old_json_fragment_files
+         new_json_fragment_files = res.get_new_file_fragments(self.args.acl_safe)
+         err = res.err
+
+         if err:
+             self.failed_configs[device.fqdn] = err
+             return
+         elif not new_files and not new_json_fragment_files:
+             return
+
+         upload_files: Dict[str, bytes] = {}
+         reload_cmds: Dict[str, bytes] = {}
+         generator_types: Dict[str, GeneratorType] = {}
+         for generator_type, pc_files in [(GeneratorType.ENTIRE, new_files), (GeneratorType.JSON_FRAGMENT, new_json_fragment_files)]:
+             for file, (file_content_or_json_cfg, cmds) in pc_files.items():
+                 if generator_type == GeneratorType.ENTIRE:
+                     file_content: str = file_content_or_json_cfg
+                     diff_content = "\n".join(_diff_file(old_files.get(file), file_content))
+                 else: # generator_type == GeneratorType.JSON_FRAGMENT
+                     old_json_cfg = old_json_fragment_files[file]
+                     json_patch = jsontools.make_patch(old_json_cfg, file_content_or_json_cfg)
+                     file_content = jsontools.format_json(json_patch)
+                     old_text = jsontools.format_json(old_json_cfg)
+                     new_text = jsontools.format_json(file_content_or_json_cfg)
+                     diff_content = "\n".join(_diff_file(old_text, new_text))
+                 if diff_content:
+                     self._has_diff = True
+                     upload_files[file], reload_cmds[file] = file_content.encode(), cmds.encode()
+                     generator_types[file] = generator_type
+                     self.cmd_lines.append("= Deploy cmds %s/%s " % (device.hostname, file))
+                     self.cmd_lines.extend([cmds, ""])
+                     self.cmd_lines.append("= %s/%s " % (device.hostname, file))
+                     self.cmd_lines.extend([file_content, ""])
+                     self.diff_lines.append("= %s/%s " % (device.hostname, file))
+                     self.diff_lines.extend([diff_content, ""])
+
+         if upload_files:
+             self.deploy_cmds[device] = {
+                 "files": upload_files,
+                 "cmds": reload_cmds,
+                 "generator_types": generator_types,
+             }
+             self.diffs[device] = upload_files
+             deployer_driver = annet.deploy.driver_connector.get()
+             before, after = deployer_driver.build_configuration_cmdlist(device.hw)
+             for cmd in deployer_driver.build_exit_cmdlist(device.hw):
+                 after.add_cmd(cmd)
+             cmds_pre_files = {}
+             for file in self.deploy_cmds[device]["files"]:
+                 if before:
+                     cmds_pre_files[file] = "\n".join(map(str, before)).encode(encoding="utf-8")
+                 self.deploy_cmds[device]["cmds"][file] += "\n".join(map(str, after)).encode(encoding="utf-8")
+             self.deploy_cmds[device]["cmds_pre_files"] = cmds_pre_files
+
+
+ class Deployer:
+     def __init__(self, args: cli_args.DeployOptions):
+         self.args = args
+
+         self.cmd_lines = []
+         self.deploy_cmds = odict()
+         self.diffs = {}
+         self.failed_configs: Dict[str, Exception] = {}
+         self.fqdn_to_device: Dict[str, Device] = {}
+         self.empty_diff_hostnames: Set[str] = set()
+
+         self._collapseable_diffs = {}
+         self._diff_lines: List[str] = []
+         self._filterer = filtering.filterer_connector.get()
+
+     def parse_result(self, job: DeployerJob, result: ann_gen.OldNewResult):
+         entire_reload = self.args.entire_reload
+         logger = get_logger(job.device.hostname)
+
+         job.parse_result(result)
+         self.failed_configs.update(job.failed_configs)
+
+         if job.has_diff() or entire_reload is entire_reload.force:
+             self.cmd_lines.extend(job.cmd_lines)
+             self.deploy_cmds.update(job.deploy_cmds)
+             self.diffs.update(job.diffs)
+
+             self.fqdn_to_device[result.device.fqdn] = result.device
+             self._collapseable_diffs.update(job.collapseable_diffs())
+             self._diff_lines.extend(job.diff_lines)
+         else:
+             logger.info("empty diff")
+
+     def diff_lines(self) -> List[str]:
+         diff_lines = []
+         diff_lines.extend(self._diff_lines)
+         for devices, diff_obj in ann_diff.collapse_diffs(self._collapseable_diffs).items():
+             if not diff_obj:
+                 self.empty_diff_hostnames.update(dev.hostname for dev in devices)
+             if not self.args.no_ask_deploy:
+                 # split the device list across several lines
+                 dest_name = ""
+                 try:
+                     _, term_columns_str = os.popen("stty size", "r").read().split()
+                     term_columns = int(term_columns_str)
+                 except Exception:
+                     term_columns = 2 ** 32
+                 fqdns = [dev.hostname for dev in devices]
+                 while fqdns:
+                     fqdn = fqdns.pop()
+                     if len(dest_name) == 0:
+                         dest_name = "= %s" % fqdn
+                     elif len(dest_name) + len(fqdn) < term_columns:
+                         dest_name = "%s, %s" % (dest_name, fqdn)
+                     else:
+                         diff_lines.extend([dest_name])
+                         dest_name = "= %s" % fqdn
+                     if not fqdns:
+                         diff_lines.extend([dest_name, ""])
+             else:
+                 dest_name = "= %s" % ", ".join([dev.hostname for dev in devices])
+                 diff_lines.extend([dest_name, ""])
+
+             for line in tabparser.make_formatter(devices[0].hw).diff(diff_obj):
+                 diff_lines.append(line)
+             diff_lines.append("")
+         return diff_lines
+
+     def ask_deploy(self) -> str:
+         return self._ask("y", annet.deploy.AskConfirm(
+             text="\n".join(self.diff_lines()),
+             alternative_text="\n".join(self.cmd_lines),
+         ))
+
+     def ask_rollback(self) -> str:
+         return self._ask("n", annet.deploy.AskConfirm(
+             text="Execute rollback?\n",
+             alternative_text="",
+         ))
+
+     def _ask(self, default_ans: str, ask: annet.deploy.AskConfirm) -> str:
+         # if filter_acl was read from stdin, it can no longer be used as a terminal
+         ans = default_ans
+         if not self.args.no_ask_deploy:
+             try:
+                 if not os.isatty(sys.stdin.fileno()):
+                     pts_path = os.ttyname(sys.stdout.fileno())
+                     pts = open(pts_path, "r") # pylint: disable=consider-using-with
+                     os.dup2(pts.fileno(), sys.stdin.fileno())
+             except OSError:
+                 pass
+             ans = ask.loop()
+         return ans
+
+     def check_diff(self, result: annet.deploy.DeployResult, loader: ann_gen.Loader):
+         global live_configs # pylint: disable=global-statement
+         success_device_ids = []
+         for host, hres in result.results.items():
+             device = self.fqdn_to_device[host]
+             if (
+                 not isinstance(hres, Exception) and
+                 host not in self.empty_diff_hostnames and
+                 device.is_pc()
+             ):
+                 success_device_ids.append(device.id)
+         diff_args = self.args.copy_from(
+             self.args,
+             config="running",
+         )
+         if diff_args.query:
+             live_configs = None
+             diffs = diff(diff_args, loader, success_device_ids, self._filterer)
+             non_pc_diffs = {dev: diff for dev, diff in diffs.items() if not isinstance(diff, PCDiff)}
+             devices_to_diff = ann_diff.collapse_diffs(non_pc_diffs)
+             devices_to_diff.update({(dev,): diff for dev, diff in diffs.items() if isinstance(diff, PCDiff)})
+         else:
+             devices_to_diff = {}
+         for devices, diff_obj in devices_to_diff.items():
+             if diff_obj:
+                 for dev in devices:
+                     self.failed_configs[dev.fqdn] = Warning("Deploy OK, but diff still exists")
+                 if isinstance(diff_obj, PCDiff):
+                     for diff_file in diff_obj.diff_files:
+                         print_err_label(diff_file.label)
+                         print("\n".join(format_file_diff(diff_file.diff_lines)))
+                 else:
+                     output_driver = output_driver_connector.get()
+                     dest_name = ", ".join([output_driver.cfg_file_names(dev)[0] for dev in devices])
+                     print_err_label(dest_name)
+                     _print_pre_as_diff(patching.make_pre(diff_obj), diff_args.show_rules, diff_args.indent)
+
+
+ def deploy(
+     args: cli_args.DeployOptions,
+     loader: ann_gen.Loader,
+     deployer: Deployer,
+     filterer: Filterer,
+     fetcher: Fetcher,
+     deploy_driver: DeployDriver,
+ ) -> ExitCode:
+     """ Generate configs for devices and deploy them """
+     ret: ExitCode = 0
+     global live_configs # pylint: disable=global-statement
+     live_configs = fetcher.fetch(devices=loader.devices, processes=args.parallel)
+     pool = ann_gen.OldNewParallel(args, loader, filterer)
+
+     for res in pool.generated_configs(loader.devices):
+         # Change the exit code if at least one device caught an exception
+         if res.err is not None:
+             get_logger(res.device.hostname).error("error generating configs", exc_info=res.err)
+             ret |= 2 ** 3
+         job = DeployerJob.from_device(res.device, args)
+         deployer.parse_result(job, res)
+
+     deploy_cmds = deployer.deploy_cmds
+     result = annet.deploy.DeployResult(hostnames=[], results={}, durations={}, original_states={})
+     if deploy_cmds:
+         ans = deployer.ask_deploy()
+         if ans != "y":
+             return 2 ** 2
+         result = annet.lib.do_async(deploy_driver.bulk_deploy(deploy_cmds, args))
+
+     rolled_back = False
+     rollback_cmds = {deployer.fqdn_to_device[x]: cc for x, cc in result.original_states.items() if cc}
+     if args.rollback and rollback_cmds:
+         ans = deployer.ask_rollback()
+         if rollback_cmds and ans == "y":
+             rolled_back = True
+             annet.lib.do_async(deploy_driver.bulk_deploy(rollback_cmds, args))
+
+     if not args.no_check_diff and not rolled_back:
+         deployer.check_diff(result, loader)
+
+     if deployer.failed_configs:
+         result.add_results(deployer.failed_configs)
+         ret |= 2 ** 1
+
+     annet.deploy.show_bulk_report(result.hostnames, result.results, result.durations, log_dir=None)
+     for host_result in result.results.values():
+         if isinstance(host_result, Exception):
+             ret |= 2 ** 0
+             break
+     return ret
+
+
+ def file_diff(args: cli_args.FileDiffOptions):
+     """ Create a rulebook-based diff between files or directories """
+     old_new = list(_read_old_new_cfgdumps(args))
+     pool = Parallel(file_diff_worker, args).tune_args(args)
+     return pool.run(old_new, tolerate_fails=True)
+
+
+ def file_diff_worker(old_new: Tuple[str, str], args: cli_args.FileDiffOptions) -> Generator[
+         Tuple[str, str, bool], None, None]:
+     old_path, new_path = old_new
+     if os.path.isdir(old_path) and os.path.isdir(new_path):
+         hostname = os.path.basename(new_path)
+         new_files = {relative_cfg_path: (cfg_text, "") for relative_cfg_path, cfg_text in
+                      ann_gen.load_pc_config(new_path).items()}
+         old_files = ann_gen.load_pc_config(old_path)
+         for diff_file in _pc_diff(hostname, old_files, new_files):
+             diff_text = (
+                 "\n".join(diff_file.diff_lines)
+                 if args.no_color
+                 else "\n".join(format_file_diff(diff_file.diff_lines))
+             )
+             if diff_text:
+                 yield diff_file.label, diff_text, False
+     else:
+         dest_name, old, new, hw = _read_old_new_hw(old_path, new_path, args)
+         _, __, pre, ___ = _read_old_new_diff_patch(old, new, hw, add_comments=False)
+         diff_lines = ann_diff.gen_pre_as_diff(pre, args.show_rules, args.indent, args.no_color)
+         diff_text = "".join(diff_lines)
+         if diff_text:
+             yield dest_name, diff_text, False
+
+
+ @tracing.function
+ def file_patch(args: cli_args.FilePatchOptions):
+     """ Create a patch between files or directories """
+     old_new = list(_read_old_new_cfgdumps(args))
+     pool = Parallel(file_patch_worker, args).tune_args(args)
+     return pool.run(old_new, tolerate_fails=True)
+
+
+ def file_patch_worker(old_new: Tuple[str, str], args: cli_args.FileDiffOptions) -> Generator[
+         Tuple[str, str, bool], None, None]:
+     old_path, new_path = old_new
+     if os.path.isdir(old_path) and os.path.isdir(new_path):
+         for relative_cfg_path, cfg_text in ann_gen.load_pc_config(new_path).items():
+             label = os.path.join(os.path.basename(new_path), relative_cfg_path)
+             yield label, cfg_text, False
+     else:
+         dest_name, old, new, hw = _read_old_new_hw(old_path, new_path, args)
+         _, __, ___, patch_tree = _read_old_new_diff_patch(old, new, hw, args.add_comments)
+         patch_text = _format_patch_blocks(patch_tree, hw, args.indent)
+         if patch_text:
+             yield dest_name, patch_text, False
+
+
+ def _pc_diff(hostname: str, old_files: Dict[str, str], new_files: Dict[str, str]) -> Generator[PCDiffFile, None, None]:
+     sorted_lines = sorted(_diff_files(old_files, new_files).items())
+     for (path, (diff_lines, _reload_data, is_new)) in sorted_lines:
+         if not diff_lines:
+             continue
+         label = hostname + os.sep + path
+         if is_new:
+             label = LABEL_NEW_PREFIX + label
+         yield PCDiffFile(label=label, diff_lines=diff_lines)
+
+
+ def _json_fragment_diff(
+     hostname: str,
+     old_files: Dict[str, Any],
+     new_files: Dict[str, Tuple[Any, Optional[str]]],
+ ) -> Generator[PCDiffFile, None, None]:
+     def jsonify_multi(files):
+         return {
+             path: jsontools.format_json(cfg)
+             for path, cfg in files.items()
+         }
+
+     def jsonify_multi_with_cmd(files):
+         ret = {}
+         for path, cfg_reload_cmd in files.items():
+             cfg, reload_cmd = cfg_reload_cmd
+             ret[path] = (jsontools.format_json(cfg), reload_cmd)
+         return ret
+     jold, jnew = jsonify_multi(old_files), jsonify_multi_with_cmd(new_files)
+     return _pc_diff(hostname, jold, jnew)
+
+
+ def guess_hw(config_text: str):
+     """Try to guess the vendor and hw based on
+     the config text and annet/rulebook/texts/*.rul"""
+     scores = {}
+     hw_provider = hardware_connector.get()
+     for vendor in VENDOR_REVERSES:
+         hw = hw_provider.vendor_to_hw(vendor)
+         fmtr = tabparser.make_formatter(hw)
+         rb = rulebook.get_rulebook(hw)
+         config = tabparser.parse_to_tree(config_text, fmtr.split)
+         pre = patching.make_pre(patching.make_diff({}, config, rb, []))
+         metric = _count_pre_score(pre)
+         scores[metric] = hw
+     max_score = max(scores.keys())
+     hw = scores[max_score]
+     return hw, max_score
+
+
+ def _count_pre_score(top_pre) -> float:
+     """Walk the pre-config breadth-first
+     and count the number of matched
+     rules at each level.
+
+     The higher the resulting priority,
+     the better the rulebook matches the config.
+     """
+     score = 0
+     scores = []
+     cur, child = [top_pre], []
+     while cur:
+         for pre in cur.pop().values():
+             score += 1
+             for item in pre["items"].values():
+                 for op in [Op.ADDED, Op.AFFECTED, Op.REMOVED]:
+                     child += [x["children"] for x in item[op]]
+         if not cur:
+             scores.append(score)
+             score = 0
+             cur, child = child, []
+     result = 0
+     for i in reversed(scores):
+         result <<= i.bit_length()
+         result += i
+     if result > 0:
+         result = 1 - (1 / result)
+     return float(result)
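
The least obvious part of this file is the scoring heuristic that guess_hw and _count_pre_score implement: the per-level counts of matched rulebook rules are bit-packed into a single integer so that matches at deeper nesting levels dominate the comparison, and the result is then squashed into the range (0, 1). The following is a minimal, self-contained sketch of that packing, derived only from the code above; the helper name pack_level_scores is hypothetical and is not part of annet's API.

# Hypothetical illustration of the packing done by _count_pre_score; not part of annet.
def pack_level_scores(level_counts):
    # level_counts[0] is the number of rules matched at the top level of the
    # parsed config, level_counts[1] one level deeper, and so on.
    result = 0
    for count in reversed(level_counts):
        # Deeper levels are packed first, so they end up in the higher-order
        # bits and dominate the comparison between candidate rulebooks.
        result <<= count.bit_length()
        result += count
    # Squash the unbounded integer into (0, 1), as _count_pre_score does.
    return 1 - (1 / result) if result > 0 else 0.0


# A rulebook that also matches nested blocks outranks one that only
# matches top-level lines:
assert pack_level_scores([10, 8, 3]) > pack_level_scores([10])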