annet 0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of annet might be problematic. Click here for more details.
- annet/__init__.py +61 -0
- annet/annet.py +25 -0
- annet/annlib/__init__.py +7 -0
- annet/annlib/command.py +49 -0
- annet/annlib/diff.py +158 -0
- annet/annlib/errors.py +8 -0
- annet/annlib/filter_acl.py +196 -0
- annet/annlib/jsontools.py +89 -0
- annet/annlib/lib.py +495 -0
- annet/annlib/netdev/__init__.py +0 -0
- annet/annlib/netdev/db.py +62 -0
- annet/annlib/netdev/devdb/__init__.py +28 -0
- annet/annlib/netdev/devdb/data/devdb.json +137 -0
- annet/annlib/netdev/views/__init__.py +0 -0
- annet/annlib/netdev/views/dump.py +121 -0
- annet/annlib/netdev/views/hardware.py +112 -0
- annet/annlib/output.py +246 -0
- annet/annlib/patching.py +533 -0
- annet/annlib/rbparser/__init__.py +0 -0
- annet/annlib/rbparser/acl.py +120 -0
- annet/annlib/rbparser/deploying.py +55 -0
- annet/annlib/rbparser/ordering.py +52 -0
- annet/annlib/rbparser/platform.py +51 -0
- annet/annlib/rbparser/syntax.py +115 -0
- annet/annlib/rulebook/__init__.py +0 -0
- annet/annlib/rulebook/common.py +350 -0
- annet/annlib/tabparser.py +648 -0
- annet/annlib/types.py +35 -0
- annet/api/__init__.py +807 -0
- annet/argparse.py +415 -0
- annet/cli.py +192 -0
- annet/cli_args.py +493 -0
- annet/configs/context.yml +18 -0
- annet/configs/logging.yaml +39 -0
- annet/connectors.py +64 -0
- annet/deploy.py +441 -0
- annet/diff.py +85 -0
- annet/executor.py +551 -0
- annet/filtering.py +40 -0
- annet/gen.py +828 -0
- annet/generators/__init__.py +987 -0
- annet/generators/common/__init__.py +0 -0
- annet/generators/common/initial.py +33 -0
- annet/hardware.py +45 -0
- annet/implicit.py +139 -0
- annet/lib.py +128 -0
- annet/output.py +170 -0
- annet/parallel.py +448 -0
- annet/patching.py +25 -0
- annet/reference.py +148 -0
- annet/rulebook/__init__.py +114 -0
- annet/rulebook/arista/__init__.py +0 -0
- annet/rulebook/arista/iface.py +16 -0
- annet/rulebook/aruba/__init__.py +16 -0
- annet/rulebook/aruba/ap_env.py +146 -0
- annet/rulebook/aruba/misc.py +8 -0
- annet/rulebook/cisco/__init__.py +0 -0
- annet/rulebook/cisco/iface.py +68 -0
- annet/rulebook/cisco/misc.py +57 -0
- annet/rulebook/cisco/vlandb.py +90 -0
- annet/rulebook/common.py +19 -0
- annet/rulebook/deploying.py +87 -0
- annet/rulebook/huawei/__init__.py +0 -0
- annet/rulebook/huawei/aaa.py +75 -0
- annet/rulebook/huawei/bgp.py +97 -0
- annet/rulebook/huawei/iface.py +33 -0
- annet/rulebook/huawei/misc.py +337 -0
- annet/rulebook/huawei/vlandb.py +115 -0
- annet/rulebook/juniper/__init__.py +107 -0
- annet/rulebook/nexus/__init__.py +0 -0
- annet/rulebook/nexus/iface.py +92 -0
- annet/rulebook/patching.py +143 -0
- annet/rulebook/ribbon/__init__.py +12 -0
- annet/rulebook/texts/arista.deploy +20 -0
- annet/rulebook/texts/arista.order +125 -0
- annet/rulebook/texts/arista.rul +59 -0
- annet/rulebook/texts/aruba.deploy +20 -0
- annet/rulebook/texts/aruba.order +83 -0
- annet/rulebook/texts/aruba.rul +87 -0
- annet/rulebook/texts/cisco.deploy +27 -0
- annet/rulebook/texts/cisco.order +82 -0
- annet/rulebook/texts/cisco.rul +105 -0
- annet/rulebook/texts/huawei.deploy +188 -0
- annet/rulebook/texts/huawei.order +388 -0
- annet/rulebook/texts/huawei.rul +471 -0
- annet/rulebook/texts/juniper.rul +120 -0
- annet/rulebook/texts/nexus.deploy +24 -0
- annet/rulebook/texts/nexus.order +85 -0
- annet/rulebook/texts/nexus.rul +83 -0
- annet/rulebook/texts/nokia.rul +31 -0
- annet/rulebook/texts/pc.order +5 -0
- annet/rulebook/texts/pc.rul +9 -0
- annet/rulebook/texts/ribbon.deploy +22 -0
- annet/rulebook/texts/ribbon.rul +77 -0
- annet/rulebook/texts/routeros.order +38 -0
- annet/rulebook/texts/routeros.rul +45 -0
- annet/storage.py +121 -0
- annet/tabparser.py +36 -0
- annet/text_term_format.py +95 -0
- annet/tracing.py +170 -0
- annet/types.py +223 -0
- annet-0.1.dist-info/AUTHORS +21 -0
- annet-0.1.dist-info/LICENSE +21 -0
- annet-0.1.dist-info/METADATA +24 -0
- annet-0.1.dist-info/RECORD +113 -0
- annet-0.1.dist-info/WHEEL +5 -0
- annet-0.1.dist-info/entry_points.txt +6 -0
- annet-0.1.dist-info/top_level.txt +3 -0
- annet_generators/__init__.py +0 -0
- annet_generators/example/__init__.py +12 -0
- annet_generators/example/lldp.py +52 -0
- annet_nbexport/__init__.py +220 -0
- annet_nbexport/main.py +46 -0
annet/api/__init__.py
ADDED
|
@@ -0,0 +1,807 @@
|
|
|
1
|
+
import abc
|
|
2
|
+
import difflib
|
|
3
|
+
import os
|
|
4
|
+
import re
|
|
5
|
+
import sys
|
|
6
|
+
import time
|
|
7
|
+
from collections import OrderedDict as odict
|
|
8
|
+
from itertools import groupby
|
|
9
|
+
from operator import itemgetter
|
|
10
|
+
from typing import (
|
|
11
|
+
Any,
|
|
12
|
+
Dict,
|
|
13
|
+
Generator,
|
|
14
|
+
Iterable,
|
|
15
|
+
List,
|
|
16
|
+
Mapping,
|
|
17
|
+
Optional,
|
|
18
|
+
Set,
|
|
19
|
+
Tuple,
|
|
20
|
+
Union,
|
|
21
|
+
cast,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
import colorama
|
|
25
|
+
from annet.annlib import jsontools
|
|
26
|
+
from annet.annlib.netdev.views.hardware import HardwareView
|
|
27
|
+
from annet.annlib.rbparser.platform import VENDOR_REVERSES
|
|
28
|
+
from annet.annlib.types import GeneratorType
|
|
29
|
+
from contextlog import get_logger
|
|
30
|
+
|
|
31
|
+
import annet.deploy
|
|
32
|
+
from annet import cli_args
|
|
33
|
+
from annet import diff as ann_diff
|
|
34
|
+
from annet import filtering
|
|
35
|
+
from annet import gen as ann_gen
|
|
36
|
+
from annet import patching, rulebook, tabparser, tracing
|
|
37
|
+
from annet.hardware import hardware_connector
|
|
38
|
+
from annet.output import (
|
|
39
|
+
LABEL_NEW_PREFIX,
|
|
40
|
+
format_file_diff,
|
|
41
|
+
output_driver_connector,
|
|
42
|
+
print_err_label,
|
|
43
|
+
)
|
|
44
|
+
from annet.parallel import Parallel, TaskResult
|
|
45
|
+
from annet.reference import RefTracker
|
|
46
|
+
from annet.storage import Device, Storage, storage_connector
|
|
47
|
+
from annet.types import Diff, ExitCode, OldNewResult, Op, PCDiff, PCDiffFile
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
# Module-level cache of device "running" configs.  Re-exported from ann_gen;
# patch()/deploy()/check_diff() rebind it (via `global live_configs`) after
# fetching configs, so generation workers pick up the pre-fetched data.
live_configs = ann_gen.live_configs

# Default indentation unit used when rendering patches/diffs.
DEFAULT_INDENT = " "
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def patch_from_pre(pre, hw, rb, add_comments, ref_track=None, do_commit=True):
    """Build a patch tree from a "pre" diff structure.

    Creates an Orderer from the rulebook's ordering rules for the device
    vendor, feeds it the reference tracker, and delegates to
    patching.make_patch.

    :param pre: pre-diff structure produced by patching.make_pre.
    :param hw: HardwareView of the target device.
    :param rb: rulebook dict (uses its "ordering" section).
    :param add_comments: forwarded to make_patch; annotate the patch.
    :param ref_track: optional RefTracker; a fresh one is created if omitted.
    :param do_commit: forwarded to make_patch.
    """
    # NOTE(review): truthiness check — a caller-provided but "empty"/falsy
    # RefTracker would be silently replaced here; presumably intentional,
    # but `if ref_track is None` would be stricter.  TODO confirm.
    if not ref_track:
        ref_track = RefTracker()
    orderer = patching.Orderer(rb["ordering"], hw.vendor)
    orderer.ref_insert(ref_track)
    return patching.make_patch(
        pre=pre,
        rb=rb,
        hw=hw,
        add_comments=add_comments,
        orderer=orderer,
        do_commit=do_commit,
    )
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def _diff_and_patch(
    device, old, new, acl_rules, filter_acl_rules,
    add_comments, ref_track=None, do_commit=True, rb=None
) -> Tuple[Diff, Dict]:
    """Compute the diff between two config trees and the patch to apply it.

    :param device: device whose hardware selects the rulebook/formatting.
    :param old: old config tree.
    :param new: new (generated) config tree.
    :param acl_rules: if not None, both trees are filtered through the ACL
        before diffing.
    :param filter_acl_rules: additional filter ACL passed to make_diff.
    :param add_comments: annotate the new tree / patch with comments.
    :param ref_track: optional RefTracker forwarded to patch_from_pre.
    :param do_commit: forwarded to patch building.
    :param rb: precomputed rulebook; looked up from device.hw when None.
    :return: (diff_tree stripped of unchanged nodes, patch_tree).
    """
    if rb is None:
        rb = rulebook.get_rulebook(device.hw)
    # [NOCDEV-5532] Pass only the config parts relevant to the logic into diff
    if acl_rules is not None:
        old = patching.apply_acl(old, acl_rules)
        new = patching.apply_acl(new, acl_rules, with_annotations=add_comments)

    diff_tree = patching.make_diff(old, new, rb, [acl_rules, filter_acl_rules])
    pre = patching.make_pre(diff_tree)
    patch_tree = patch_from_pre(pre, device.hw, rb, add_comments, ref_track, do_commit)
    # strip_unchanged is applied *after* the patch is built: the patch needs
    # the full pre structure, the returned diff only the changed parts.
    diff_tree = patching.strip_unchanged(diff_tree)

    return (diff_tree, patch_tree)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
# =====
|
|
90
|
+
# =====
def _read_old_new_diff_patch(old: Dict[str, Dict], new: Dict[str, Dict], hw: HardwareView, add_comments: bool):
    """Diff two already-parsed config trees and build the patch.

    Unlike _diff_and_patch this takes a HardwareView directly (file-input
    mode, no device object) and applies no ACL filtering.

    :return: (rulebook, stripped diff tree, pre structure, patch tree).
    """
    rb = rulebook.get_rulebook(hw)
    diff_obj = patching.make_diff(old, new, rb, [])
    diff_obj = patching.strip_unchanged(diff_obj)
    pre = patching.make_pre(diff_obj)
    patchtree = patch_from_pre(pre, hw, rb, add_comments)
    return rb, diff_obj, pre, patchtree
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _read_old_new_hw(old_path: str, new_path: str, args: cli_args.FileInputOptions):
    """Read an old and a new config dump from disk and pick the hardware view.

    If args.hw is a string it is wrapped in a HardwareView; otherwise each
    file's hardware is guessed (see _read_device_config) and the detection
    with the higher confidence score wins.

    :return: (basename of the new file, old tree, new tree, chosen hw).
    """
    _logger = get_logger()
    hw = args.hw
    if isinstance(args.hw, str):
        hw = HardwareView(args.hw, "")

    old, old_hw, old_score = _read_device_config(old_path, hw)
    new, new_hw, new_score = _read_device_config(new_path, hw)
    # Prefer the new file's detection; fall back to old's if it scored higher.
    hw = new_hw
    if old_score > new_score:
        hw = old_hw
        if old_hw != new_hw:
            _logger.debug("Old and new detected hw differs, assume %r", hw)
    dest_name = os.path.basename(new_path)
    return dest_name, old, new, hw
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
@tracing.function
def _read_old_new_cfgdumps(args: cli_args.FileInputOptions):
    """Yield (old_path, new_path) pairs of config dumps to compare.

    If args.old is a plain file, the single pair is yielded as-is.
    Otherwise both paths are treated as directories and every ``*.cfg``-like
    entry of the old directory that also exists in the new one is paired up.
    Files present only in the new directory are silently ignored.
    """
    _logger = get_logger()
    old_path, new_path = os.path.normpath(args.old), os.path.normpath(args.new)
    if not os.path.isdir(old_path):
        yield (old_path, new_path)
        return
    _logger.info("Scanning cfgdumps: %s/*.cfg ...", old_path)
    cfgdump_reg = re.compile(r"^[^\s]+\.cfg$")
    # NOTE(review): this matches *directories* whose basename looks like
    # "*.cfg" and yields them as a pair before also scanning the listing —
    # looks intentional for pre-split dump layouts, but verify against
    # callers.  TODO confirm.
    if os.path.isdir(old_path) and os.path.isdir(new_path):
        if cfgdump_reg.match(os.path.basename(old_path)) and cfgdump_reg.match(os.path.basename(new_path)):
            yield (old_path, new_path)
    for name in os.listdir(old_path):
        old_path_name = os.path.join(old_path, name)
        new_path_name = os.path.join(new_path, name)
        if not os.path.exists(new_path_name):
            _logger.debug("Ignoring file %s: not exist %s", name, new_path_name)
            continue
        yield (old_path_name, new_path_name)
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def _read_device_config(path, hw):
    """Read a config dump from *path* and parse it into a tree.

    If *hw* is falsy the hardware is guessed from the dump text via guess_hw
    (defined elsewhere in this module), which also returns a confidence
    score; otherwise the score defaults to 1.

    :return: (parsed config tree, hardware view, detection score).
    :raises tabparser.ParserError: re-raised after logging.
    """
    _logger = get_logger()
    _logger.debug("Reading %r ...", path)
    score = 1

    # NOTE(review): open() uses the locale default encoding — presumably
    # dumps are ASCII/UTF-8; confirm before adding an explicit encoding.
    with open(path) as cfgdump_file:
        text = cfgdump_file.read()
    try:
        if not hw:
            hw, score = guess_hw(text)
        config = tabparser.parse_to_tree(
            text=text,
            splitter=tabparser.make_formatter(hw).split,
        )
        return config, hw, score
    except tabparser.ParserError:
        _logger.exception("Parser error: %r", path)
        raise
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
# =====
|
|
158
|
+
# =====
def _format_patch_blocks(patch_tree, hw, indent):
    """Render *patch_tree* to text with the platform formatter for *hw*."""
    return tabparser.make_formatter(hw, indent=indent).patch(patch_tree)
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
# =====
|
|
164
|
+
# =====
def _print_pre_as_diff(pre, show_rules, indent, file=None, _level=0):
    """Recursively print a "pre" diff structure as a colorized +/- diff.

    Removed rows are printed first (red '-'), then added (green '+'), then
    affected context (cyan ' ').  When *show_rules* is true, the rulebook
    rule that matched a group of rows is printed as a dim comment header.

    :param pre: pre structure (rule -> {"items", "attrs"}) from make_pre.
    :param show_rules: print matching rule headers.
    :param indent: indentation unit, repeated per nesting level.
    :param file: target stream for print(); None means stdout.
    :param _level: internal recursion depth (do not pass explicitly).
    """
    for (raw_rule, content) in sorted(pre.items(), key=itemgetter(0)):
        rule_printed = False
        for (op, sign) in [  # FIXME: Not very effective
            (Op.REMOVED, colorama.Fore.RED + "-"),
            (Op.ADDED, colorama.Fore.GREEN + "+"),
            (Op.AFFECTED, colorama.Fore.CYAN + " "),
        ]:
            items = content["items"].items()
            # Multiline blocks must keep their original order; everything
            # else is sorted by key for stable output.
            if not content["attrs"]["multiline"]:
                items = sorted(items, key=itemgetter(0))
            for (_, diff) in items:  # pylint: disable=redefined-outer-name
                if show_rules and not rule_printed and not raw_rule == "__MULTILINE_BODY__":
                    print("%s%s# %s%s%s" % (colorama.Style.BRIGHT, colorama.Fore.BLACK, (indent * _level),
                                            raw_rule, colorama.Style.RESET_ALL), file=file)
                    rule_printed = True
                for item in sorted(diff[op], key=itemgetter("row")):
                    print("%s%s%s %s%s" % (colorama.Style.BRIGHT, sign, (indent * _level),
                                           item["row"], colorama.Style.RESET_ALL), file=file)
                    if len(item["children"]) != 0:
                        _print_pre_as_diff(item["children"], show_rules, indent, file, _level + 1)
                        # Re-print the rule header after a nested block so the
                        # following rows are attributed correctly.
                        rule_printed = False
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def log_host_progress_cb(pool: Parallel, task_result: TaskResult):
    """Parallel-pool callback: log per-host completion progress.

    Resolves the device's FQDN through the storage, computes the percentage
    of finished tasks and logs status/colour/elapsed time.  Regression runs
    carry their own status fields in task_result.extra.

    :return: the (unmodified) task_result, as required by the callback API.
    """
    progress_logger = get_logger("progress")
    args = cast(cli_args.QueryOptions, pool.args[0])
    with storage_connector.get().storage()(args) as storage:
        hosts = storage.resolve_fdnds_by_query(args.query)
    perc = int(pool.tasks_done / len(hosts) * 100)
    fqdn = hosts[task_result.device_id]
    elapsed_time = "%dsec" % int(time.monotonic() - task_result.extra["start_time"])
    if task_result.extra.get("regression", False):
        # Regression mode supplies its own precomputed status fields.
        status = task_result.extra["status"]
        status_color = task_result.extra["status_color"]
        message = task_result.extra["message"]
    else:
        status = "OK" if task_result.exc is None else "FAIL"
        status_color = colorama.Fore.GREEN if status == "OK" else colorama.Fore.RED
        message = "" if status == "OK" else str(task_result.exc)
    progress_logger.info(message,
                         perc=perc, fqdn=fqdn, status=status, status_color=status_color,
                         worker=task_result.worker_name, task_time=elapsed_time)
    return task_result
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
# =====
|
|
211
|
+
# =====
def gen(args: cli_args.ShowGenOptions):
    """Generate configs for the queried devices (in parallel)."""
    with storage_connector.get().storage()(args) as storage:
        loader = ann_gen.Loader(storage, args)
        stdin = args.stdin(storage=storage, filter_acl=args.filter_acl, config=None)

        filterer = filtering.filterer_connector.get()
        pool = Parallel(ann_gen.worker, args, stdin, loader, filterer).tune_args(args)
        if args.show_hosts_progress:
            pool.add_callback(log_host_progress_cb)

        return pool.run(loader.device_ids, args.tolerate_fails, args.strict_exit_code)
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
# =====
|
|
226
|
+
def _diff_file(old_text: Optional[str], new_text: Optional[str], context=3):
|
|
227
|
+
old_lines = old_text.splitlines() if old_text else []
|
|
228
|
+
new_lines = new_text.splitlines() if new_text else []
|
|
229
|
+
context = max(len(old_lines), len(new_lines)) if context is None else context
|
|
230
|
+
return list(difflib.unified_diff(old_lines, new_lines, n=context, lineterm=""))
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def _diff_files(old_files, new_files, context=3):
    """Diff every new file against its old counterpart.

    :param old_files: mapping path -> old text (missing path = new file).
    :param new_files: mapping path -> (new text, reload data).
    :param context: unified-diff context, forwarded to _diff_file.
    :return: mapping path -> (diff lines, reload data, is_new flag).
    """
    result = {}
    for path, (new_text, reload_data) in new_files.items():
        previous = old_files.get(path)
        result[path] = (
            _diff_file(previous, new_text, context=context),
            reload_data,
            previous is None,
        )
    return result
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
def patch(args: cli_args.ShowPatchOptions):
    """Generate patches for the queried devices (in parallel)."""
    global live_configs  # pylint: disable=global-statement
    with storage_connector.get().storage()(args) as storage:
        loader = ann_gen.Loader(storage, args)
        if args.config == "running":
            # Pre-fetch running configs once; workers read the global cache.
            fetcher = annet.deploy.fetcher_connector.get()
            live_configs = fetcher.fetch(loader.devices, processes=args.parallel)
        stdin = args.stdin(storage=storage, filter_acl=args.filter_acl, config=args.config)

        filterer = filtering.filterer_connector.get()
        pool = Parallel(_patch_worker, args, stdin, loader, filterer).tune_args(args)
        if args.show_hosts_progress:
            pool.add_callback(log_host_progress_cb)
        return pool.run(loader.device_ids, args.tolerate_fails, args.strict_exit_code)
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
def _patch_worker(device_id, args: cli_args.ShowPatchOptions, stdin, loader: ann_gen.Loader, filterer: filtering.Filterer):
    """Per-device worker for patch(): yield (label, text, is_err) outputs.

    Three mutually exclusive output shapes, in priority order:
    entire files (PC devices), JSON-fragment patches, or a formatted
    CLI patch ("<host>.patch").
    """
    for res, _, patch_tree in res_diff_patch(device_id, args, stdin, loader, filterer):
        new_files = res.get_new_files(args.acl_safe)
        new_json_fragment_files = res.get_new_file_fragments(args.acl_safe)
        if new_files:
            # PC device: emit whole generated files as-is.
            for path, (cfg_text, _cmds) in new_files.items():
                label = res.device.hostname + os.sep + path
                yield label, cfg_text, False
        elif res.old_json_fragment_files or new_json_fragment_files:
            # JSON-fragment generators: emit a JSON patch per file.
            for path, (new_json_cfg, _cmds) in new_json_fragment_files.items():
                label = res.device.hostname + os.sep + path
                old_json_cfg = res.old_json_fragment_files[path]
                json_patch = jsontools.make_patch(old_json_cfg, new_json_cfg)
                yield (
                    label,
                    jsontools.format_json(json_patch),
                    False,
                )
        elif patch_tree:
            yield (
                "%s.patch" % res.device.hostname,
                _format_patch_blocks(patch_tree, res.device.hw, args.indent),
                False,
            )
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
# =====
|
|
287
|
+
# =====
def res_diff_patch(device_id, args: cli_args.ShowPatchOptions, stdin, loader: ann_gen.Loader, filterer: filtering.Filterer) -> Iterable[
        Tuple[OldNewResult, Dict, Dict]]:
    """Yield (old/new result, diff tree, patch tree) for one device.

    JSON-fragment results are yielded with (None, None) in place of the
    diff/patch trees — their patch is computed by the caller.  Devices
    without an old config produce nothing.
    """
    with storage_connector.get().storage()(args) as storage:
        for res in ann_gen.old_new(
            args,
            storage,
            config=args.config,
            loader=loader,
            filterer=filterer,
            stdin=stdin,
            device_ids=[device_id],
            no_new=args.clear,
            do_files_download=True,
        ):
            old = res.get_old(args.acl_safe)
            new = res.get_new(args.acl_safe)
            new_json_fragment_files = res.get_new_file_fragments(args.acl_safe)

            device = res.device
            acl_rules = res.get_acl_rules(args.acl_safe)
            if res.old_json_fragment_files or new_json_fragment_files:
                yield res, None, None
            elif old is not None:
                (diff_tree, patch_tree) = _diff_and_patch(device, old, new, acl_rules, res.filter_acl_rules,
                                                          args.add_comments)
                yield res, diff_tree, patch_tree
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
def diff(args: cli_args.DiffOptions, loader: ann_gen.Loader, filterer: filtering.Filterer) -> Mapping[Device, Union[Diff, PCDiff]]:
    """Compute per-device diffs for every device known to *loader*.

    PC-style devices (whole files or JSON fragments) get a PCDiff;
    CLI-config devices get a stripped diff tree.  Devices without an old
    config are omitted from the result.
    """
    ret = {}
    with storage_connector.get().storage()(args) as storage:
        for res in ann_gen.old_new(
            args,
            storage,
            config=args.config,
            loader=loader,
            no_new=args.clear,
            do_files_download=True,
            device_ids=loader.device_ids,
            filterer=filterer,
        ):
            old = res.get_old(args.acl_safe)
            new = res.get_new(args.acl_safe)
            device = res.device
            acl_rules = res.get_acl_rules(args.acl_safe)
            new_files = res.get_new_files(args.acl_safe)
            new_json_fragment_files = res.get_new_file_fragments()
            if res.old_files or new_files:
                # Whole-file (PC) configs: textual per-file diff.
                ret[device] = PCDiff(
                    hostname=device.hostname,
                    diff_files=list(_pc_diff(device.hostname, res.old_files, new_files)),
                )
            elif res.old_json_fragment_files or new_json_fragment_files:
                # JSON-fragment configs: diff of formatted JSON documents.
                ret[device] = PCDiff(
                    hostname=device.hostname,
                    diff_files=list(_json_fragment_diff(device.hostname, res.old_json_fragment_files, new_json_fragment_files)),
                )
            elif old is not None:
                rb = rulebook.get_rulebook(device.hw)
                diff_tree = patching.make_diff(old, new, rb, [acl_rules, res.filter_acl_rules])
                diff_tree = patching.strip_unchanged(diff_tree)
                ret[device] = diff_tree
    return ret
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def collapse_texts(texts: Mapping[str, str]) -> Mapping[Tuple[str, ...], str]:
    """Collapse identical texts together.

    Hosts whose texts are identical (compared line by line) are merged into
    a single entry whose key is the tuple of their hostnames.

    :param texts: mapping of hostname -> text.
    :return: mapping of (hostname, ...) tuples -> the shared text.
    """
    diffs_with_orig = {key: (value, value.splitlines()) for key, value in texts.items()}
    res = {}
    # itertools.groupby only merges *adjacent* runs, so the sort key must
    # lead with the grouping key (the line list).  A previous revision sorted
    # by hostname first, which left equal texts non-adjacent and prevented
    # them from ever being collapsed.  Hostname is the tiebreaker so key
    # tuples come out in sorted order.
    for _, group_iter in groupby(
        sorted(diffs_with_orig.items(), key=lambda item: (item[1][1], item[0])),
        key=lambda item: item[1][1],
    ):
        group = list(group_iter)
        res[tuple(name for name, _ in group)] = group[0][1][0]

    return res
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
class DeployerJob(abc.ABC):
    """Per-device accumulator of deploy commands, diffs and failures.

    Concrete subclasses (CliDeployerJob, PCDeployerJob) fill the instance
    collections from an old/new generation result via parse_result();
    Deployer then merges them across devices.
    """

    def __init__(self, device, args: cli_args.DeployOptions):
        self.args = args
        self.device = device
        # Comments are never added to deploy patches.
        self.add_comments = False
        # Human-readable diff text lines (PC jobs only).
        self.diff_lines = []
        # Human-readable command preview lines.
        self.cmd_lines: List[str] = []
        # device -> commands/files to push during deploy.
        self.deploy_cmds = odict()
        # device -> diff object (shape depends on the subclass).
        self.diffs = {}
        # fqdn -> exception for devices whose generation failed.
        self.failed_configs = {}
        self._has_diff = False

    @abc.abstractmethod
    def parse_result(self, res):
        """Consume one OldNewResult and populate the collections above."""
        pass

    def collapseable_diffs(self):
        # Diffs that may be merged across devices; CLI jobs override this.
        return {}

    def has_diff(self):
        return self._has_diff

    @staticmethod
    def from_device(device, args: cli_args.DeployOptions):
        """Factory: pick the job type matching the device's vendor."""
        if device.hw.vendor == "pc":
            return PCDeployerJob(device, args)
        return CliDeployerJob(device, args)
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
class CliDeployerJob(DeployerJob):
    """Deploy job for CLI-configured network devices."""

    def parse_result(self, res: OldNewResult):
        """Build diff + command list for one device from its old/new result."""
        device = res.device
        old = res.get_old(self.args.acl_safe)
        new = res.get_new(self.args.acl_safe)
        acl_rules = res.get_acl_rules(self.args.acl_safe)
        err = res.err

        if err:
            self.failed_configs[device.fqdn] = err
            return

        (diff_obj, patch_tree) = _diff_and_patch(device, old, new, acl_rules,
                                                 res.filter_acl_rules, self.add_comments,
                                                 do_commit=not self.args.dont_commit)
        # Flatten the patch tree into command paths; empty means no change.
        cmds = tabparser.make_formatter(device.hw, indent="").cmd_paths(patch_tree)
        if not cmds:
            return
        self._has_diff = True
        self.diffs[device] = diff_obj
        self.cmd_lines.extend(["= %s " % device.hostname, ""])
        # Last element of each command path is the actual command line.
        self.cmd_lines.extend(map(itemgetter(-1), cmds))
        self.cmd_lines.append("")
        deployer_driver = annet.deploy.driver_connector.get()
        self.deploy_cmds[device] = deployer_driver.apply_deploy_rulebook(
            device.hw, cmds,
            do_commit=not self.args.dont_commit
        )
        # Append the vendor-specific exit sequence.
        for cmd in deployer_driver.build_exit_cmdlist(device.hw):
            self.deploy_cmds[device].add_cmd(cmd)

    def collapseable_diffs(self):
        # CLI diffs can be collapsed across devices with identical changes.
        return self.diffs
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
class PCDeployerJob(DeployerJob):
    """Deploy job for PC-style devices (whole files / JSON fragments)."""

    def parse_result(self, res: ann_gen.OldNewResult):
        """Build upload files, reload commands and diffs for one PC device."""
        device = res.device
        old_files = res.old_files
        new_files = res.get_new_files(self.args.acl_safe)
        old_json_fragment_files = res.old_json_fragment_files
        new_json_fragment_files = res.get_new_file_fragments(self.args.acl_safe)
        err = res.err

        if err:
            self.failed_configs[device.fqdn] = err
            return
        elif not new_files and not new_json_fragment_files:
            return

        upload_files: Dict[str, bytes] = {}
        reload_cmds: Dict[str, bytes] = {}
        generator_types: Dict[str, GeneratorType] = {}
        for generator_type, pc_files in [(GeneratorType.ENTIRE, new_files), (GeneratorType.JSON_FRAGMENT, new_json_fragment_files)]:
            for file, (file_content_or_json_cfg, cmds) in pc_files.items():
                if generator_type == GeneratorType.ENTIRE:
                    # Whole-file generator: upload the text verbatim.
                    file_content: str = file_content_or_json_cfg
                    diff_content = "\n".join(_diff_file(old_files.get(file), file_content))
                else:  # generator_type == GeneratorType.JSON_FRAGMENT
                    # JSON fragment: upload a JSON patch, diff formatted docs.
                    old_json_cfg = old_json_fragment_files[file]
                    json_patch = jsontools.make_patch(old_json_cfg, file_content_or_json_cfg)
                    file_content = jsontools.format_json(json_patch)
                    old_text = jsontools.format_json(old_json_cfg)
                    new_text = jsontools.format_json(file_content_or_json_cfg)
                    diff_content = "\n".join(_diff_file(old_text, new_text))
                # Only files whose content actually changed are deployed.
                if diff_content:
                    self._has_diff = True
                    upload_files[file], reload_cmds[file] = file_content.encode(), cmds.encode()
                    generator_types[file] = generator_type
                    self.cmd_lines.append("= Deploy cmds %s/%s " % (device.hostname, file))
                    self.cmd_lines.extend([cmds, ""])
                    self.cmd_lines.append("= %s/%s " % (device.hostname, file))
                    self.cmd_lines.extend([file_content, ""])
                    self.diff_lines.append("= %s/%s " % (device.hostname, file))
                    self.diff_lines.extend([diff_content, ""])

        if upload_files:
            self.deploy_cmds[device] = {
                "files": upload_files,
                "cmds": reload_cmds,
                "generator_types": generator_types,
            }
            self.diffs[device] = upload_files
            deployer_driver = annet.deploy.driver_connector.get()
            # Wrap per-file reload commands with the driver's enter/exit
            # configuration sequences.
            before, after = deployer_driver.build_configuration_cmdlist(device.hw)
            for cmd in deployer_driver.build_exit_cmdlist(device.hw):
                after.add_cmd(cmd)
            cmds_pre_files = {}
            for file in self.deploy_cmds[device]["files"]:
                if before:
                    cmds_pre_files[file] = "\n".join(map(str, before)).encode(encoding="utf-8")
                self.deploy_cmds[device]["cmds"][file] += "\n".join(map(str, after)).encode(encoding="utf-8")
            self.deploy_cmds[device]["cmds_pre_files"] = cmds_pre_files
|
|
490
|
+
|
|
491
|
+
|
|
492
|
+
class Deployer:
    """Aggregates per-device DeployerJobs and drives the deploy dialogue.

    Collects diffs/commands from jobs, asks the operator for confirmation,
    and re-checks diffs after a deploy run.
    """

    def __init__(self, args: cli_args.DeployOptions):
        self.args = args

        self.cmd_lines = []
        self.deploy_cmds = odict()
        self.diffs = {}
        self.failed_configs: Dict[str, Exception] = {}
        self.fqdn_to_device: Dict[str, Device] = {}
        self.empty_diff_hostnames: Set[str] = set()

        self._collapseable_diffs = {}
        self._diff_lines: List[str] = []
        self._filterer = filtering.filterer_connector.get()

    def parse_result(self, job: DeployerJob, result: ann_gen.OldNewResult):
        """Feed one generation result through *job* and merge its output."""
        entire_reload = self.args.entire_reload
        logger = get_logger(job.device.hostname)

        job.parse_result(result)
        self.failed_configs.update(job.failed_configs)

        # With entire_reload=force a device is deployed even without a diff.
        if job.has_diff() or entire_reload is entire_reload.force:
            self.cmd_lines.extend(job.cmd_lines)
            self.deploy_cmds.update(job.deploy_cmds)
            self.diffs.update(job.diffs)

            self.fqdn_to_device[result.device.fqdn] = result.device
            self._collapseable_diffs.update(job.collapseable_diffs())
            self._diff_lines.extend(job.diff_lines)
        else:
            logger.info("empty diff")

    def diff_lines(self) -> List[str]:
        """Render all collected diffs as text lines for the confirm prompt.

        Identical diffs are collapsed across devices; device-name headers
        are wrapped to the terminal width when running interactively.
        Also records hostnames whose collapsed diff came out empty.
        """
        diff_lines = []
        diff_lines.extend(self._diff_lines)
        for devices, diff_obj in ann_diff.collapse_diffs(self._collapseable_diffs).items():
            if not diff_obj:
                self.empty_diff_hostnames.update(dev.hostname for dev in devices)
            if not self.args.no_ask_deploy:
                # Split the device list across several header lines to fit
                # the terminal width.
                dest_name = ""
                try:
                    _, term_columns_str = os.popen("stty size", "r").read().split()
                    term_columns = int(term_columns_str)
                except Exception:
                    # Not a terminal (or stty failed): effectively no limit.
                    term_columns = 2 ** 32
                fqdns = [dev.hostname for dev in devices]
                while fqdns:
                    fqdn = fqdns.pop()
                    if len(dest_name) == 0:
                        dest_name = "= %s" % fqdn
                    elif len(dest_name) + len(fqdn) < term_columns:
                        dest_name = "%s, %s" % (dest_name, fqdn)
                    else:
                        diff_lines.extend([dest_name])
                        dest_name = "= %s" % fqdn
                    if not fqdns:
                        diff_lines.extend([dest_name, ""])
            else:
                dest_name = "= %s" % ", ".join([dev.hostname for dev in devices])
                diff_lines.extend([dest_name, ""])

            for line in tabparser.make_formatter(devices[0].hw).diff(diff_obj):
                diff_lines.append(line)
            diff_lines.append("")
        return diff_lines

    def ask_deploy(self) -> str:
        """Show the diff (or command list) and ask to proceed; default 'y'."""
        return self._ask("y", annet.deploy.AskConfirm(
            text="\n".join(self.diff_lines()),
            alternative_text="\n".join(self.cmd_lines),
        ))

    def ask_rollback(self) -> str:
        """Ask whether to roll back; default 'n'."""
        return self._ask("n", annet.deploy.AskConfirm(
            text="Execute rollback?\n",
            alternative_text="",
        ))

    def _ask(self, default_ans: str, ask: annet.deploy.AskConfirm) -> str:
        """Run the interactive confirmation loop, or return *default_ans*.

        If filter_acl came from stdin, stdin is no longer a terminal — so we
        re-attach stdin to the controlling tty (via stdout) before asking.
        """
        ans = default_ans
        if not self.args.no_ask_deploy:
            try:
                if not os.isatty(sys.stdin.fileno()):
                    pts_path = os.ttyname(sys.stdout.fileno())
                    pts = open(pts_path, "r")  # pylint: disable=consider-using-with
                    os.dup2(pts.fileno(), sys.stdin.fileno())
            except OSError:
                pass
            ans = ask.loop()
        return ans

    def check_diff(self, result: annet.deploy.DeployResult, storage: Storage):
        """Re-diff successfully deployed (non-PC) hosts against 'running'.

        Any host that still shows a diff after deploy gets a Warning entry
        in failed_configs, and the remaining diff is printed.
        """
        global live_configs  # pylint: disable=global-statement
        success_hosts = [
            host.split(".", 1)[0] for (host, hres) in result.results.items()
            if (not isinstance(hres, Exception) and
                host not in self.empty_diff_hostnames and
                not self.fqdn_to_device[host].is_pc())
        ]
        diff_args = self.args.copy_from(
            self.args,
            config="running",
            query=success_hosts,
        )
        if diff_args.query:
            # Drop the pre-fetched configs so diff() re-fetches fresh ones.
            live_configs = None
            loader = ann_gen.Loader(storage, diff_args, no_empty_warning=True)

            diffs = diff(diff_args, loader, self._filterer)
            non_pc_diffs = {dev: diff for dev, diff in diffs.items() if not isinstance(diff, PCDiff)}
            devices_to_diff = ann_diff.collapse_diffs(non_pc_diffs)
            devices_to_diff.update({(dev,): diff for dev, diff in diffs.items() if isinstance(diff, PCDiff)})
        else:
            devices_to_diff = {}
        for devices, diff_obj in devices_to_diff.items():
            if diff_obj:
                for dev in devices:
                    self.failed_configs[dev.fqdn] = Warning("Deploy OK, but diff still exists")
                if isinstance(diff_obj, PCDiff):
                    for diff_file in diff_obj.diff_files:
                        print_err_label(diff_file.label)
                        print("\n".join(format_file_diff(diff_file.diff_lines)))
                else:
                    output_driver = output_driver_connector.get()
                    dest_name = ", ".join([output_driver.cfg_file_names(dev)[0] for dev in devices])
                    print_err_label(dest_name)
                    _print_pre_as_diff(patching.make_pre(diff_obj), diff_args.show_rules, diff_args.indent)
|
|
622
|
+
|
|
623
|
+
|
|
624
|
+
def deploy(args: cli_args.DeployOptions) -> ExitCode:
    """Generate configs for the selected devices and deploy them.

    Exit code is a bitmask:
      * bit 0 — at least one host deploy raised an exception;
      * bit 1 — residual diffs / failed configs remain after deploy;
      * bit 2 — operator declined the deploy prompt;
      * bit 3 — at least one device failed config generation.
    """
    ret: ExitCode = 0
    deployer = Deployer(args)
    with storage_connector.get().storage()(args) as storage:
        global live_configs  # pylint: disable=global-statement
        loader = ann_gen.Loader(storage, args)
        filterer = filtering.filterer_connector.get()
        fetcher = annet.deploy.fetcher_connector.get()
        deploy_driver = annet.deploy.driver_connector.get()
        # Prefetch current device configs once for the whole run.
        live_configs = fetcher.fetch(devices=loader.devices, processes=args.parallel)
        pool = ann_gen.OldNewParallel(storage, args, loader, filterer)

        for res in pool.generated_configs(loader.device_ids):
            # Flip the exit code if any device hit an exception during generation.
            if res.err is not None:
                ret |= 2 ** 3
            job = DeployerJob.from_device(res.device, args)
            deployer.parse_result(job, res)

        deploy_cmds = deployer.deploy_cmds
        result = annet.deploy.DeployResult(hostnames=[], results={}, durations={}, original_states={})
        if deploy_cmds:
            # Interactive confirmation before touching any device.
            ans = deployer.ask_deploy()
            if ans != "y":
                return 2 ** 2
            result = annet.lib.do_async(deploy_driver.bulk_deploy(deploy_cmds, args))

        rolled_back = False
        # Devices that reported a pre-deploy state can be rolled back.
        rollback_cmds = {deployer.fqdn_to_device[x]: cc for x, cc in result.original_states.items() if cc}
        if args.rollback and rollback_cmds:
            ans = deployer.ask_rollback()
            if rollback_cmds and ans == "y":
                rolled_back = True
                annet.lib.do_async(deploy_driver.bulk_deploy(rollback_cmds, args))

        # Verifying the diff after a rollback would be meaningless.
        if not args.no_check_diff and not rolled_back:
            deployer.check_diff(result, storage)

        if deployer.failed_configs:
            result.add_results(deployer.failed_configs)
            ret |= 2 ** 1

        annet.deploy.show_bulk_report(result.hostnames, result.results, result.durations, log_dir=None)
        for host_result in result.results.values():
            if isinstance(host_result, Exception):
                ret |= 2 ** 0
                break
        return ret
|
|
673
|
+
|
|
674
|
+
|
|
675
|
+
def file_diff(args: cli_args.FileDiffOptions):
    """Produce a rulebook-based diff between files or directories."""
    cfgdump_pairs = list(_read_old_new_cfgdumps(args))
    worker_pool = Parallel(file_diff_worker, args).tune_args(args)
    return worker_pool.run(cfgdump_pairs, tolerate_fails=True)
|
|
680
|
+
|
|
681
|
+
|
|
682
|
+
def file_diff_worker(old_new: Tuple[str, str], args: cli_args.FileDiffOptions) -> Generator[
        Tuple[str, str, bool], None, None]:
    """Yield ``(label, diff_text, False)`` for one (old, new) path pair.

    Directory pairs are diffed file-by-file as PC-style configs; file pairs
    go through the rulebook diff machinery.
    """
    old_path, new_path = old_new
    if os.path.isdir(old_path) and os.path.isdir(new_path):
        hostname = os.path.basename(new_path)
        # _pc_diff expects new-side values as (cfg_text, reload_cmd) tuples;
        # there is no reload command for on-disk dumps, hence "".
        new_files = {relative_cfg_path: (cfg_text, "") for relative_cfg_path, cfg_text in
                     ann_gen.load_pc_config(new_path).items()}
        old_files = ann_gen.load_pc_config(old_path)
        for diff_file in _pc_diff(hostname, old_files, new_files):
            diff_text = (
                "\n".join(diff_file.diff_lines)
                if args.no_color
                else "\n".join(format_file_diff(diff_file.diff_lines))
            )
            if diff_text:
                yield diff_file.label, diff_text, False
    else:
        dest_name, old, new, hw = _read_old_new_hw(old_path, new_path, args)
        # Only the "pre" (diff tree) part of the 4-tuple is needed here.
        _, __, pre, ___ = _read_old_new_diff_patch(old, new, hw, add_comments=False)
        diff_lines = ann_diff.gen_pre_as_diff(pre, args.show_rules, args.indent, args.no_color)
        diff_text = "".join(diff_lines)
        if diff_text:
            yield dest_name, diff_text, False
|
|
705
|
+
|
|
706
|
+
|
|
707
|
+
@tracing.function
def file_patch(args: cli_args.FilePatchOptions):
    """Produce a patch between files or directories."""
    cfgdump_pairs = list(_read_old_new_cfgdumps(args))
    worker_pool = Parallel(file_patch_worker, args).tune_args(args)
    return worker_pool.run(cfgdump_pairs, tolerate_fails=True)
|
|
713
|
+
|
|
714
|
+
|
|
715
|
+
def file_patch_worker(old_new: Tuple[str, str], args: cli_args.FileDiffOptions) -> Generator[
        Tuple[str, str, bool], None, None]:
    """Yield ``(label, patch_text, False)`` for one (old, new) path pair.

    For a pair of directories every file of the new tree is emitted as-is
    (PC-style configs are applied wholesale). For a pair of files the
    rulebook machinery produces a real patch.
    """
    old_path, new_path = old_new
    both_are_dirs = os.path.isdir(old_path) and os.path.isdir(new_path)
    if both_are_dirs:
        base_name = os.path.basename(new_path)
        for rel_path, cfg_text in ann_gen.load_pc_config(new_path).items():
            yield os.path.join(base_name, rel_path), cfg_text, False
        return
    dest_name, old_cfg, new_cfg, hw = _read_old_new_hw(old_path, new_path, args)
    # Only the patch tree (last element of the 4-tuple) is needed here.
    *_unused, patch_tree = _read_old_new_diff_patch(old_cfg, new_cfg, hw, args.add_comments)
    patch_text = _format_patch_blocks(patch_tree, hw, args.indent)
    if patch_text:
        yield dest_name, patch_text, False
|
|
728
|
+
|
|
729
|
+
|
|
730
|
+
def _pc_diff(hostname: str, old_files: Dict[str, str],
             new_files: Dict[str, Tuple[str, Optional[str]]]) -> Generator[PCDiffFile, None, None]:
    """Yield a :class:`PCDiffFile` per changed file of a PC-style config.

    :param hostname: prefixed to each file path to build the diff label.
    :param old_files: path -> current config text.
    :param new_files: path -> (new config text, reload command) — both
        callers pass 2-tuples here, not bare strings.
    """
    # Deterministic output order: sort by file path.
    sorted_lines = sorted(_diff_files(old_files, new_files).items())
    for (path, (diff_lines, _reload_data, is_new)) in sorted_lines:
        if not diff_lines:
            continue
        label = hostname + os.sep + path
        if is_new:
            # Mark files that did not exist in the old config.
            label = LABEL_NEW_PREFIX + label
        yield PCDiffFile(label=label, diff_lines=diff_lines)
|
|
739
|
+
|
|
740
|
+
|
|
741
|
+
def _json_fragment_diff(
    hostname: str,
    old_files: Dict[str, Any],
    new_files: Dict[str, Tuple[Any, Optional[str]]],
) -> Generator[PCDiffFile, None, None]:
    """Diff JSON fragments by rendering both sides to text first.

    :param hostname: prefixed to each file path in the diff labels.
    :param old_files: path -> current JSON-serializable config.
    :param new_files: path -> (new JSON-serializable config, reload command).
    """
    rendered_old = {
        path: jsontools.format_json(cfg)
        for path, cfg in old_files.items()
    }
    rendered_new = {
        path: (jsontools.format_json(cfg), reload_cmd)
        for path, (cfg, reload_cmd) in new_files.items()
    }
    return _pc_diff(hostname, rendered_old, rendered_new)
|
|
760
|
+
|
|
761
|
+
|
|
762
|
+
def guess_hw(config_text: str):
    """Guess the vendor and hardware model from raw config text.

    Parses the text with every known vendor's formatter/rulebook
    (annushka/rulebook/texts/*.rul) and scores how well each rulebook
    matches; the best-scoring hardware wins.

    :return: ``(hw, max_score)`` — the guessed hardware and its score.
    """
    scores = {}
    hw_provider = hardware_connector.get()
    for vendor in VENDOR_REVERSES:
        hw = hw_provider.vendor_to_hw(vendor)
        fmtr = tabparser.make_formatter(hw)
        rb = rulebook.get_rulebook(hw)
        config = tabparser.parse_to_tree(config_text, fmtr.split)
        # Diff against an empty config: every matched rule counts toward
        # how well this vendor's rulebook "understands" the text.
        pre = patching.make_pre(patching.make_diff({}, config, rb, []))
        metric = _count_pre_score(pre)
        # NOTE(review): keyed by score, so two vendors with an equal score
        # collide and the later one wins — presumably acceptable here.
        scores[metric] = hw
    max_score = max(scores.keys())
    hw = scores[max_score]
    return hw, max_score
|
|
778
|
+
|
|
779
|
+
|
|
780
|
+
def _count_pre_score(top_pre) -> float:
    """Breadth-first walk over a pre-config, scoring matched rules per level.

    Counts how many rules matched at each nesting level, then folds the
    per-level counts into a single number where shallower levels occupy
    lower-order bits. The higher the resulting priority, the better the
    rulebook fits the config.

    :return: a float in [0, 1); 0 means nothing matched at all.
    """
    score = 0
    scores = []
    cur, child = [top_pre], []
    while cur:
        for pre in cur.pop().values():
            score += 1
            # Collect the next level: children of every added/affected/removed item.
            for item in pre["items"].values():
                for op in [Op.ADDED, Op.AFFECTED, Op.REMOVED]:
                    child += [x["children"] for x in item[op]]
        if not cur:
            # Current level exhausted — record its count and descend.
            scores.append(score)
            score = 0
            cur, child = child, []
    result = 0
    # Pack level counts deepest-first; each count gets exactly the number
    # of bits it needs (bit_length), so no level can overflow into another.
    for i in reversed(scores):
        result <<= i.bit_length()
        result += i
    if result > 0:
        # Map the unbounded integer monotonically into (0, 1).
        result = 1 - (1 / result)
    return float(result)
|