annet 1.0.4__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of annet has been flagged as potentially problematic.

@@ -5,7 +5,7 @@ from typing import cast, List, Union, Iterable, Optional, TypedDict
  from annet.storage import Query

  FIELD_VALUE_SEPARATOR = ":"
- ALLOWED_GLOB_GROUPS = ["site", "tag", "role", "device_type"]
+ ALLOWED_GLOB_GROUPS = ["site", "tag", "role", "device_type", "status", "tenant"]


  class Filter(TypedDict, total=False):
@@ -14,6 +14,8 @@ class Filter(TypedDict, total=False):
  role: list[str]
  name: list[str]
  device_type: list[str]
+ status: list[str]
+ tenant: list[str]


  @dataclass
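The hunk above widens the query filter: two new glob groups and two new Filter keys, status and tenant. A minimal sketch of what the expanded Filter can now carry (illustrative only; the values are made up, and the field:value query syntax is only implied by FIELD_VALUE_SEPARATOR):

    # Hypothetical filter dict; "status" and "tenant" are the keys added in 1.1.1.
    flt: Filter = {
        "site": ["dc1"],
        "status": ["active"],
        "tenant": ["backbone"],
    }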
annet/annlib/patching.py CHANGED
@@ -2,7 +2,7 @@ import copy
  import operator
  import textwrap
  from collections import OrderedDict as odict
- from typing import ( # pylint: disable=unused-import
+ from typing import (
  Any,
  Dict,
  Iterator,
@@ -177,7 +177,7 @@ class Orderer:
  def rule_weight(self, row, rule, regexp_key):
  return len(set(row).intersection(set(rule["attrs"][regexp_key].pattern))) / len(row)

- def get_order(self, row, cmd_direct):
+ def get_order(self, row, cmd_direct, scope: str | None = None):
  f_order = None
  f_weight = 0
  f_rule = ""
@@ -186,6 +186,15 @@ class Orderer:
  block_exit = platform.VENDOR_EXIT[self.vendor]

  for (order, (raw_rule, rule)) in enumerate(ordering.items()):
+ if (
+ (rule_scope := rule["attrs"]["scope"]) is not None
+ and scope not in rule_scope
+ ):
+ continue
+
+ if rule["attrs"]["global"]:
+ children.append((raw_rule, rule))
+
  direct_matched = bool(rule["attrs"]["direct_regexp"].match(row))
  if not rule["attrs"]["order_reverse"] and (direct_matched or rule["attrs"]["reverse_regexp"].match(row)):
  # if order_reverse is not specified, the rule is treated as direct
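With this change ordering rules can be restricted to a named scope: a rule whose scope attribute is a non-None list is skipped unless the caller's scope is in that list, and, in the make_patch hunk below, ordering is requested with scope="patch". A rough sketch of that filter in isolation (hypothetical rule dicts, not the real rulebook structures):

    def rules_for_scope(rules: list[dict], scope: str | None) -> list[dict]:
        """Keep rules whose 'scope' is unset or contains the requested scope."""
        selected = []
        for rule in rules:
            rule_scope = rule["attrs"]["scope"]
            if rule_scope is not None and scope not in rule_scope:
                continue  # rule is limited to other scopes
            selected.append(rule)
        return selected

    rules = [
        {"attrs": {"scope": ["patch"]}},  # only applies when ordering a patch
        {"attrs": {"scope": None}},       # applies everywhere
    ]
    assert len(rules_for_scope(rules, None)) == 1
    assert len(rules_for_scope(rules, "patch")) == 2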
@@ -395,7 +404,7 @@ def make_patch(pre, rb, hw, add_comments, orderer=None, _root_pre=None, do_commi
  for (key, diff) in content["items"].items():
  # so that logic cannot modify the attributes
  rule_pre = content.copy()
- attrs = rule_pre["attrs"].copy()
+ attrs = copy.deepcopy(rule_pre["attrs"])

  iterable = attrs["logic"](
  rule=attrs,
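The switch from rule_pre["attrs"].copy() to copy.deepcopy(rule_pre["attrs"]) matters because dict.copy() is shallow: nested values are still shared with the original, so the "logic" callback could mutate attributes through the copy. A standalone sketch of the difference (plain Python, not annet code):

    import copy

    attrs = {"logic": {"children": ["a"]}}

    shallow = attrs.copy()
    shallow["logic"]["children"].append("b")   # also visible through attrs

    deep = copy.deepcopy(attrs)
    deep["logic"]["children"].append("c")      # attrs stays untouched

    print(attrs["logic"]["children"])  # ['a', 'b']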
@@ -416,7 +425,7 @@ def make_patch(pre, rb, hw, add_comments, orderer=None, _root_pre=None, do_commi
  patch_row = "%s %s" % (row, comments)

  # pylint: disable=unused-variable
- (order, order_direct, ordering, order_rule) = orderer.get_order(row, direct)
+ (order, order_direct, ordering, order_rule) = orderer.get_order(row, direct, scope="patch")
  fmt_row = patch_row
  # fmt_row += " # %s" % str(order_rule) # uncomment to debug ordering

@@ -544,8 +553,7 @@ def _select_match(matches, rules):
  for (rule, is_cr_allowed) in map(operator.itemgetter(0), matches):
  if is_cr_allowed:
  local_children = merge_dicts(local_children, rule["children"]["local"])
- # optional break on is_cr_allowed==False?
-
+ # optional break on is_cr_allowed==False?
  global_children = merge_dicts(global_children, rule["children"]["global"])

  global_children = merge_dicts(global_children, rules["global"])
@@ -555,9 +563,10 @@ def _select_match(matches, rules):
  "global": global_children,
  }

- match = {"attrs": f_rule["attrs"]}
+ match = {"attrs": copy.deepcopy(f_rule["attrs"])}
  match.update(f_other)
- return (match, children_rules)
+
+ return match, children_rules


  def _rules_local_global(rules):
@@ -2,7 +2,7 @@ import functools
  import re
  from collections import OrderedDict as odict

- from valkit.common import valid_bool
+ from valkit.common import valid_bool, valid_string_list

  from . import platform, syntax

@@ -16,6 +16,14 @@ def compile_ordering_text(text, vendor):
  "validator": valid_bool,
  "default": False,
  },
+ "global": {
+ "validator": valid_bool,
+ "default": False,
+ },
+ "scope": {
+ "validator": valid_string_list,
+ "default": None,
+ }
  }),
  reverse_prefix=platform.VENDOR_REVERSES[vendor],
  )
@@ -44,6 +52,8 @@ def _compile_ordering(tree, reverse_prefix):
  syntax.compile_row_regexp(re.sub(r"^%s\s+" % (reverse_prefix), "", attrs["row"]))
  ),
  "order_reverse": attrs["params"]["order_reverse"],
+ "global": attrs["params"]["global"],
+ "scope": attrs["params"]["scope"],
  "raw_rule": attrs["raw_rule"],
  "context": attrs["context"],
  },
@@ -25,7 +25,7 @@ VENDOR_DIFF = {
  "routeros": "common.default_diff",
  "aruba": "aruba.default_diff",
  "pc": "common.default_diff",
- "ribbon": "ribbon.default_diff",
+ "ribbon": "common.default_diff",
  "b4com": "common.default_diff",
  }

@@ -40,7 +40,7 @@ VENDOR_DIFF_ORDERED = {
  "routeros": "common.ordered_diff",
  "aruba": "common.ordered_diff",
  "pc": "common.ordered_diff",
- "ribbon": "ribbon.default_diff",
+ "ribbon": "common.ordered_diff",
  "b4com": "common.ordered_diff",
  }

annet/annlib/tabparser.py CHANGED
@@ -1,6 +1,8 @@
  import dataclasses
  import itertools
+ import json
  import re
+ import textwrap
  from collections import OrderedDict as odict
  from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Tuple, Union, List

@@ -70,10 +72,10 @@ class CommonFormatter:
  self._block_end = ""
  self._statement_end = ""

- def split(self, text):
+ def split(self, text: str):
  return list(filter(None, text.split("\n")))

- def join(self, config):
+ def join(self, config: "PatchTree"):
  return "\n".join(
  _filtered_block_marks(
  self._indent_blocks(self._blocks(config, is_patch=False))
@@ -86,14 +88,14 @@ class CommonFormatter:
  def diff(self, diff):
  return list(self.diff_generator(diff))

- def patch(self, patch):
+ def patch(self, patch: "PatchTree") -> str:
  return "\n".join(
  _filtered_block_marks(
  self._indent_blocks(self._blocks(patch, is_patch=True))
  )
  )

- def cmd_paths(self, patch):
+ def cmd_paths(self, patch: "PatchTree") -> odict:
  ret = odict()
  path = []
  for row, context in self.blocks_and_context(patch, is_patch=True):
@@ -175,7 +177,7 @@ class CommonFormatter:
  )
  yield BlockEnd, None

- def _blocks(self, tree, is_patch):
+ def _blocks(self, tree: "PatchTree", is_patch: bool):
  for row, _context in self.blocks_and_context(tree, is_patch):
  yield row

@@ -386,7 +388,32 @@ class AsrFormatter(BlockExitFormatter):


  class JuniperFormatter(CommonFormatter):
- patch_set_prefix = "set "
+ patch_set_prefix = "set"
+
+ @dataclasses.dataclass
+ class Comment:
+ begin = "/*"
+ end = "*/"
+
+ row: str
+ comment: str
+
+ def __post_init__(self):
+ self.row = self.row.strip()
+ self.comment = self.comment.strip()
+
+ @classmethod
+ def loads(cls, value: str):
+ return cls(
+ **json.loads(
+ value.removeprefix(cls.begin)
+ .removesuffix(cls.end)
+ .strip()
+ )
+ )
+
+ def dumps(self):
+ return json.dumps({"row": self.row, "comment": self.comment})

  def __init__(self, indent=" "):
  super().__init__(indent)
@@ -395,20 +422,32 @@ class JuniperFormatter(CommonFormatter):
  self._statement_end = ";"
  self._endofline_comment = "; ##"

- def split(self, text):
- sub_regexs = (
+ self._sub_regexs = (
  (re.compile(self._block_begin + r"\s*" + self._block_end + r"$"), ""), # collapse empty blocks
  (re.compile(self._block_begin + "(\t# .+)?$"), ""),
  (re.compile(self._statement_end + r"$"), ""),
  (re.compile(r"\s*" + self._block_end + "(\t# .+)?$"), ""),
  (re.compile(self._endofline_comment + r".*$"), ""),
  )
- split = []
- for line in text.split("\n"):
- for (regex, repl_line) in sub_regexs:
- line = regex.sub(repl_line, line)
- split.append(line)
- return list(filter(None, split))
+
+ def sub_regexs(self, value: str) -> str:
+ for (regex, repl_line) in self._sub_regexs:
+ value = regex.sub(repl_line, value)
+ return value
+
+ def split(self, text: str) -> list[str]:
+ comment_begin, comment_end = map(re.escape, (self.Comment.begin, self.Comment.end))
+ comment_regexp = re.compile(fr"(\s+{comment_begin})((?:(?!{comment_end}).)*)({comment_end})")
+
+ result = []
+ lines = text.split("\n")
+ for i, line in enumerate(lines):
+ line = self.sub_regexs(line)
+ if i + 1 < len(lines) and (m := comment_regexp.match(line)):
+ line = f"{m.group(1)} {self.Comment(self.sub_regexs(lines[i + 1]), m.group(2)).dumps()} {m.group(3)}"
+ result.append(line)
+
+ return list(filter(None, result))

  def join(self, config):
  return "\n".join(_filtered_block_marks(self._formatted_blocks(self._indented_blocks(config))))
@@ -419,6 +458,13 @@
  def patch_plain(self, patch):
  return list(self.cmd_paths(patch).keys())

+ def _blocks(self, tree: "PatchTree", is_patch: bool):
+ for row in super()._blocks(tree, is_patch):
+ if isinstance(row, str) and row.startswith(self.Comment.begin):
+ yield f"{self.Comment.begin} {self.Comment.loads(row).comment} {self.Comment.end}"
+ else:
+ yield row
+
  def _formatted_blocks(self, blocks):
  level = 0
  line = None
@@ -430,33 +476,48 @@ class JuniperFormatter(CommonFormatter):
  elif new_line is BlockEnd:
  level -= 1
  if isinstance(line, str):
- yield line + self._statement_end
+ yield line + ("" if line.endswith(self.Comment.end) else self._statement_end)
  yield self._indent * level + self._block_end
  elif isinstance(line, str):
- yield line + self._statement_end
+ yield line + ("" if line.endswith(self.Comment.end) else self._statement_end)
  line = new_line
  if isinstance(line, str):
  yield line + self._statement_end

- def cmd_paths(self, patch, _prev=""):
+ def cmd_paths(self, patch, _prev=tuple()):
  commands = odict()
  for item in patch.itms:
  key, childs, context = item.row, item.child, item.context
+
  if childs:
- for k, v in self.cmd_paths(childs, _prev + " " + key).items():
+ for k, v in self.cmd_paths(childs, (*_prev, key.strip())).items():
  commands[k] = v
  else:
- if key.startswith("delete"):
- cmd = "delete" + _prev + " " + key.replace("delete", "", 1).strip()
+ if "comment" in context:
+ value = (
+ ""
+ if key.startswith("delete")
+ else context["comment"]
+ )
+
+ cmd = "\n".join(
+ (
+ "edit " + " ".join(_prev),
+ " ".join(("annotate", context["row"].split(" ")[0], f'"{value}"')),
+ "exit"
+ )
+ )
+ elif key.startswith("delete"):
+ cmd = " ".join(("delete", *_prev, key.replace("delete", "", 1).strip()))
  elif key.startswith("activate"):
- cmd = "activate" + _prev + " " + key.replace("activate", "", 1).strip()
+ cmd = " ".join(("activate", *_prev, key.replace("activate", "", 1).strip()))
  elif key.startswith("deactivate"):
- cmd = "deactivate" + _prev + " " + key.replace("deactivate", "", 1).strip()
+ cmd = " ".join(("deactivate", *_prev, key.replace("deactivate", "", 1).strip()))
  else:
- cmd = (self.patch_set_prefix + _prev.strip()).strip() + " " + key
+ cmd = " ".join((self.patch_set_prefix, *_prev, key.strip()))
+
  # Expanding [ a b c ] junipers list of arguments
- matches = re.search(r"^(.*)\s+\[(.+)\]$", cmd)
- if matches:
+ if matches := re.search(r"^(.*)\s+\[(.+)\]$", cmd):
  for c in matches.group(2).split(" "):
  if c.strip():
  cmd = " ".join([matches.group(1), c])
@@ -490,7 +551,7 @@ class JuniperList:


  class NokiaFormatter(JuniperFormatter):
- patch_set_prefix = "/configure "
+ patch_set_prefix = "/configure"

  def __init__(self, *args, **kwargs):
  super().__init__(*args, **kwargs)
@@ -517,18 +578,18 @@ class NokiaFormatter(JuniperFormatter):
  finish = finish if finish is not None else len(ret)
  return ret[start:finish]

- def cmd_paths(self, patch, _prev=""):
+ def cmd_paths(self, patch, _prev=tuple()):
  commands = odict()
  for item in patch.itms:
  key, childs, context = item.row, item.child, item.context
  if childs:
- for k, v in self.cmd_paths(childs, _prev + " " + key).items():
+ for k, v in self.cmd_paths(childs, (*_prev, key.strip())).items():
  commands[k] = v
  else:
  if key.startswith("delete"):
- cmd = "/configure delete" + _prev + " " + key.replace("delete", "", 1).strip()
+ cmd = " ".join((self.patch_set_prefix, "delete", *_prev, key.replace("delete", "", 1).strip()))
  else:
- cmd = self.patch_set_prefix + _prev.strip() + " " + key
+ cmd = " ".join((self.patch_set_prefix, *_prev, key.strip()))
  # Expanding [ a b c ] junipers list of arguments
  matches = re.search(r"^(.*)\s+\[(.+)\]$", cmd)
  if matches:
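The JuniperFormatter changes above add an inline Comment helper that stores a config row together with its comment as JSON between /* ... */ markers, so comments survive parsing and can later be emitted as annotate commands by cmd_paths; cmd_paths itself also switches from string concatenation to joining a tuple of path segments, which avoids the stray double spaces the old `_prev + " " + key` code could produce. A small round-trip sketch built on the Comment class shown above (illustrative; the row and comment strings are made up, and the import matches the one used by the juniper rulebook further down):

    from annet.tabparser import JuniperFormatter

    # Encode a row/comment pair the way split() embeds it into the parsed config...
    marker = JuniperFormatter.Comment(row="set system host-name r1", comment="managed by annet")
    encoded = f"{JuniperFormatter.Comment.begin} {marker.dumps()} {JuniperFormatter.Comment.end}"

    # ...and recover it the way the rulebook's comment_processor and _blocks() do.
    decoded = JuniperFormatter.Comment.loads(encoded)
    assert (decoded.row, decoded.comment) == (marker.row, marker.comment)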
annet/executor.py CHANGED
@@ -1,29 +1,13 @@
  import asyncio
- import logging
- import multiprocessing
  import os
- import platform
- import resource
- import signal
  import statistics
- import time
  from abc import ABC, abstractmethod
  from functools import partial
  from operator import itemgetter
- from queue import Empty
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+ from typing import Any, List, Optional

  import colorama
- import psutil
-
- import annet.lib
  from annet.annlib.command import Command, CommandList, Question # noqa: F401
- from annet.storage import Device
-
-
- _logger = logging.getLogger(__name__)
- FIRST_EXCEPTION = 1
- ALL_COMPLETED = 2


  class CommandResult(ABC):
@@ -32,41 +16,6 @@ class CommandResult(ABC):
  pass


- class Connector(ABC):
- @abstractmethod
- async def cmd(self, cmd: Union[Command, str]) -> CommandResult:
- pass
-
- @abstractmethod
- async def download(self, files: List[str]) -> Dict[str, str]:
- pass
-
- @abstractmethod
- async def upload(self, files: Dict[str, str]):
- pass
-
- @abstractmethod
- def get_conn_trace(self) -> str:
- pass
-
- @abstractmethod
- async def aclose(self) -> str:
- pass
-
-
- class Executor(ABC):
- # method for bulk config downloading TODO: remove in favor Connector.cmd
- @abstractmethod
- def fetch(self,
- devices: List[Device],
- files_to_download: Dict[str, List[str]] = None) -> Tuple[Dict[Device, str], Dict[Device, Any]]:
- pass
-
- @abstractmethod
- async def amake_connection(self, device: Device) -> Connector:
- pass
-
-
  class ExecutorException(Exception):
  def __init__(self, *args: List[Any], auxiliary: Optional[Any] = None, **kwargs: object):
  self.auxiliary = auxiliary
@@ -103,117 +52,6 @@ class NonzeroRetcode(ExecException):
  class CommitException(ExecException):
  pass

- def chunks_tuple(l, n): # noqa
- return [tuple(l[i:i + n]) for i in range(0, len(l), n)]
-
-
- def async_bulk(
- executor: Executor,
- devices: List[Device],
- coro_gen: Callable[[Connector, Device], Any],
- *args,
- processes: int = 1,
- show_report: bool = True,
- do_log: bool = True,
- log_dir: Optional[str] = None,
- policy: int = ALL_COMPLETED,
- **kwargs,
- ):
- """Connect to specified devices and work with their CLI.
-
- Note: this function is not allowed to be run in parallel, since it's using global state (TODO: fixme)
-
- :param devices: List of devices' fqdns to use their CLI.
- :param coro_gen: Async function. It contains all logic about usage of CLI.
- See docstring of "bind_coro_args" for allowed function signature and examples.
- :param args: Positional arguments to "bulk" function.
- :type processes: Amount of processes to fork for current work.
- :param show_report: Set this flag to show report to stdout.
- :param do_log: If True and log_dir is not set, then log_dir will be filled automatically.
- :param log_dir: Specify path to log all response from devices.
- :param policy: int flag. If FIRST_EXCEPTION, then work will be stopped after first error.
- Otherwise all hosts will be processed.
- TODO: fix that policy is not used if processes=1
- :param kwargs: other arguments to pass to "bulk" function.
- Note: it is not passed directly to "coro_gen" function!
- kwargs should be {'kwargs': {'var1': value1}} to set "var1" with "value1" in "coro_gen" function.
-
- TODOs:
- * do not log if do_log=False and log_dir is set.
-
- """
- res = {}
- deploy_durations = {}
- kwargs["log_dir"] = log_dir
- kwargs["policy"] = policy
-
- if processes == 1:
- host_res, host_duration = annet.lib.do_async(bulk(executor, devices, coro_gen, *args, **kwargs))
- res.update(host_res)
- deploy_durations.update(host_duration)
- else:
- # FIXME: show_report works per process
- if len(devices) != len(set(devices)):
- raise Exception("hostnames should be unique")
- # warm up a cache
- # asyncio.get_event_loop().run_until_complete(get_validator_rt_data(hostnames))
- if isinstance(devices, dict):
- devices = list(devices.keys())
- hostnames_chunks = chunks_tuple(devices, int(len(devices) / processes) + 1)
- pool = {}
- for hostnames_chunk in hostnames_chunks:
- res_q = multiprocessing.Queue()
- p = multiprocessing.Process(target=_mp_async_bulk, args=[res_q, hostnames_chunk, coro_gen, *args], kwargs=kwargs)
- pool[p] = [res_q, hostnames_chunk]
- p.start()
- _logger.info("process (id=%d) work with %d chunks", p.pid, len(hostnames_chunks))
-
- seen_error = False
- while True:
- done = []
- for p in pool:
- host_res = None
- try:
- # proc wont be exited till q.get() call
- host_res, host_duration = pool[p][0].get(timeout=0.2)
- except Empty:
- pass
- else:
- done.append(p)
-
- if not p.is_alive() and not host_res:
- _logger.error("process %s has died: hostnames: %s", p.pid, pool[p][1])
- host_res = {hostname: Exception("died with exitcode %s" % p.exitcode) for hostname in pool[p][1]}
- host_duration = {hostname: 0 for hostname in pool[p][1]} # FIXME:
- done.append(p)
-
- if host_res:
- res.update(host_res)
- deploy_durations.update(host_duration)
-
- if p.exitcode:
- _logger.error("process %s finished with bad exitcode %s", p.pid, p.exitcode)
- seen_error = True
- for p in done:
- pool.pop(p)
- if policy == FIRST_EXCEPTION and seen_error:
- for p in pool:
- p.terminate()
- if p.is_alive():
- time.sleep(0.4)
- if p.is_alive():
- os.kill(p.pid, signal.SIGKILL)
- for hostname in pool[p][1]:
- res[hostname] = Exception("force kill with exitcode %s" % p.exitcode)
- deploy_durations[hostname] = 0 # FIXME:
- if not pool:
- break
-
- if show_report:
- show_bulk_report(devices, res, deploy_durations, do_log and log_dir)
- return res
-

  def _show_type_summary(caption, items, total, stat_items=None):
  if items:
219
57
  if items:
@@ -314,139 +152,6 @@ def _print_failed(host, res):
314
152
  print(" %s - %s" % (color + host + colorama.Style.RESET_ALL, _format_exc(exc)))
315
153
 
316
154
 
317
- def _mp_async_bulk(res_q: multiprocessing.Queue, *args, **kwargs):
318
- res = annet.lib.do_async(bulk(*args, **kwargs))
319
- res_q.put(res)
320
- res_q.close()
321
-
322
-
323
- async def bulk(
324
- executor: Executor,
325
- devices: List[Device],
326
- coro_gen: Callable[[Connector, Device, Optional[Dict[str, Any]]], Any],
327
- max_parallel: float = 100,
328
- policy: int = ALL_COMPLETED,
329
- log_dir: str = True, # pylint: disable=unused-argument
330
- kwargs: Optional[dict] = None,
331
- console_log: bool = True
332
- ) -> Tuple[Dict[str, Any], Dict[str, float]]:
333
- """Connect to specified devices and work with their CLI.
334
-
335
- :param hostnames: List of devices' fqdns to use their CLI.
336
- :param coro_gen: Async function. It contains all logic about usage of CLI.
337
- See docstring of "bind_coro_args" for allowed function signature and examples.
338
- :param max_parallel: Upper border to CPU usage (in percentage 1 CPU = 100).
339
- If cpu usage is over, then tasks are trottled.
340
- :param policy: Flag to specify when tasks are completed.
341
- :param log_dir: Specify path to log all response from devices.
342
- TODO: fix default value.
343
- :param kwargs: Device independent arguments to call function. See @bind_coro_args for details.
344
- :param get_device: See "make_connection" for better understanding.
345
- :param device_cls: See "make_connection" for better understanding.
346
- :param streamer_cls: See "make_connection" for better understanding.
347
- :param console_log: If True and there is no handlers for root logger, then stderr will be used for logging.
348
- :return: two dicts with results per host and execution duration per host.
349
-
350
- """
351
- if console_log:
352
- init_log()
353
-
354
- tasks = []
355
- res = {}
356
- pending = set()
357
- tasks_to_device = {}
358
- time_of_start = {}
359
- deploy_durations = {}
360
- now = None
361
- if not kwargs:
362
- kwargs = {}
363
-
364
- def start_hook(device: Device):
365
- time_of_start[device.hostname] = time.monotonic()
366
-
367
- def end_hook(device: Device, task: asyncio.Task):
368
- duration = now - time_of_start[device.hostname]
369
- deploy_durations[device.hostname] = duration
370
-
371
- coro_exc = task.exception()
372
- if coro_exc:
373
- if policy == FIRST_EXCEPTION:
374
- _logger.error("%s %r", device.hostname, coro_exc, exc_info=coro_exc)
375
- _logger.info("Terminating all running tasks according to FIRST_EXCEPTION policy")
376
- res[device.hostname] = coro_exc
377
- raise CancelAllTasks
378
- else:
379
- if isinstance(coro_exc, AssertionError):
380
- _logger.error("%s %r", device.hostname, coro_exc, exc_info=coro_exc)
381
- else:
382
- _logger.error("%s %r", device.hostname, coro_exc)
383
- return coro_exc
384
- else:
385
- _logger.info("Finished in %0.2fs, hostname=%s", duration, device.hostname)
386
- return task.result()
387
-
388
- for device in devices:
389
- try:
390
- conn = await executor.amake_connection(device=device)
391
- except Exception as exc:
392
- _logger.error("failed to connect to %s %r", device.hostname, exc)
393
- res[device] = exc
394
- continue
395
- start_hook(device)
396
- task = asyncio.create_task(coro_gen(conn=conn, device=device, **kwargs))
397
- tasks_to_device[task] = device
398
- tasks.append(task)
399
- try:
400
- ndone = 0
401
- with CpuThrottler(asyncio.get_event_loop(), maximum=max_parallel) as throttler:
402
- while pending or tasks:
403
- left_fds = int(fd_left() / 2) # better to be safe than sorry
404
-
405
- for _ in range(min(throttler.curr - len(pending), len(tasks), left_fds)):
406
- pending.add(tasks.pop(0))
407
- if len(pending) == 0:
408
- _logger.debug("empty pending list. tasks=%s throttler curr=%s left_fds=%s. waiting", len(tasks),
409
- throttler.curr, left_fds)
410
- await asyncio.sleep(1)
411
- continue
412
- example_host = next(iter(pending))
413
- _logger.debug("tasks status: %d pending, %d queued, pending example %s", len(pending), len(tasks), example_host)
414
- done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
415
-
416
- now = time.monotonic()
417
- for task in done:
418
- device = tasks_to_device[task]
419
- res[device] = end_hook(device, task)
420
- ndone += 1
421
- except CancelAllTasks:
422
- exc = asyncio.CancelledError()
423
-
424
- now = time.monotonic()
425
- for device, task in _get_remaining(tasks, pending, tasks_to_device):
426
- res[device] = exc
427
-
428
- if device.hostname in time_of_start:
429
- duration = now - time_of_start[device.hostname]
430
- else:
431
- duration = None
432
- deploy_durations[device.hostname] = duration
433
-
434
- if not asyncio.iscoroutine(task):
435
- _logger.info("task %s", task)
436
- task.cancel()
437
-
438
- return res, deploy_durations
439
-
440
-
441
- def init_log():
442
- streamer = logging.StreamHandler()
443
- fmt = logging.Formatter("%(asctime)s - %(filename)s:%(lineno)d - %(funcName)s() - %(levelname)s - %(message)s",
444
- "%Y-%m-%d %H:%M:%S")
445
- streamer.setFormatter(fmt)
446
- if not logging.root.handlers:
447
- logging.root.addHandler(streamer)
448
-
449
-
450
155
  class DeferredFileWrite:
451
156
  def __init__(self, file, mode="r"):
452
157
  self._file = file
@@ -465,87 +170,3 @@ class DeferredFileWrite:

  def flush(self):
  pass
-
-
- class CancelAllTasks(Exception):
- pass
-
-
- def _get_remaining(tasks, pending, tasks_to_device):
- for task in pending:
- yield (tasks_to_device[task], task)
- for task in tasks:
- yield (tasks_to_device[task], task)
-
-
- _platform = platform.system()
- _fd_available = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
-
-
- def fd_left():
- res = _fd_available
- if _platform == "Linux":
- res = _fd_available - len(list(os.scandir(path="/proc/self/fd/")))
- return res
-
-
- class CpuThrottler:
- def __init__(self, loop, start=20, maximum=None, minimum=5, hz=1.0, target=80.0):
- self.loop = loop
- self.minimum = int(minimum)
- self.maximum = int(maximum or 0)
- self.hz = hz
- self.target = target
- self.timer_handle = None
- self.last_usage = 0
- self.curr = int(start)
- self.proc = psutil.Process(os.getpid())
-
- def __enter__(self):
- if self.proc and self.maximum:
- self.proc.cpu_percent() # initialize previous value
- self.timer_handle = self.loop.call_later(self.hz, self.schedule)
- return self
-
- def __exit__(self, type_, value, tb):
- if self.timer_handle:
- self.timer_handle.cancel()
-
- def schedule(self):
- # re-schedule
- self.timer_handle = self.loop.call_later(self.hz, self.schedule)
-
- cpu_usage = self.proc.cpu_percent()
- self.last_usage = cpu_usage
- _logger.debug("current cpu_usage=%s", cpu_usage)
- if cpu_usage > self.target:
- self.change_by(0.5)
- elif cpu_usage > self.target * 0.8:
- pass
- elif cpu_usage > self.target * 0.2:
- self.change_by(1.2)
- else:
- self.change_by(1.5)
-
- def change_by(self, rate):
- new_curr = int(self.curr * rate)
- # round in the appropriate direction
- if new_curr == self.curr:
- if rate > 1:
- new_curr += 1
- elif rate < 1:
- new_curr -= 1
- # clamp to the configured limits
- if new_curr < self.curr:
- new_curr = max(self.minimum, new_curr)
- else:
- if self.maximum is not None:
- new_curr = min(self.maximum, new_curr)
-
- if new_curr < self.curr:
- _logger.info("decreasing max_slots %d -> %d, cpu_usage=%.1f", self.curr, new_curr, self.last_usage)
- elif new_curr > self.curr:
- _logger.info("increasing max_slots %d -> %d, cpu_usage=%.1f", self.curr, new_curr, self.last_usage)
-
- # never let new_curr drop below 1, otherwise we could not increase it again
- self.curr = max(new_curr, 1)
@@ -1,73 +1,122 @@
  import re
  from collections import OrderedDict as odict
- from functools import wraps
+ import functools

  from annet.annlib.lib import jun_activate, jun_is_inactive, merge_dicts
  from annet.annlib.types import Op
-
+ from annet.tabparser import JuniperFormatter
  from annet.rulebook import common


- def _inactive_blocks(diff_foo):
- @wraps(diff_foo)
- def wrapper(old, new, diff_pre, *args, **kwargs):
+ def comment_processor(item: common.DiffItem):
+ if item.op in (Op.REMOVED, Op.ADDED) and item.row.startswith(JuniperFormatter.Comment.begin):
+ comment = JuniperFormatter.Comment.loads(item.row)
+
+ item.diff_pre["attrs"]["context"]["row"] = comment.row
+ item.diff_pre["attrs"]["context"]["comment"] = comment.comment
+ item.diff_pre["key"] = item.diff_pre["raw_rule"] = (
+ f"{JuniperFormatter.Comment.begin} {comment.row} {JuniperFormatter.Comment.end}"
+ )
+
+ return common.DiffItem(
+ item.op,
+ f"{JuniperFormatter.Comment.begin} {comment.comment} {JuniperFormatter.Comment.end}",
+ item.children,
+ item.diff_pre,
+ )
+ else:
+ return item
+
+
+ def inactive_blocks(diff_foo):
+ @functools.wraps(diff_foo)
+ def wrapper(old: odict, new: odict, diff_pre, *args, **kwargs):
  old_inactives = list(map(jun_activate, filter(jun_is_inactive, old)))
  new_inactives = list(map(jun_activate, filter(jun_is_inactive, new)))
+
+ diff: list[common.DiffItem]
+
  if len(old_inactives) == 0 and len(new_inactives) == 0:
- return diff_foo(old, new, diff_pre, *args, **kwargs)
-
- inactive_pre = odict([(jun_activate(k), v) for k, v in diff_pre.items() if jun_is_inactive(k)])
- merged_pre = merge_dicts(diff_pre, inactive_pre)
- diff = diff_foo(_strip_toplevel_inactives(old),
- _strip_toplevel_inactives(new),
- merged_pre,
- *args, **kwargs)
-
- for activated in [k for k in old_inactives if k in new]:
- diff += [(Op.ADDED, _activate_cmd(activated, merged_pre), {}, diff_pre[activated]["match"])]
-
- for deactivated in [k for k in new_inactives if k not in old_inactives]:
- # if the block being deactivated does not exist, emit a single deactivate and do not go deeper
- if deactivated not in diff_pre:
- diff = [(Op.ADDED, _deactivate_cmd(deactivated, merged_pre), {}, inactive_pre[deactivated]["match"])]
- else:
- diff += [(Op.ADDED, _deactivate_cmd(deactivated, merged_pre), {}, diff_pre[deactivated]["match"])]
- return diff
+ diff = diff_foo(old, new, diff_pre, *args, **kwargs)
+ else:
+ inactive_pre = odict([(jun_activate(k), v) for k, v in diff_pre.items() if jun_is_inactive(k)])
+ merged_pre = merge_dicts(diff_pre, inactive_pre)
+
+ diff = diff_foo(
+ strip_toplevel_inactives(old),
+ strip_toplevel_inactives(new),
+ merged_pre,
+ *args, **kwargs
+ )
+
+ for activated in [k for k in old_inactives if k in new]:
+ diff.append(
+ common.DiffItem(
+ Op.ADDED,
+ activate_cmd(activated, merged_pre),
+ [],
+ diff_pre[activated]["match"]
+ )
+ )
+
+ for deactivated in [k for k in new_inactives if k not in old_inactives]:
+ # if the block being deactivated does not exist, emit a single deactivate and do not go deeper
+ if deactivated not in diff_pre:
+ diff = [
+ common.DiffItem(
+ Op.ADDED,
+ deactivate_cmd(deactivated, merged_pre),
+ [],
+ inactive_pre[deactivated]["match"]
+ )
+ ]
+ else:
+ diff.append(
+ common.DiffItem(
+ Op.ADDED,
+ deactivate_cmd(deactivated, merged_pre),
+ [],
+ diff_pre[deactivated]["match"]
+ )
+ )
+
+ return list(map(comment_processor, diff))
+
  return wrapper


- @_inactive_blocks
+ @inactive_blocks
  def default_diff(old, new, diff_pre, _pops=(Op.AFFECTED,)):
  diff = common.default_diff(old, new, diff_pre, _pops)
- diff = _ignore_quotes(diff)
- diff = _strip_inactive_removed(diff)
+ diff = ignore_quotes(diff)
+ diff = strip_inactive_removed(diff)
  return diff


- @_inactive_blocks
+ @inactive_blocks
  def ordered_diff(old, new, diff_pre, _pops=(Op.AFFECTED,)):
  diff = common.ordered_diff(old, new, diff_pre, _pops)
- diff = _ignore_quotes(diff)
- diff = _strip_inactive_removed(diff)
+ diff = ignore_quotes(diff)
+ diff = strip_inactive_removed(diff)
  return diff


  # =====
- def _strip_toplevel_inactives(tree):
+ def strip_toplevel_inactives(tree):
  for inactive in filter(jun_is_inactive, tree):
  assert jun_activate(inactive) not in tree
  return odict([(k, v) if not jun_is_inactive(k) else (jun_activate(k), v) for k, v in tree.items()])


- def _activate_cmd(active_key, diff_pre):
- return _cmd(active_key, diff_pre, "activate")
+ def activate_cmd(active_key, diff_pre):
+ return cmd(active_key, diff_pre, "activate")


- def _deactivate_cmd(active_key, diff_pre):
- return _cmd(active_key, diff_pre, "deactivate")
+ def deactivate_cmd(active_key, diff_pre):
+ return cmd(active_key, diff_pre, "deactivate")


- def _cmd(active_key, diff_pre, cmd):
+ def cmd(active_key, diff_pre, cmd):
  assert not jun_is_inactive(active_key)
  if not diff_pre[active_key]["subtree"]:
  # If the command has no sub-blocks AND has arguments, the arguments must be dropped
@@ -75,7 +124,7 @@ def _cmd(active_key, diff_pre, cmd):
  return " ".join([cmd, active_key])


- def _ignore_quotes(diff):
+ def ignore_quotes(diff):
  """
  Filters out of the diff those lines that differ
  only by the presence or absence of quotes
@@ -86,19 +135,19 @@ def _ignore_quotes(diff):
  """
  equivs = {}
  for elem in diff:
- key = _strip_quotes(elem[1])
+ key = strip_quotes(elem[1])
  if key not in equivs:
  equivs[key] = 0
  equivs[key] += 1
- filtered_diff = [elem for elem in diff if equivs[_strip_quotes(elem[1])] == 1]
+ filtered_diff = [elem for elem in diff if equivs[strip_quotes(elem[1])] == 1]
  return filtered_diff


- def _strip_quotes(key):
+ def strip_quotes(key):
  return re.sub(r"\"(?P<quoted_text>[^\"]+)\"$", r"\g<quoted_text>", key)


- def _strip_inactive_removed(diff):
+ def strip_inactive_removed(diff):
  for elem in diff:
  if elem[0] == Op.REMOVED and elem[3]["key"]:
  key = elem[3]["key"][0]
@@ -1,3 +1,4 @@
+ # vim: set syntax=annrulebook:
  # This file defines the order in which commands should be sent to the device.
  # - If the order of a command does not matter, it can be omitted from this file entirely.
  # - If a command starts with undo and the %order_reverse parameter is set, the command is treated
@@ -29,7 +30,7 @@ ntp


  qos profile *
- ~ %global
+ ~


  snmp-server
@@ -49,7 +50,7 @@ tacacs-server


  aaa group
- ~ %global
+ ~


  aaa
@@ -59,7 +60,7 @@ username


  role
- ~ %global
+ ~


  vlan
@@ -73,14 +74,14 @@ mpls ip
  mpls ldp

  router isis
- ~ %global
+ ~

  interface
- ~ %global
+ ~


  (?:ip|ipv6) access-list
- ~ %global
+ ~


  (?:ip|ipv6) route
@@ -101,15 +102,15 @@ qos map


  route-map * (?:permit|deny) *
- ~ %global
+ ~


  router bfd
- ~ %global
+ ~


  router bgp
- ~ %global
+ ~


  router multicast
@@ -119,8 +120,7 @@ ip tacacs


  management
- ~ %global
+ ~


- # vim: set syntax=annrulebook:
- ~ %global
+ ~
@@ -1,3 +1,4 @@
+ # vim: set syntax=annrulebook:
  # This file defines the order in which commands should be sent to the device.
  # - If the order of a command does not matter, it can be omitted from this file entirely.
  # - If a command starts with undo and the %order_reverse parameter is set, the command is treated
@@ -384,5 +385,4 @@ undo sflow %order_reverse
  # the index is referenced in the prefix-list patching logic in huawei.misc.prefix_list
  undo ip */(ip|ipv6)/-prefix * index 99999999 %order_reverse

- # vim: set syntax=annrulebook:
- ~ %global
+ ~
@@ -0,0 +1,4 @@
+ # A rule that matches all commands
+ * %global
+ # Add weight to comments so that they always roll out after the commands, but only in the patch
+ */\/\*(?:(?!\*\/).)*\*\// %global %scope=patch
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: annet
- Version: 1.0.4
+ Version: 1.1.1
  Summary: annet
  Home-page: https://github.com/annetutil/annet
  License: MIT
@@ -15,7 +15,6 @@ Requires-Dist: PyYAML>=6.0.1
  Requires-Dist: Pygments>=2.14.0
  Requires-Dist: Mako>=1.2.4
  Requires-Dist: Jinja2>=3.1.2
- Requires-Dist: psutil>=5.8.0
  Requires-Dist: packaging>=23.2
  Requires-Dist: contextlog>=1.1
  Requires-Dist: valkit>=0.1.4
@@ -23,7 +22,7 @@ Requires-Dist: yarl>=1.8.2
  Requires-Dist: adaptix==3.0.0b7
  Requires-Dist: dataclass-rest==0.4
  Provides-Extra: netbox
- Requires-Dist: annetbox[sync]>=0.2.0; extra == "netbox"
+ Requires-Dist: annetbox[sync]>=0.2.1; extra == "netbox"
  Dynamic: home-page
  Dynamic: license
  Dynamic: provides-extra
@@ -8,7 +8,7 @@ annet/connectors.py,sha256=aoiDVLPizx8CW2p8SAwGCzyO_WW8H9xc2aujbGC4bDg,4882
  annet/deploy.py,sha256=3O96k17FbVt8KCvxF4gujXAB81U2-XRJyHLpbc9ekSQ,7529
  annet/deploy_ui.py,sha256=SDTJ-CF6puW0KHQ0g_NDp61Tqh6xkTBMxv8PrBhGyNI,27977
  annet/diff.py,sha256=zLcaCnb4lZRUb7frpH1CstQ3kacRcCblZs1uLG8J5lk,3391
- annet/executor.py,sha256=Jny-hm0otZA1naPpFWR-R16SbaZioSQ8pkx-Yd2PYlM,19004
+ annet/executor.py,sha256=lcKI-EbYqeCiBNpL729kSltduzxbAzOkQ1L_QK7tNv8,5112
  annet/filtering.py,sha256=ZtqxPsKdV9reZoRxtQyBg22BqyMqd-2SotYcxZ-68AQ,903
  annet/gen.py,sha256=A718tYqIcxAa8tQEdjR6PjQ2ovWBnwPH7STKh38lmFY,33567
  annet/hardware.py,sha256=_iR28dWiPtt6ZYdk-qg1sxazkSRJE3ukqKB-fFFfQak,1141
@@ -35,7 +35,7 @@ annet/adapters/netbox/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
  annet/adapters/netbox/common/client.py,sha256=PaxHG4W9H8_uunIwMBNYkLq4eQJYoO6p6gY-ciQs7Nc,2563
  annet/adapters/netbox/common/manufacturer.py,sha256=Y9kqU13q6fwYu0_HiotUKAy7OHFZngkC2s3s4IDAbDg,1745
  annet/adapters/netbox/common/models.py,sha256=cnNf2oB_BDRz4ZYkHpib1qPxwY1iREJMiWlg8T0lORY,7559
- annet/adapters/netbox/common/query.py,sha256=G4cBSgUs_SYRCsxHjeyyxsCMlRKieP6h2isUjd--du0,1803
+ annet/adapters/netbox/common/query.py,sha256=kbNQSZwkjFeDArHwA8INHUauxCxYElXtNh58pZipWdo,1867
  annet/adapters/netbox/common/status_client.py,sha256=XXx0glomaBaglmkUEy6YtFOxQQkHb59CDA0h1I-IhxM,592
  annet/adapters/netbox/common/storage_opts.py,sha256=wfv1spElomwgVYMCgGth3SWVF0RsRgtUgq9GpFL9hJs,1520
  annet/adapters/netbox/v24/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -50,8 +50,8 @@ annet/annlib/filter_acl.py,sha256=0w1VF6WcONiTYTQh0yWi6_j9rCTc_kMLAUMr0hbdkNU,72
  annet/annlib/jsontools.py,sha256=BS7s4rI8R9c_y3zz0zYl1l6con65oQ0MvfsC1dsXZts,5535
  annet/annlib/lib.py,sha256=eJ4hcVuQ6pdYBzLs4YKCHFtq45idOfZCYp92XfF7_QI,15317
  annet/annlib/output.py,sha256=_SjJ6G6bejvnTKqNHw6xeio0FT9oO3OIkLaOC3cEga4,7569
- annet/annlib/patching.py,sha256=p5u3jl3_Iod0CGcnfZsfFR3Izo_roorny_v0stjDCWs,21142
- annet/annlib/tabparser.py,sha256=Xsje7t2bEZqZ8hhgnEYgjQGaDZ5mBWgNxwE2wpCkwXQ,28961
+ annet/annlib/patching.py,sha256=IZYW4kydEzBmRi_PZ8Lk0g7hx-sSYl2wjd6lDaI0D4k,21435
+ annet/annlib/tabparser.py,sha256=dLH6idK7zr6ZDhdIugjdohTHURcEIXQVN7vPIv5qsjA,31208
  annet/annlib/types.py,sha256=VHU0CBADfYmO0xzB_c5f-mcuU3dUumuJczQnqGlib9M,852
  annet/annlib/netdev/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annet/annlib/netdev/db.py,sha256=fI_u5aya4l61mbYSjj4JwlVfi3s7obt2jqERSuXGRUI,1634
@@ -63,8 +63,8 @@ annet/annlib/netdev/views/hardware.py,sha256=3JCZLH7deIHhCguwPJTUX-WDvWjG_xt6BdS
  annet/annlib/rbparser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annet/annlib/rbparser/acl.py,sha256=RR8yPt6t96_IiyuKVbeZ-3x32cyhBAT2wC1y99oWBO8,3931
  annet/annlib/rbparser/deploying.py,sha256=ACT8QNhDAhJx3ZKuGh2nYBOrpdka2qEKuLDxvQAGKLk,1649
- annet/annlib/rbparser/ordering.py,sha256=DiKqyY8Khz-5MTxNF1GSNtZgtyKwT3YYCXpahIPB6Ps,1779
- annet/annlib/rbparser/platform.py,sha256=hnxznTfV9txXi1PkR1hZrprTrQJvlwgqXVL8vXkYmv4,1558
+ annet/annlib/rbparser/ordering.py,sha256=SmN_22pIJSIkmyT1-HSjWsqid7UJ0DgkqyQu7IO3bS4,2142
+ annet/annlib/rbparser/platform.py,sha256=Q9HtqmhyzV3JK_236_4LjC2wgp5fgxY6seDfWYl1oHU,1558
  annet/annlib/rbparser/syntax.py,sha256=iZ7Y-4QQBw4L3UtjEh54qisiRDhobl7HZxFNdP8mi54,3577
  annet/annlib/rulebook/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annet/annlib/rulebook/common.py,sha256=hqwmmNofm5q2f-hV2usMY-IPMeiANLth28tZcRBYJTw,16640
@@ -131,14 +131,13 @@ annet/rulebook/huawei/bgp.py,sha256=dN8T3-44ggGEapt4u4sT3bTn_dsoCbS5qdNeSQ8LSTs,
  annet/rulebook/huawei/iface.py,sha256=DvLtQ7tfbDQWFmIYV4lxfih13Tdrt24L4-ZS29mCkuc,1126
  annet/rulebook/huawei/misc.py,sha256=Rpwhtm42IgcueDq4K6VOzN2qORoIDYh42Jb7iWL8AII,14424
  annet/rulebook/huawei/vlandb.py,sha256=B4BEUhZetjsNNhIJOp9cXtJSAYKMOgQucO8oAxSkRI0,4658
- annet/rulebook/juniper/__init__.py,sha256=WByFrqKCdr-ORCme2AURQGnYbN66t3omB3-9dWktT9o,3887
+ annet/rulebook/juniper/__init__.py,sha256=FnVaO7FHYz7O1ExRE6igYX7OKMUgx5_siC-aoHOLx1s,5380
  annet/rulebook/nexus/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annet/rulebook/nexus/iface.py,sha256=aeog9iSGT2zZ78tsGlrRcfgfOv7yW3jLwryXqdeplRw,2923
- annet/rulebook/ribbon/__init__.py,sha256=TRbkQVvk0-HxkUQW9-LmiG6XIfTZ-8t3SiaDemNCzK4,347
  annet/rulebook/routeros/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annet/rulebook/routeros/file.py,sha256=zK7RwBk1YaVoDSFSg1u7Pt8u0Fk3nhhu27aJRngemwc,137
  annet/rulebook/texts/arista.deploy,sha256=OS9eFyJpEPztcOHkBwajw_RTJfTT7ivaMHfx4_HXaUg,792
- annet/rulebook/texts/arista.order,sha256=e7e5pnzMZAfmXJ0sUaaWmep6JMeErNnd7DkouUVIZsU,1472
+ annet/rulebook/texts/arista.order,sha256=1jbKfF0USYi2b8V5qFlp4LjBAr3QGL9OkmoQqEK3PW8,1385
  annet/rulebook/texts/arista.rul,sha256=fGZe1L-x75rKQuRC5oLJMv1gSt1tJ0htOXfKt-tho64,853
  annet/rulebook/texts/aruba.deploy,sha256=hI432Bq-of_LMXuUflCu7eNSEFpx6qmj0KItEw6sgHI,841
  annet/rulebook/texts/aruba.order,sha256=ZMakkn0EJ9zomgY6VssoptJImrHrUmYnCqivzLBFTRo,1158
@@ -150,8 +149,9 @@ annet/rulebook/texts/cisco.deploy,sha256=Hu0NkcGv3f1CWUrnbzI3eQOPXJxtH4NNOPRV68I
  annet/rulebook/texts/cisco.order,sha256=OvNHMNqkCc-DN2dEjLCTKv_7ZhiaHt4q2X4Y4Z8dvR4,1901
  annet/rulebook/texts/cisco.rul,sha256=jgL5_xnSwd_H4E8cx4gcneSvJC5W1zz6_BWSb64iuxI,3017
  annet/rulebook/texts/huawei.deploy,sha256=uUsZCHUrC5Zyb_MePrR5svnE1QyGQlg7UxcKf00sJyg,10451
- annet/rulebook/texts/huawei.order,sha256=ENllPX4kO6xNw2mUQcx11yhxo3tKStZ5mUyc0C6s3d0,10657
+ annet/rulebook/texts/huawei.order,sha256=_X2LmxD9FGcIcb8ziwc13Imy1tBtQJgafrQGmvHPZLM,10649
  annet/rulebook/texts/huawei.rul,sha256=02Fi1RG4YYea2clHCluBuJDKNbT0hS9jtsk6_h6GK8k,12958
+ annet/rulebook/texts/juniper.order,sha256=PpxmcCgeaeP3TyYe3BWvtb24MKYV_BujjCH3HD4lsc8,256
  annet/rulebook/texts/juniper.rul,sha256=EmtrEJZesnmc2nXjURRD2G0WOq4zLluI_PNupKhSOJs,2654
  annet/rulebook/texts/nexus.deploy,sha256=9YNAQEw7aQxtYZJbE-dMD6qJrTzs_G92Ifrx3Ft4Wn4,1120
  annet/rulebook/texts/nexus.order,sha256=AZMKCD5Zf_mBOlE36aMDvO4w5rdbepTz1Dsyv7xP9Qs,1834
@@ -178,10 +178,10 @@ annet_generators/rpl_example/generator.py,sha256=zndIGfV4ZlTxPgAGYs7bMQvTc_tYScO
  annet_generators/rpl_example/items.py,sha256=Ez1RF5YhcXNCusBmeApIjRL3rBlMazNZd29Gpw1_IsA,766
  annet_generators/rpl_example/mesh.py,sha256=z_WgfDZZ4xnyh3cSf75igyH09hGvtexEVwy1gCD_DzA,288
  annet_generators/rpl_example/route_policy.py,sha256=z6nPb0VDeQtKD1NIg9sFvmUxBD5tVs2frfNIuKdM-5c,2318
- annet-1.0.4.dist-info/AUTHORS,sha256=rh3w5P6gEgqmuC-bw-HB68vBCr-yIBFhVL0PG4hguLs,878
- annet-1.0.4.dist-info/LICENSE,sha256=yPxl7dno02Pw7gAcFPIFONzx_gapwDoPXsIsh6Y7lC0,1079
- annet-1.0.4.dist-info/METADATA,sha256=X4P627F_6XM4xksFYwLxf9vqw-u7ou3br5iUkqwdFaA,851
- annet-1.0.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- annet-1.0.4.dist-info/entry_points.txt,sha256=5lIaDGlGi3l6QQ2ry2jZaqViP5Lvt8AmsegdD0Uznck,192
- annet-1.0.4.dist-info/top_level.txt,sha256=QsoTZBsUtwp_FEcmRwuN8QITBmLOZFqjssRfKilGbP8,23
- annet-1.0.4.dist-info/RECORD,,
+ annet-1.1.1.dist-info/AUTHORS,sha256=rh3w5P6gEgqmuC-bw-HB68vBCr-yIBFhVL0PG4hguLs,878
+ annet-1.1.1.dist-info/LICENSE,sha256=yPxl7dno02Pw7gAcFPIFONzx_gapwDoPXsIsh6Y7lC0,1079
+ annet-1.1.1.dist-info/METADATA,sha256=M6zpUiTyWxk5rg5WDIdJBv1XGl86GYa8v9_-4HlG26I,822
+ annet-1.1.1.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+ annet-1.1.1.dist-info/entry_points.txt,sha256=5lIaDGlGi3l6QQ2ry2jZaqViP5Lvt8AmsegdD0Uznck,192
+ annet-1.1.1.dist-info/top_level.txt,sha256=QsoTZBsUtwp_FEcmRwuN8QITBmLOZFqjssRfKilGbP8,23
+ annet-1.1.1.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.8.0)
+ Generator: setuptools (75.8.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,12 +0,0 @@
- from annet.annlib.rulebook import common
- from annet.annlib.types import Op
-
-
- def default_diff(old, new, diff_pre, _pops=(Op.AFFECTED,)):
- diff = common.default_diff(old, new, diff_pre, _pops)
- return diff
-
-
- def ordered_diff(old, new, diff_pre, _pops=(Op.AFFECTED,)):
- diff = common.ordered_diff(old, new, diff_pre, _pops)
- return diff