annet 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

annet/executor.py CHANGED
@@ -1,29 +1,13 @@
  import asyncio
- import logging
- import multiprocessing
  import os
- import platform
- import resource
- import signal
  import statistics
- import time
  from abc import ABC, abstractmethod
  from functools import partial
  from operator import itemgetter
- from queue import Empty
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+ from typing import Any, Dict, List, Optional, Union

  import colorama
- import psutil
-
- import annet.lib
  from annet.annlib.command import Command, CommandList, Question  # noqa: F401
- from annet.storage import Device
-
-
- _logger = logging.getLogger(__name__)
- FIRST_EXCEPTION = 1
- ALL_COMPLETED = 2


  class CommandResult(ABC):
@@ -54,19 +38,6 @@ class Connector(ABC):
          pass


- class Executor(ABC):
-     # method for bulk config downloading. TODO: remove in favor of Connector.cmd
-     @abstractmethod
-     def fetch(self,
-               devices: List[Device],
-               files_to_download: Dict[str, List[str]] = None) -> Tuple[Dict[Device, str], Dict[Device, Any]]:
-         pass
-
-     @abstractmethod
-     async def amake_connection(self, device: Device) -> Connector:
-         pass
-
-
  class ExecutorException(Exception):
      def __init__(self, *args: List[Any], auxiliary: Optional[Any] = None, **kwargs: object):
          self.auxiliary = auxiliary
@@ -103,117 +74,6 @@ class NonzeroRetcode(ExecException):
  class CommitException(ExecException):
      pass

- def chunks_tuple(l, n):  # noqa
-     return [tuple(l[i:i + n]) for i in range(0, len(l), n)]
-
-
- def async_bulk(
-     executor: Executor,
-     devices: List[Device],
-     coro_gen: Callable[[Connector, Device], Any],
-     *args,
-     processes: int = 1,
-     show_report: bool = True,
-     do_log: bool = True,
-     log_dir: Optional[str] = None,
-     policy: int = ALL_COMPLETED,
-     **kwargs,
- ):
-     """Connect to the specified devices and work with their CLI.
-
-     Note: this function must not be run in parallel, since it uses global state (TODO: fixme)
-
-     :param devices: List of devices whose CLI to use.
-     :param coro_gen: Async function containing all the logic for using the CLI.
-         See the docstring of "bind_coro_args" for the allowed function signature and examples.
-     :param args: Positional arguments for the "bulk" function.
-     :param processes: Number of processes to fork for the current work.
-     :param show_report: Set this flag to print a report to stdout.
-     :param do_log: If True and log_dir is not set, log_dir will be filled automatically.
-     :param log_dir: Path under which to log all responses from devices.
-     :param policy: int flag. If FIRST_EXCEPTION, work stops after the first error;
-         otherwise all hosts are processed.
-         TODO: fix that policy is not used if processes=1
-     :param kwargs: Other arguments to pass to the "bulk" function.
-         Note: they are not passed directly to the "coro_gen" function!
-         kwargs should be {'kwargs': {'var1': value1}} to set "var1" to "value1" in the "coro_gen" function.
-
-     TODOs:
-         * do not log if do_log=False and log_dir is set.
-
-     """
-     res = {}
-     deploy_durations = {}
-     kwargs["log_dir"] = log_dir
-     kwargs["policy"] = policy
-
-     if processes == 1:
-         host_res, host_duration = annet.lib.do_async(bulk(executor, devices, coro_gen, *args, **kwargs))
-         res.update(host_res)
-         deploy_durations.update(host_duration)
-     else:
-         # FIXME: show_report works per process
-         if len(devices) != len(set(devices)):
-             raise Exception("hostnames should be unique")
-         # warm up a cache
-         # asyncio.get_event_loop().run_until_complete(get_validator_rt_data(hostnames))
-         if isinstance(devices, dict):
-             devices = list(devices.keys())
-         hostnames_chunks = chunks_tuple(devices, int(len(devices) / processes) + 1)
-         pool = {}
-         for hostnames_chunk in hostnames_chunks:
-             res_q = multiprocessing.Queue()
-             p = multiprocessing.Process(target=_mp_async_bulk, args=[res_q, hostnames_chunk, coro_gen, *args], kwargs=kwargs)
-             pool[p] = [res_q, hostnames_chunk]
-             p.start()
-             _logger.info("process (id=%d) works with %d chunks", p.pid, len(hostnames_chunks))
-
-         seen_error = False
-         while True:
-             done = []
-             for p in pool:
-                 host_res = None
-                 try:
-                     # the process won't exit until q.get() is called
-                     host_res, host_duration = pool[p][0].get(timeout=0.2)
-                 except Empty:
-                     pass
-                 else:
-                     done.append(p)
-
-                 if not p.is_alive() and not host_res:
-                     _logger.error("process %s has died: hostnames: %s", p.pid, pool[p][1])
-                     host_res = {hostname: Exception("died with exitcode %s" % p.exitcode) for hostname in pool[p][1]}
-                     host_duration = {hostname: 0 for hostname in pool[p][1]}  # FIXME:
-                     done.append(p)
-
-                 if host_res:
-                     res.update(host_res)
-                     deploy_durations.update(host_duration)
-
-                 if p.exitcode:
-                     _logger.error("process %s finished with bad exitcode %s", p.pid, p.exitcode)
-                     seen_error = True
-             for p in done:
-                 pool.pop(p)
-             if policy == FIRST_EXCEPTION and seen_error:
-                 for p in pool:
-                     p.terminate()
-                     if p.is_alive():
-                         time.sleep(0.4)
-                     if p.is_alive():
-                         os.kill(p.pid, signal.SIGKILL)
-                     for hostname in pool[p][1]:
-                         res[hostname] = Exception("force kill with exitcode %s" % p.exitcode)
-                         deploy_durations[hostname] = 0  # FIXME:
-             if not pool:
-                 break
-
-     if show_report:
-         show_bulk_report(devices, res, deploy_durations, do_log and log_dir)
-
-     return res
-

  def _show_type_summary(caption, items, total, stat_items=None):
      if items:
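The removed async_bulk fanned work out across forked worker processes, each draining its own chunk of hosts and reporting results back through a multiprocessing.Queue. A minimal standalone sketch of that pattern (the run_chunk body and host names are placeholders, not annet APIs):

    import asyncio
    import multiprocessing

    def _worker(res_q: multiprocessing.Queue, hosts: tuple):
        # Each forked process runs its own event loop over its chunk of hosts
        # and ships the per-host results back through the queue.
        async def run_chunk():
            return {host: f"done {host}" for host in hosts}  # placeholder work
        res_q.put(asyncio.run(run_chunk()))
        res_q.close()

    def fan_out(hosts, processes=2):
        chunk = len(hosts) // processes + 1
        chunks = [tuple(hosts[i:i + chunk]) for i in range(0, len(hosts), chunk)]
        pool = []
        for hosts_chunk in chunks:
            q = multiprocessing.Queue()
            p = multiprocessing.Process(target=_worker, args=(q, hosts_chunk))
            p.start()
            pool.append((p, q))
        res = {}
        for p, q in pool:
            res.update(q.get())  # blocks until the child publishes its results
            p.join()
        return res

    if __name__ == "__main__":
        print(fan_out(["r1", "r2", "r3"]))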
@@ -314,139 +174,6 @@ def _print_failed(host, res):
      print(" %s - %s" % (color + host + colorama.Style.RESET_ALL, _format_exc(exc)))


- def _mp_async_bulk(res_q: multiprocessing.Queue, *args, **kwargs):
-     res = annet.lib.do_async(bulk(*args, **kwargs))
-     res_q.put(res)
-     res_q.close()
-
-
- async def bulk(
-     executor: Executor,
-     devices: List[Device],
-     coro_gen: Callable[[Connector, Device, Optional[Dict[str, Any]]], Any],
-     max_parallel: float = 100,
-     policy: int = ALL_COMPLETED,
-     log_dir: str = True,  # pylint: disable=unused-argument
-     kwargs: Optional[dict] = None,
-     console_log: bool = True
- ) -> Tuple[Dict[str, Any], Dict[str, float]]:
-     """Connect to the specified devices and work with their CLI.
-
-     :param devices: List of devices whose CLI to use.
-     :param coro_gen: Async function containing all the logic for using the CLI.
-         See the docstring of "bind_coro_args" for the allowed function signature and examples.
-     :param max_parallel: Upper bound on CPU usage (as a percentage, 1 CPU = 100).
-         If CPU usage exceeds it, tasks are throttled.
-     :param policy: Flag specifying when tasks are considered completed.
-     :param log_dir: Path under which to log all responses from devices.
-         TODO: fix default value.
-     :param kwargs: Device-independent arguments for the called function. See @bind_coro_args for details.
-     :param get_device: See "make_connection" for better understanding.
-     :param device_cls: See "make_connection" for better understanding.
-     :param streamer_cls: See "make_connection" for better understanding.
-     :param console_log: If True and the root logger has no handlers, stderr will be used for logging.
-     :return: two dicts with results per host and execution duration per host.
-
-     """
-     if console_log:
-         init_log()
-
-     tasks = []
-     res = {}
-     pending = set()
-     tasks_to_device = {}
-     time_of_start = {}
-     deploy_durations = {}
-     now = None
-     if not kwargs:
-         kwargs = {}
-
-     def start_hook(device: Device):
-         time_of_start[device.hostname] = time.monotonic()
-
-     def end_hook(device: Device, task: asyncio.Task):
-         duration = now - time_of_start[device.hostname]
-         deploy_durations[device.hostname] = duration
-
-         coro_exc = task.exception()
-         if coro_exc:
-             if policy == FIRST_EXCEPTION:
-                 _logger.error("%s %r", device.hostname, coro_exc, exc_info=coro_exc)
-                 _logger.info("Terminating all running tasks according to FIRST_EXCEPTION policy")
-                 res[device.hostname] = coro_exc
-                 raise CancelAllTasks
-             else:
-                 if isinstance(coro_exc, AssertionError):
-                     _logger.error("%s %r", device.hostname, coro_exc, exc_info=coro_exc)
-                 else:
-                     _logger.error("%s %r", device.hostname, coro_exc)
-                 return coro_exc
-         else:
-             _logger.info("Finished in %0.2fs, hostname=%s", duration, device.hostname)
-             return task.result()
-
-     for device in devices:
-         try:
-             conn = await executor.amake_connection(device=device)
-         except Exception as exc:
-             _logger.error("failed to connect to %s %r", device.hostname, exc)
-             res[device] = exc
-             continue
-         start_hook(device)
-         task = asyncio.create_task(coro_gen(conn=conn, device=device, **kwargs))
-         tasks_to_device[task] = device
-         tasks.append(task)
-     try:
-         ndone = 0
-         with CpuThrottler(asyncio.get_event_loop(), maximum=max_parallel) as throttler:
-             while pending or tasks:
-                 left_fds = int(fd_left() / 2)  # better to be safe than sorry
-
-                 for _ in range(min(throttler.curr - len(pending), len(tasks), left_fds)):
-                     pending.add(tasks.pop(0))
-                 if len(pending) == 0:
-                     _logger.debug("empty pending list. tasks=%s throttler curr=%s left_fds=%s. waiting", len(tasks),
-                                   throttler.curr, left_fds)
-                     await asyncio.sleep(1)
-                     continue
-                 example_host = next(iter(pending))
-                 _logger.debug("tasks status: %d pending, %d queued, pending example %s", len(pending), len(tasks), example_host)
-                 done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
-
-                 now = time.monotonic()
-                 for task in done:
-                     device = tasks_to_device[task]
-                     res[device] = end_hook(device, task)
-                     ndone += 1
-     except CancelAllTasks:
-         exc = asyncio.CancelledError()
-
-         now = time.monotonic()
-         for device, task in _get_remaining(tasks, pending, tasks_to_device):
-             res[device] = exc
-
-             if device.hostname in time_of_start:
-                 duration = now - time_of_start[device.hostname]
-             else:
-                 duration = None
-             deploy_durations[device.hostname] = duration
-
-             if not asyncio.iscoroutine(task):
-                 _logger.info("task %s", task)
-                 task.cancel()
-
-     return res, deploy_durations
-
-
- def init_log():
-     streamer = logging.StreamHandler()
-     fmt = logging.Formatter("%(asctime)s - %(filename)s:%(lineno)d - %(funcName)s() - %(levelname)s - %(message)s",
-                             "%Y-%m-%d %H:%M:%S")
-     streamer.setFormatter(fmt)
-     if not logging.root.handlers:
-         logging.root.addHandler(streamer)
-

  class DeferredFileWrite:
      def __init__(self, file, mode="r"):
          self._file = file
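The core of the removed bulk() is an admission loop: queued work is promoted into a bounded pending set, and asyncio.wait(..., return_when=FIRST_COMPLETED) harvests whichever task finishes first. A self-contained sketch of that loop with a fixed slot budget; the work coroutine stands in for coro_gen, and unlike the removed code, which created every task up front, this sketch creates tasks at admission time so the budget truly bounds concurrency:

    import asyncio

    async def work(host: str) -> str:
        await asyncio.sleep(0.1)  # stand-in for real CLI interaction
        return f"ok {host}"

    async def bounded_wait(hosts, slots=2):
        queued = list(hosts)
        pending, names, res = set(), {}, {}
        while pending or queued:
            # Admit hosts until the slot budget is used up.
            while queued and len(pending) < slots:
                host = queued.pop(0)
                task = asyncio.create_task(work(host))
                names[task] = host
                pending.add(task)
            done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
            for task in done:
                res[names[task]] = task.result()
        return res

    print(asyncio.run(bounded_wait(["r1", "r2", "r3", "r4"])))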
@@ -465,87 +192,3 @@ class DeferredFileWrite:

      def flush(self):
          pass
-
-
- class CancelAllTasks(Exception):
-     pass
-
-
- def _get_remaining(tasks, pending, tasks_to_device):
-     for task in pending:
-         yield (tasks_to_device[task], task)
-     for task in tasks:
-         yield (tasks_to_device[task], task)
-
-
- _platform = platform.system()
- _fd_available = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
-
-
- def fd_left():
-     res = _fd_available
-     if _platform == "Linux":
-         res = _fd_available - len(list(os.scandir(path="/proc/self/fd/")))
-     return res
-
-
- class CpuThrottler:
-     def __init__(self, loop, start=20, maximum=None, minimum=5, hz=1.0, target=80.0):
-         self.loop = loop
-         self.minimum = int(minimum)
-         self.maximum = int(maximum or 0)
-         self.hz = hz
-         self.target = target
-         self.timer_handle = None
-         self.last_usage = 0
-         self.curr = int(start)
-         self.proc = psutil.Process(os.getpid())
-
-     def __enter__(self):
-         if self.proc and self.maximum:
-             self.proc.cpu_percent()  # initialize previous value
-             self.timer_handle = self.loop.call_later(self.hz, self.schedule)
-         return self
-
-     def __exit__(self, type_, value, tb):
-         if self.timer_handle:
-             self.timer_handle.cancel()
-
-     def schedule(self):
-         # re-schedule
-         self.timer_handle = self.loop.call_later(self.hz, self.schedule)
-
-         cpu_usage = self.proc.cpu_percent()
-         self.last_usage = cpu_usage
-         _logger.debug("current cpu_usage=%s", cpu_usage)
-         if cpu_usage > self.target:
-             self.change_by(0.5)
-         elif cpu_usage > self.target * 0.8:
-             pass
-         elif cpu_usage > self.target * 0.2:
-             self.change_by(1.2)
-         else:
-             self.change_by(1.5)
-
-     def change_by(self, rate):
-         new_curr = int(self.curr * rate)
-         # round in the right direction
-         if new_curr == self.curr:
-             if rate > 1:
-                 new_curr += 1
-             elif rate < 1:
-                 new_curr -= 1
-         # clamp to the limits
-         if new_curr < self.curr:
-             new_curr = max(self.minimum, new_curr)
-         else:
-             if self.maximum is not None:
-                 new_curr = min(self.maximum, new_curr)
-
-         if new_curr < self.curr:
-             _logger.info("decreasing max_slots %d -> %d, cpu_usage=%.1f", self.curr, new_curr, self.last_usage)
-         elif new_curr > self.curr:
-             _logger.info("increasing max_slots %d -> %d, cpu_usage=%.1f", self.curr, new_curr, self.last_usage)
-
-         # never let new_curr drop below 1, otherwise it could never grow again
-         self.curr = max(new_curr, 1)
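The removed CpuThrottler adjusts its slot count with a multiplicative feedback rule around a CPU usage target. The rule is easy to isolate; the following pure-Python rendering uses the same thresholds as CpuThrottler.schedule and change_by, with no psutil or event loop:

    def next_slots(curr: int, cpu: float, target: float = 80.0,
                   minimum: int = 5, maximum: int = 100) -> int:
        # Same thresholds as the removed code: halve when over target, hold
        # in the 80-100% band of target, grow gently below it, grow fast when idle.
        if cpu > target:
            rate = 0.5
        elif cpu > target * 0.8:
            return curr
        elif cpu > target * 0.2:
            rate = 1.2
        else:
            rate = 1.5
        new = int(curr * rate)
        if new == curr:  # force at least one step in the intended direction
            new += 1 if rate > 1 else -1
        new = max(minimum, new) if new < curr else min(maximum, new)
        return max(new, 1)  # never drop below 1, or the count could not grow again

    slots = 20
    for cpu in (10, 30, 85, 95, 60):
        slots = next_slots(slots, cpu)
        print(cpu, "->", slots)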
@@ -76,10 +76,21 @@ class VrfOptions(_FamiliesMixin, BaseMeshModel):
      groups: Annotated[dict[str, MeshPeerGroup], DictMerge(Merge())]


+ class L2VpnOptions(BaseMeshModel):
+     name: str
+     vid: str | int  # VLAN ID, possible values are 1 to 4094; ranges can be set as strings
+     l2vni: int  # VNI, possible values are 1 to 2**24-1
+     route_distinguisher: str  # like in VrfOptions
+     rt_import: Annotated[tuple[str, ...], Concat()]  # like in VrfOptions
+     rt_export: Annotated[tuple[str, ...], Concat()]  # like in VrfOptions
+     advertise_host_routes: bool  # advertise IP+MAC routes into the L3VNI
+
+
  class GlobalOptionsDTO(_FamiliesMixin, BaseMeshModel):
      def __init__(self, **kwargs):
          kwargs.setdefault("groups", KeyDefaultDict(lambda x: MeshPeerGroup(name=x)))
          kwargs.setdefault("vrf", KeyDefaultDict(lambda x: VrfOptions(vrf_name=x)))
+         kwargs.setdefault("l2vpn", KeyDefaultDict(lambda x: L2VpnOptions(name=x)))
          super().__init__(**kwargs)

      as_path_relax: bool
@@ -89,3 +100,4 @@ class GlobalOptionsDTO(_FamiliesMixin, BaseMeshModel):
      router_id: str
      vrf: Annotated[dict[str, VrfOptions], DictMerge(Merge())]
      groups: Annotated[dict[str, MeshPeerGroup], DictMerge(Merge())]
+     l2vpn: Annotated[dict[str, L2VpnOptions], DictMerge(Merge())]
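The two hunks above (their file header was lost in this extract) extend the mesh models with L2VPN/EVPN options. Thanks to the KeyDefaultDict default, an l2vpn entry springs into existence on first access, just as vrf and groups already do. A hedged sketch of how a mesh rule might fill one in; the rule signature and field values are illustrative, not an API documented in this diff:

    # Hypothetical mesh rule: "global_opts" is a GlobalOptionsDTO handed in
    # by the mesh framework; the l2vpn entry is auto-created on first access.
    def evpn_rule(global_opts):
        l2vpn = global_opts.l2vpn["customers"]  # KeyDefaultDict -> L2VpnOptions(name="customers")
        l2vpn.vid = "100-105,200"               # ranges can be set as strings
        l2vpn.l2vni = 10100
        l2vpn.route_distinguisher = "10.0.0.1:100"
        l2vpn.rt_import = ("65000:100",)
        l2vpn.rt_export = ("65000:100",)
        l2vpn.advertise_host_routes = True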
@@ -7,7 +7,7 @@ from adaptix import Retort, loader, Chain, name_mapping, as_is_loader
  from .peer_models import DirectPeerDTO, IndirectPeerDTO, VirtualPeerDTO, VirtualLocalDTO
  from ..bgp_models import (
      Aggregate, GlobalOptions, VrfOptions, FamilyOptions, Peer, PeerGroup, ASN, PeerOptions,
-     Redistribute, BFDTimers,
+     Redistribute, BFDTimers, L2VpnOptions, VidCollection,
  )

@@ -49,8 +49,10 @@ retort = Retort(
      recipe=[
          loader(InterfaceChanges, ObjMapping, Chain.FIRST),
          loader(ASN, ASN),
+         loader(VidCollection, VidCollection.parse),
          loader(GlobalOptions, ObjMapping, Chain.FIRST),
          loader(VrfOptions, ObjMapping, Chain.FIRST),
+         loader(L2VpnOptions, ObjMapping, Chain.FIRST),
          loader(FamilyOptions, ObjMapping, Chain.FIRST),
          loader(Aggregate, ObjMapping, Chain.FIRST),
          loader(PeerOptions, ObjMapping, Chain.FIRST),
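VidCollection.parse itself is not part of this diff; only its registration as an adaptix loader is. Given the vid comment above ("ranges can be set as strings"), the loader presumably normalizes inputs such as "1,3-5". A hypothetical parser in that spirit; the name parse_vids and its behavior are assumptions, not the library's implementation:

    def parse_vids(value) -> tuple[int, ...]:
        # Accept a single VLAN ID or a string such as "1,3-5"; reject IDs
        # outside the valid 1..4094 range mentioned in the model comment.
        if isinstance(value, int):
            items = [value]
        else:
            items = []
            for part in str(value).split(","):
                if "-" in part:
                    lo, hi = (int(x) for x in part.split("-", 1))
                    items.extend(range(lo, hi + 1))
                else:
                    items.append(int(part))
        for vid in items:
            if not 1 <= vid <= 4094:
                raise ValueError(f"VLAN ID out of range: {vid}")
        return tuple(items)

    print(parse_vids("1,3-5"))  # (1, 3, 4, 5)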
@@ -0,0 +1,63 @@
+ from dataclasses import dataclass
+
+ from annet.annlib.types import Op
+
+
+ @dataclass
+ class UserConfig:
+     name: str
+     privilege: int
+     role: str
+     secret_type: str
+     secret: str
+
+
+ def _parse_user_config(config_line):
+     """Convert a user config line into a dataclass. Config example:
+
+     username someuser privilege 15 role network-admin secret sha512 $6$....
+
+     privilege could be omitted if equal to 1
+     role could be omitted
+     secret could be omitted; 'nopassword' is provided instead
+     """
+     splstr = config_line.split()
+     name = splstr[1]
+     priv = 1
+     role = ""
+     secret_type = ""
+     secret = ""
+     if "privilege" in splstr:
+         pos = splstr.index("privilege")
+         priv = int(splstr[pos + 1])
+     if "role" in splstr:
+         pos = splstr.index("role")
+         role = splstr[pos + 1]
+     if "secret" in splstr:
+         pos = splstr.index("secret")
+         secret_type = splstr[pos + 1]
+         secret = splstr[pos + 2]
+     return UserConfig(name=name, privilege=priv, role=role, secret_type=secret_type, secret=secret)
+
+
+ def user(key, diff, **_):
+     if diff[Op.ADDED] and not diff[Op.REMOVED]:
+         for add in diff[Op.ADDED]:
+             yield (True, add["row"], None)
+     elif diff[Op.REMOVED] and not diff[Op.ADDED]:
+         for _ in diff[Op.REMOVED]:
+             yield (False, f"no username {key[0]}", None)
+     else:
+         for num, add in enumerate(diff[Op.ADDED]):
+             new_user = _parse_user_config(add["row"])
+             old_user = _parse_user_config(diff[Op.REMOVED][num]["row"])
+             if new_user.privilege != old_user.privilege:
+                 yield (True, f"username {key[0]} privilege {new_user.privilege}", None)
+             if new_user.role != old_user.role and not new_user.role:
+                 yield (True, f"no username {key[0]} role", None)
+             elif new_user.role != old_user.role:
+                 yield (True, f"username {key[0]} role {new_user.role}", None)
+             if new_user.secret != old_user.secret and not new_user.secret:
+                 yield (True, f"username {key[0]} nopassword", None)
+             elif new_user.secret != old_user.secret:
+                 yield (True, f"username {key[0]} secret {new_user.secret_type} {new_user.secret}", None)
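The new rule yields (add, command, children) tuples for the generated patch. A quick illustration of the modify branch, assuming this module is importable and using a hand-built diff mapping shaped like the add["row"] accesses above (usernames and hashes are illustrative):

    from annet.annlib.types import Op

    # Old line: implicit privilege 1, no role; new line: privilege 15 plus a role.
    diff = {
        Op.ADDED: [{"row": "username alice privilege 15 role network-admin secret sha512 $6$new"}],
        Op.REMOVED: [{"row": "username alice secret sha512 $6$old"}],
    }
    for add, cmd, _children in user(("alice",), diff):
        print(add, cmd)
    # True username alice privilege 15
    # True username alice role network-admin
    # True username alice secret sha512 $6$new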