annet-0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of annet might be problematic. Click here for more details.

Files changed (137)
  1. annet/__init__.py +61 -0
  2. annet/adapters/__init__.py +0 -0
  3. annet/adapters/netbox/__init__.py +0 -0
  4. annet/adapters/netbox/common/__init__.py +0 -0
  5. annet/adapters/netbox/common/client.py +87 -0
  6. annet/adapters/netbox/common/manufacturer.py +62 -0
  7. annet/adapters/netbox/common/models.py +105 -0
  8. annet/adapters/netbox/common/query.py +23 -0
  9. annet/adapters/netbox/common/status_client.py +25 -0
  10. annet/adapters/netbox/common/storage_opts.py +14 -0
  11. annet/adapters/netbox/provider.py +34 -0
  12. annet/adapters/netbox/v24/__init__.py +0 -0
  13. annet/adapters/netbox/v24/api_models.py +73 -0
  14. annet/adapters/netbox/v24/client.py +59 -0
  15. annet/adapters/netbox/v24/storage.py +196 -0
  16. annet/adapters/netbox/v37/__init__.py +0 -0
  17. annet/adapters/netbox/v37/api_models.py +38 -0
  18. annet/adapters/netbox/v37/client.py +62 -0
  19. annet/adapters/netbox/v37/storage.py +149 -0
  20. annet/annet.py +25 -0
  21. annet/annlib/__init__.py +7 -0
  22. annet/annlib/command.py +49 -0
  23. annet/annlib/diff.py +158 -0
  24. annet/annlib/errors.py +8 -0
  25. annet/annlib/filter_acl.py +196 -0
  26. annet/annlib/jsontools.py +116 -0
  27. annet/annlib/lib.py +495 -0
  28. annet/annlib/netdev/__init__.py +0 -0
  29. annet/annlib/netdev/db.py +62 -0
  30. annet/annlib/netdev/devdb/__init__.py +28 -0
  31. annet/annlib/netdev/devdb/data/devdb.json +137 -0
  32. annet/annlib/netdev/views/__init__.py +0 -0
  33. annet/annlib/netdev/views/dump.py +121 -0
  34. annet/annlib/netdev/views/hardware.py +112 -0
  35. annet/annlib/output.py +246 -0
  36. annet/annlib/patching.py +533 -0
  37. annet/annlib/rbparser/__init__.py +0 -0
  38. annet/annlib/rbparser/acl.py +120 -0
  39. annet/annlib/rbparser/deploying.py +55 -0
  40. annet/annlib/rbparser/ordering.py +52 -0
  41. annet/annlib/rbparser/platform.py +51 -0
  42. annet/annlib/rbparser/syntax.py +115 -0
  43. annet/annlib/rulebook/__init__.py +0 -0
  44. annet/annlib/rulebook/common.py +350 -0
  45. annet/annlib/tabparser.py +648 -0
  46. annet/annlib/types.py +35 -0
  47. annet/api/__init__.py +826 -0
  48. annet/argparse.py +415 -0
  49. annet/cli.py +237 -0
  50. annet/cli_args.py +503 -0
  51. annet/configs/context.yml +18 -0
  52. annet/configs/logging.yaml +39 -0
  53. annet/connectors.py +77 -0
  54. annet/deploy.py +536 -0
  55. annet/diff.py +84 -0
  56. annet/executor.py +551 -0
  57. annet/filtering.py +40 -0
  58. annet/gen.py +865 -0
  59. annet/generators/__init__.py +435 -0
  60. annet/generators/base.py +136 -0
  61. annet/generators/common/__init__.py +0 -0
  62. annet/generators/common/initial.py +33 -0
  63. annet/generators/entire.py +97 -0
  64. annet/generators/exceptions.py +10 -0
  65. annet/generators/jsonfragment.py +125 -0
  66. annet/generators/partial.py +119 -0
  67. annet/generators/perf.py +79 -0
  68. annet/generators/ref.py +15 -0
  69. annet/generators/result.py +127 -0
  70. annet/hardware.py +45 -0
  71. annet/implicit.py +139 -0
  72. annet/lib.py +128 -0
  73. annet/output.py +167 -0
  74. annet/parallel.py +448 -0
  75. annet/patching.py +25 -0
  76. annet/reference.py +148 -0
  77. annet/rulebook/__init__.py +114 -0
  78. annet/rulebook/arista/__init__.py +0 -0
  79. annet/rulebook/arista/iface.py +16 -0
  80. annet/rulebook/aruba/__init__.py +16 -0
  81. annet/rulebook/aruba/ap_env.py +146 -0
  82. annet/rulebook/aruba/misc.py +8 -0
  83. annet/rulebook/cisco/__init__.py +0 -0
  84. annet/rulebook/cisco/iface.py +68 -0
  85. annet/rulebook/cisco/misc.py +57 -0
  86. annet/rulebook/cisco/vlandb.py +90 -0
  87. annet/rulebook/common.py +19 -0
  88. annet/rulebook/deploying.py +87 -0
  89. annet/rulebook/huawei/__init__.py +0 -0
  90. annet/rulebook/huawei/aaa.py +75 -0
  91. annet/rulebook/huawei/bgp.py +97 -0
  92. annet/rulebook/huawei/iface.py +33 -0
  93. annet/rulebook/huawei/misc.py +337 -0
  94. annet/rulebook/huawei/vlandb.py +115 -0
  95. annet/rulebook/juniper/__init__.py +107 -0
  96. annet/rulebook/nexus/__init__.py +0 -0
  97. annet/rulebook/nexus/iface.py +92 -0
  98. annet/rulebook/patching.py +143 -0
  99. annet/rulebook/ribbon/__init__.py +12 -0
  100. annet/rulebook/texts/arista.deploy +20 -0
  101. annet/rulebook/texts/arista.order +125 -0
  102. annet/rulebook/texts/arista.rul +59 -0
  103. annet/rulebook/texts/aruba.deploy +20 -0
  104. annet/rulebook/texts/aruba.order +83 -0
  105. annet/rulebook/texts/aruba.rul +87 -0
  106. annet/rulebook/texts/cisco.deploy +27 -0
  107. annet/rulebook/texts/cisco.order +82 -0
  108. annet/rulebook/texts/cisco.rul +105 -0
  109. annet/rulebook/texts/huawei.deploy +188 -0
  110. annet/rulebook/texts/huawei.order +388 -0
  111. annet/rulebook/texts/huawei.rul +471 -0
  112. annet/rulebook/texts/juniper.rul +120 -0
  113. annet/rulebook/texts/nexus.deploy +24 -0
  114. annet/rulebook/texts/nexus.order +85 -0
  115. annet/rulebook/texts/nexus.rul +83 -0
  116. annet/rulebook/texts/nokia.rul +31 -0
  117. annet/rulebook/texts/pc.order +5 -0
  118. annet/rulebook/texts/pc.rul +9 -0
  119. annet/rulebook/texts/ribbon.deploy +22 -0
  120. annet/rulebook/texts/ribbon.rul +77 -0
  121. annet/rulebook/texts/routeros.order +38 -0
  122. annet/rulebook/texts/routeros.rul +45 -0
  123. annet/storage.py +125 -0
  124. annet/tabparser.py +36 -0
  125. annet/text_term_format.py +95 -0
  126. annet/tracing.py +170 -0
  127. annet/types.py +227 -0
  128. annet-0.0.dist-info/AUTHORS +21 -0
  129. annet-0.0.dist-info/LICENSE +21 -0
  130. annet-0.0.dist-info/METADATA +26 -0
  131. annet-0.0.dist-info/RECORD +137 -0
  132. annet-0.0.dist-info/WHEEL +5 -0
  133. annet-0.0.dist-info/entry_points.txt +5 -0
  134. annet-0.0.dist-info/top_level.txt +2 -0
  135. annet_generators/__init__.py +0 -0
  136. annet_generators/example/__init__.py +12 -0
  137. annet_generators/example/lldp.py +53 -0
annet/parallel.py ADDED
@@ -0,0 +1,448 @@
1
+ import asyncio
2
+ import dataclasses
3
+ import enum
4
+ import faulthandler
5
+ import inspect
6
+ import multiprocessing as mp
7
+ import os
8
+ import pickle
9
+ import platform
10
+ import queue
11
+ import re
12
+ import signal
13
+ import sys
14
+ import tempfile
15
+ import time
16
+ import traceback
17
+ import warnings
18
+ from typing import Any, List, Optional, Type
19
+ from uuid import uuid4
20
+
21
+ from contextlog import get_logger
22
+
23
+ import annet
24
+ from annet import tracing
25
+ from annet.lib import catch_ctrl_c, find_exc_in_stack
26
+ from annet.output import capture_output
27
+ from annet.tracing import tracing_connector
28
+
29
+
30
class PoolWorkerTaskType(enum.Enum):
    """Kinds of messages a pool worker accepts from the task queue."""

    # Run the pool's target function on the task payload.
    INVOKE = "invoke"
    # Ask the worker loop to terminate.
    STOP = "stop"
33
+
34
+
35
@dataclasses.dataclass(frozen=True)
class PoolWorkerTask:
    """Immutable message exchanged over the worker task queue."""

    # What the worker should do: INVOKE runs the pool's function, STOP ends the loop.
    type: PoolWorkerTaskType
    # Argument for INVOKE tasks (a device id, or a tuple); None for STOP messages.
    payload: Optional[Any] = None
39
+
40
+
41
class PickleSafeException(Exception):
    """An exception that can be safely pickled and passed between processes.

    Only primitive, picklable pieces of the original exception are kept:
    its class, its message text, the id of the device being processed, and
    the pre-formatted traceback output.
    """

    def __init__(self, orig_exc_cls: Type[Exception], orig_exc_msg: str, device_id: str, formatted_output: str) -> None:
        self.orig_exc_cls = orig_exc_cls
        self.orig_exc_msg = orig_exc_msg
        self.device_id = device_id
        self.formatted_output = formatted_output
        # Forwarding all fields to Exception.__init__ keeps the instance
        # picklable: pickle reconstructs it from these constructor args.
        super().__init__(orig_exc_cls, orig_exc_msg, device_id, formatted_output)

    def __repr__(self) -> str:
        return "PickleSafeException<%s(%r)>" % (self.orig_exc_cls.__name__, self.orig_exc_msg)

    def __str__(self) -> str:
        return f"An exception for device {self.device_id}:\n{self.formatted_output}"

    @staticmethod
    def from_exc(orig_exc: Exception, device_id: str, formatted_output: str) -> "PickleSafeException":
        """Build a pickle-safe copy of ``orig_exc``."""
        return PickleSafeException(type(orig_exc), str(orig_exc), device_id, formatted_output)
65
+
66
+
67
@catch_ctrl_c
def pool_worker(pool, index, task_queue, done_queue, context_carrier):
    """Entry point of a worker subprocess.

    Restores the tracing context propagated from the parent process via
    ``context_carrier``, replaces the asyncio event loop inherited through
    fork, and hands control to the main task loop in ``_pool_worker``.
    """
    # Dump tracebacks of all threads on SIGUSR1; the parent sends this
    # signal before terminating a worker on task timeout (see Parallel.irun).
    faulthandler.register(signal.SIGUSR1)

    tracing_connector.get().attach_context(tracing_connector.get().extract_context(context_carrier))

    with warnings.catch_warnings():
        # get_event_loop() outside a running loop emits DeprecationWarning
        # on newer Pythons; we only want to close whatever loop was inherited.
        warnings.simplefilter("ignore", category=DeprecationWarning)
        try:
            # The loop inherited from the parent process must not be reused.
            asyncio.get_event_loop().close()
        except Exception:
            pass
        asyncio.set_event_loop(asyncio.new_event_loop())

    return _pool_worker(pool, index, task_queue, done_queue)
82
+
83
+
84
@tracing.function(flush=True)
def _pool_worker(pool, index, task_queue, done_queue):
    """Main loop of a worker subprocess.

    Pulls ``PoolWorkerTask`` messages from ``task_queue`` until a STOP
    message arrives.  For each INVOKE task it runs ``pool.func`` (with
    network-error retries via ``invoke_retry``), runs the pool's in-thread
    callbacks, and reports a ``(worker_name, task, results, exception)``
    tuple on ``done_queue``.  Exits with code 9 ("retired") once
    ``pool.max_tasks`` tasks have been processed.
    """
    worker_id = uuid4().hex

    pool_span = tracing_connector.get().get_current_span()
    if pool_span:
        pool_span.update_name("pool_worker")
        pool_span.set_attribute("worker.id", worker_id)
        pool_span.set_attribute("worker.index", index)

    # Scratch files for capturing the invoked function's stdout/stderr.
    cap_stdout = tempfile.TemporaryFile(mode="w+") if pool.capture_output else None
    cap_stderr = tempfile.TemporaryFile(mode="w+") if pool.capture_output else None

    _logger = get_logger()
    tasks_done = 0
    device_ids = []
    worker_name = mp.current_process().name

    while True:
        task: PoolWorkerTask = task_queue.get()
        if task.type == PoolWorkerTaskType.STOP:
            _logger.debug("I received STOP, terminating...")
            return

        # Derive a human-readable device id from the payload.  Tuple payloads
        # are treated as file paths and the id is the basename before ".cfg".
        # NOTE(review): the dot in the regex is unescaped, so it matches any
        # character — presumably a literal "." was intended; confirm.
        device_id: Optional[str] = None
        if isinstance(task.payload, tuple):
            if len(task.payload) > 0:
                match = re.search(r"([^/]+).cfg", task.payload[0])
                if match:
                    device_id = match.group(1)
        else:
            device_id = str(task.payload)

        if device_id:
            device_ids.append(device_id)

        if pool_span:
            pool_span.set_attribute("device.ids", device_ids)

        task_result = TaskResult(worker_name, task.payload)
        task_result.extra["start_time"] = time.monotonic()
        ret_exc = None

        try:
            with tracing_connector.get().start_as_current_span(
                "pool_worker.invoke",
            ) as span:
                if device_id:
                    span.set_attribute("device.id", device_id)

                name = "invoke"
                invoke_span_ctx = tracing_connector.get().start_as_linked_span(name, tracer_name=__name__)
                capture_output_ctx = capture_output(cap_stdout, cap_stderr)

                with invoke_span_ctx as invoke_span, capture_output_ctx as _:
                    invoke_trace_id = invoke_span.get_span_context().trace_id
                    if invoke_trace_id:
                        span.set_attribute(
                            "link",
                            f"https://t.yandex-team.ru/trace/{invoke_trace_id:x}"
                        )
                    invoke_span.set_attribute("func", pool.func.__name__)
                    invoke_span.set_attribute("worker.id", worker_id)
                    if device_id:
                        invoke_span.set_attribute("device.id", device_id)

                    _logger.warning("Worker-%d start invoke %s", index, device_id)
                    task_result.result = invoke_retry(
                        pool.func,
                        pool.net_retry,
                        task.payload,
                        *pool.args,
                        **pool.kwargs
                    )
                    _logger.warning("Worker-%d finish invoke %s", index, device_id)

                    # Check if the result is picklable and throw an exception if not.
                    # Otherwise the exception will be thrown inside the multiprocessing
                    # code and we won't be able to handle it.
                    pickle.dumps(task_result.result)
        except KeyboardInterrupt:  # pylint: disable=try-except-raise
            raise
        except Exception as exc:
            # NOTE(review): the raw task.payload (not the parsed device_id)
            # is recorded as the exception's device id — confirm intent.
            safe_exc = PickleSafeException.from_exc(exc, task.payload, traceback.format_exc())
            ret_exc = safe_exc
            task_result.exc = safe_exc
            task_result.result = None
        if pool.capture_output:
            # NOTE(review): read() here starts at the current file position —
            # assumes capture_output leaves the files positioned for reading
            # (e.g. rewinds them); confirm against annet.output.capture_output.
            task_result.extra["cap_stdout"] = cap_stdout.read()
            task_result.extra["cap_stderr"] = cap_stderr.read()

        results = list(pool._run_callbacks(task_result, in_thread=True))  # pylint: disable=protected-access
        done_queue.put((worker_name, task, results, ret_exc))

        tasks_done += 1
        if pool.max_tasks and tasks_done >= pool.max_tasks:
            _logger.debug("Maximum tasks limit reached. Now I can retire")
            tracing_connector.get().force_flush()
            # Exit code 9 is interpreted by Parallel._check_children as a
            # voluntary retirement; the parent restarts such workers.
            sys.exit(9)
183
+
184
+
185
class TaskResult:
    """Outcome of one task: the producing worker, the task key, and either a
    result or an exception, plus free-form extras (timings, captured output).
    """

    def __init__(self, worker_name, device_id, result=None, exc=None):
        self.worker_name = worker_name
        self.device_id = device_id
        self.result = result
        self.exc = exc
        # Auxiliary data such as "start_time", "cap_stdout", "cap_stderr".
        self.extra = {}

    def __repr__(self):
        return (
            f"TaskResult(worker_name={self.worker_name}, device_id={self.device_id}, "
            f"result={self.result}, exc={self.exc}, extra={self.extra})"
        )
196
+
197
+
198
class Parallel:
    """Runs ``func`` over a collection of device ids, either in the current
    process or in a pool of worker subprocesses.

    ``func`` is invoked as ``func(device_id, *args, **kwargs)`` through
    ``invoke_retry``.  Each completed task is wrapped in a ``TaskResult``
    and passed through registered callbacks before being yielded/returned.
    """

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.callbacks = []             # run in the parent process
        self.in_thread_callbacks = []   # run inside the worker process
        self.parallel = mp.cpu_count()  # upper bound on pool size
        self.task_timeout = 1800  # maximum seconds to wait for next task done
        self.max_tasks = 25       # tasks per worker before it retires (exit code 9)
        self.tasks_done = 0
        self.net_retry = 3        # network-error retries, see invoke_retry
        self.capture_output = False  # capture stdout/stderr into TaskResult.extra

    def tune(self, **kwargs):
        """Override existing attributes by keyword; raise on unknown names."""
        for (kw, arg) in kwargs.items():
            if not hasattr(self, kw):
                raise ValueError("Can not tune Parallel.%s: attribute doesn't exist" % kw)
            setattr(self, kw, arg)
        return self

    def tune_args(self, args):
        """Copy attributes that exist on both ``self`` and a parsed-args object."""
        for kw in args._enum_args():  # pylint: disable=protected-access
            if hasattr(self, kw):
                setattr(self, kw, getattr(args, kw))
        return self

    # func prototype: func(parallel_object, worker_name, ret) -> ret(s)
    def add_callback(self, func, in_thread=False):
        """Register a callback; ``in_thread=True`` runs it in the worker process."""
        if not callable(func):
            raise annet.ExecError("callback must be a callable object or function")
        if in_thread:
            self.in_thread_callbacks.append(func)
        else:
            self.callbacks.append(func)
        return self

    def _run_callbacks(self, task_result: TaskResult, in_thread=False):
        """Feed ``task_result`` through the callback chain, yielding the outputs.

        Each callback may map one TaskResult to zero or more TaskResults;
        the next callback sees the previous one's outputs.
        """
        task_results = [task_result]
        cbs = self.in_thread_callbacks if in_thread else self.callbacks
        for cb in cbs:
            task_results = [cb_result for result in task_results for cb_result in self._cb_wrapper(cb, result)]
        yield from task_results

    def _cb_wrapper(self, cb, task_result: TaskResult):
        """Invoke one callback, normalizing generator/plain-function callbacks.

        Generator callbacks are drained; every yielded TaskResult is kept.
        A callback exception is attached to the original TaskResult (with
        ``device_id`` and ``formatted_output`` set on the exception) instead
        of propagating.
        """
        task_results = []
        try:
            if inspect.isgeneratorfunction(cb):
                future = cb(self, task_result)
                while True:
                    # Drained via next() so StopIteration ends the loop below.
                    cb_result = next(future)
                    if isinstance(cb_result, TaskResult):
                        task_results.append(cb_result)
            else:
                cb_result = cb(self, task_result)
                if isinstance(cb_result, TaskResult):
                    task_results = [cb_result]
        except StopIteration:
            pass
        except Exception as exc:
            exc.device_id = task_result.device_id
            exc.formatted_output = traceback.format_exc()
            task_result.exc = exc
            task_results = [task_result]
        yield from task_results

    @tracing.function
    def run(self, device_ids, tolerate_fails=True, strict_error_code=False):
        """Run all tasks; return ``(success, fail)`` dicts keyed by device id.

        Raises RuntimeError when ``strict_error_code`` is set and at least
        one task failed.
        """
        success, fail = {}, {}
        for task_result in self.irun(device_ids, tolerate_fails):
            if task_result.exc is not None:
                fail[task_result.device_id] = task_result.exc
            else:
                success[task_result.device_id] = task_result.result
        if strict_error_code and fail:
            raise RuntimeError("failed for %d/%d devices" % (len(fail), len(device_ids)))
        return success, fail

    def irun(self, device_ids, tolerate_fails=True):
        """Generator form of :meth:`run`: yields TaskResults as tasks complete.

        With an effective pool size of 1, everything runs in-process;
        otherwise worker subprocesses are spawned and supervised.
        """
        _logger = get_logger()
        self.tasks_done = 0
        pool_size = self.parallel if len(device_ids) > self.parallel else len(device_ids)

        span = tracing_connector.get().get_current_span()
        if span:
            span.set_attribute("pool_size", pool_size)

        # single process way
        if pool_size == 1:
            cap_stdout = tempfile.TemporaryFile(mode="w+") if self.capture_output else None
            cap_stderr = tempfile.TemporaryFile(mode="w+") if self.capture_output else None
            worker_name = mp.current_process().name

            for device_id in device_ids:
                task_result = TaskResult(worker_name, device_id)
                task_result.extra["start_time"] = time.monotonic()
                try:
                    with capture_output(cap_stdout, cap_stderr):
                        task_result.result = invoke_retry(
                            self.func,
                            self.net_retry,
                            device_id,
                            *self.args,
                            **self.kwargs
                        )
                except Exception as exc:
                    if not tolerate_fails:
                        raise
                    exc.device_id = device_id
                    exc.formatted_output = traceback.format_exc()
                    task_result.exc = exc
                if self.capture_output:
                    # NOTE(review): read() without seek(0) — assumes
                    # capture_output leaves the files positioned for reading.
                    task_result.extra["cap_stdout"] = cap_stdout.read()
                    task_result.extra["cap_stderr"] = cap_stderr.read()
                self.tasks_done += 1

                # In-thread callbacks first, then parent-process callbacks.
                yield from [
                    result
                    for in_thread_result in self._run_callbacks(task_result, in_thread=True)
                    for result in self._run_callbacks(in_thread_result)
                ]
        else:
            # multiple processes way
            _logger.info("creating process pool with %d workers", pool_size)
            task_queue = mp.Queue()
            done_queue = mp.Queue()
            for device_id in device_ids:
                task_queue.put(PoolWorkerTask(type=PoolWorkerTaskType.INVOKE, payload=device_id))

            # Propagate the tracing context into the workers.
            context_carrier = {}
            tracing_connector.get().inject_context(context_carrier)

            pool = {}
            for index in range(pool_size):
                # One STOP sentinel per worker so each loop eventually exits.
                task_queue.put(PoolWorkerTask(type=PoolWorkerTaskType.STOP))
                worker_name = "Worker-%d" % index
                worker_args = (self, index, task_queue, done_queue, context_carrier)

                worker = mp.Process(
                    name=worker_name,
                    target=pool_worker,
                    args=worker_args
                )
                pool[worker_name] = worker
                worker.start()
                _logger.debug("Worker '%s' has been created with PID %d", worker_name, worker.pid)

            last_task_ts = time.monotonic()
            while True:
                exc = None
                worker_name = None
                in_thread_results = None

                queue_empty = False
                try:
                    # Poll with a 1s timeout so worker health is checked
                    # regularly even when no results arrive.
                    worker_name, _, in_thread_results, exc = done_queue.get(True, 1)
                    last_task_ts = time.monotonic()
                except queue.Empty:
                    queue_empty = True

                retired_workers, failed_workers = self._check_children(pool)

                terminate_by_timeout = False
                terminate_exc = None
                if queue_empty:
                    if time.monotonic() - last_task_ts > self.task_timeout:
                        # timeout hit
                        terminate_by_timeout = True
                        terminate_exc = annet.ExecError()

                if not tolerate_fails:
                    if exc is not None:
                        # worker returned exception
                        terminate_exc = exc
                    elif failed_workers:
                        # some workers exited with non-zero (and non-9) code
                        terminate_exc = annet.ExecError(f"Workers {failed_workers} exited with error")

                if terminate_exc is not None:
                    for (name, worker) in pool.items():
                        if worker.exitcode is None:
                            if terminate_by_timeout:
                                os.kill(worker.pid, signal.SIGUSR1)  # force dump stacktrace
                                time.sleep(10)
                            worker.terminate()
                            _logger.warning("Worker '%s' (PID: %d) has been terminated", name, worker.pid)
                            worker.join()
                    raise terminate_exc

                if not queue_empty:
                    self.tasks_done += 1

                    qsize = "unknown"
                    if platform.system() != "Darwin":
                        # Not implemented for macOS
                        qsize = str(done_queue.qsize())

                    _logger.debug("Got a result from worker '%s', qsize is %s", worker_name, qsize)

                    yield from [
                        result
                        for in_thread_result in in_thread_results
                        for result in self._run_callbacks(in_thread_result)
                    ]

                if not pool:
                    break

                for name in retired_workers:
                    _logger.debug("Worker '%s' has retired. Restart it", name)
                    # NOTE(review): worker_args is the leftover from the spawn
                    # loop above (i.e. the LAST worker's tuple), so restarted
                    # workers report the wrong index; the queues are shared so
                    # functionality is otherwise unaffected — confirm intent.
                    pool[name] = mp.Process(name=name, target=pool_worker, args=worker_args)
                    pool[name].start()
            task_queue.close()
            done_queue.close()

    def _check_children(self, pool):
        """Reap finished workers.

        Returns ``(retired_workers, failed_workers)``: names of workers that
        exited with code 9 (retired; left in ``pool`` for restart) and names
        that exited with any other non-zero code (removed from ``pool``).
        """
        _logger = get_logger()
        retired_workers = []
        failed_workers = []
        for name in list(pool.keys()):
            exitcode = pool[name].exitcode
            if exitcode is None:
                # Still running.
                continue
            if exitcode == 9:
                retired_workers.append(name)
            elif exitcode != 0:
                _logger.error("Worker '%s' (PID: %d) has exited with non-zero exit code %d", name, pool[name].pid,
                              exitcode)
                failed_workers.append(name)
            else:
                _logger.debug("Worker '%s' (PID: %d) has been reaped with exitcode %d", name, pool[name].pid, exitcode)
            pool[name].join()
            if exitcode != 9:
                del pool[name]
        return retired_workers, failed_workers
433
+
434
+
435
def invoke_retry(func, net_retry, *args, **kwargs):
    """Call ``func(*args, **kwargs)``, retrying on network-related failures.

    An exception whose chain contains BrokenPipeError or ConnectionResetError
    is retried up to ``net_retry`` additional times; anything else propagates
    immediately.  Generator results are drained into a list so that lazily
    raised errors are also subject to the retry logic.
    """
    attempt = 0
    while True:
        try:
            outcome = func(*args, **kwargs)
            if inspect.isgenerator(outcome):
                outcome = list(outcome)
            return outcome
        except Exception as exc:
            transient = find_exc_in_stack(exc, (BrokenPipeError, ConnectionResetError))
            if not transient or attempt >= net_retry:
                raise
            attempt += 1
annet/patching.py ADDED
@@ -0,0 +1,25 @@
1
+ from annet.annlib.patching import ( # pylint: disable=unused-import
2
+ AclError,
3
+ AclNotExclusiveError,
4
+ )
5
+ from annet.annlib.patching import Orderer as BaseOrderer
6
+ from annet.annlib.patching import ( # pylint: disable=unused-import
7
+ PatchTree,
8
+ apply_acl,
9
+ apply_diff_rb,
10
+ make_diff,
11
+ make_patch,
12
+ make_pre,
13
+ strip_unchanged,
14
+ )
15
+
16
+ from annet import rulebook
17
+
18
+
19
class Orderer(BaseOrderer):
    """Orderer preconfigured from a device hardware description."""

    @classmethod
    def from_hw(cls, hw):
        """Build an Orderer from the ordering rulebook for ``hw``'s vendor."""
        ordering = rulebook.get_rulebook(hw)["ordering"]
        return cls(ordering, hw.vendor)
annet/reference.py ADDED
@@ -0,0 +1,148 @@
1
+ from annet.annlib.patching import match_row_to_acl
2
+ from annet.annlib.rbparser.acl import compile_ref_acl_text
3
+
4
+
5
class RefMatchResult:
    """Maps generator classes to the ACL match groups they produced."""

    def __init__(self, gen_cls=None, groups=None):
        # Starts as a single-entry mapping when both pieces are given
        # (and truthy); empty otherwise.
        self.elems = {}
        if gen_cls and groups:
            self.elems[gen_cls] = groups

    def gen_cls(self):
        """Return every generator class recorded in this result."""
        return list(self.elems)

    def groups(self):
        """Return the concatenation of all recorded group lists."""
        flat = []
        for group_list in self.elems.values():
            flat.extend(group_list)
        return flat

    def __iter__(self):
        return iter(self.elems.items())

    def __add__(self, other):
        """Merge two results; on key collision ``other``'s entry wins."""
        merged = RefMatchResult()
        merged.elems = {**self.elems, **other.elems}
        return merged
25
+
26
+
27
class RefMatch:
    """Matches a device config tree against one compiled reference ACL."""

    def __init__(self, acl, gen_cls):
        self.acl = acl
        self.gen_cls = gen_cls

    def match(self, config):
        """Return a RefMatchResult for ``config``; empty when nothing matched."""
        found = self._match(config, self.acl)
        if not found:
            return RefMatchResult()
        return RefMatchResult(self.gen_cls, found)

    @classmethod
    def _match(cls, config, acl, _path=tuple()):
        """Recursively collect the ``match`` attribute of every ACL rule hit
        by a config row, descending into children with the child rules."""
        hits = []
        for row, children in config.items():
            rule, children_rules = match_row_to_acl(row, acl)
            if rule and rule["attrs"]["match"]:
                hits.append(rule["attrs"]["match"])
            if rule and children and children_rules:
                hits.extend(cls._match(children, children_rules, _path + (row,)))
        return hits
49
+
50
+
51
class RefMatcher:
    """Aggregates several RefMatch instances and applies them all at once."""

    def __init__(self):
        self.matches = []

    def match(self, config):
        """Combine the results of every registered matcher for ``config``."""
        combined = RefMatchResult()
        for matcher in self.matches:
            combined = combined + matcher.match(config)
        return combined

    def add(self, acl_text, gen_class):
        """Compile ``acl_text`` and register it for ``gen_class``."""
        compiled = compile_ref_acl_text(acl_text)
        self.matches.append(RefMatch(compiled, gen_class))
64
+
65
+
66
class RefTracker:
    """Tracks reference relations between generator classes.

    Classes are mapped to integer node ids in a ``Graph``; ``walk`` /
    ``configs`` then enumerate (definition, referrer) pairs in the graph's
    BFS order.
    """

    class Root:
        """Sentinel class used as the graph's root node."""

    def __init__(self):
        self.cfgs = {}      # generator class -> config registered via config()
        self.mapidx = {}    # bidirectional mapping: class <-> graph node id
        self.graph = Graph()
        self.root = self.__class__.Root
        self.map(self.root, self.graph.root_id)

    def map(self, newcls, node_id):
        """Register the bidirectional class <-> node-id mapping."""
        self.mapidx[node_id] = newcls
        self.mapidx[newcls] = node_id

    def addcls(self, newcls):
        """Return the node id for ``newcls``, allocating a node if needed."""
        if newcls not in self.mapidx:
            self.map(newcls, self.graph.newnode())
        return self.mapidx[newcls]

    def add(self, refcls, defcls):
        """Record that ``refcls`` references ``defcls``."""
        if refcls not in self.mapidx:
            # First time we see refcls: attach it to the root so the walk
            # (which starts from the root) can reach it.
            self.add(self.root, refcls)
        ridx = self.addcls(refcls)
        didx = self.addcls(defcls)
        self.graph.connect(ridx, didx)

    def config(self, newcls, config):
        """Associate a config with a generator class."""
        self.cfgs[newcls] = config

    def walk(self):
        """Yield (definition class, referrer class) pairs in BFS order.

        Fix: the original kept a dead ``ret = []`` accumulator and ended with
        ``return ret`` — meaningless inside a generator (the value only lands
        in StopIteration and no caller reads it); both are removed.

        NOTE(review): Graph.walk() yields (referrer id, definition id); the
        unpacking below swaps the names so callers receive the definition
        first — confirm against Graph.walk.
        """
        for didx, ridx in self.graph.walk():
            yield self.mapidx[didx], self.mapidx[ridx]

    def configs(self):
        """Return (definition config, referrer config) pairs for every walked
        pair where both classes have a registered config."""
        pairs = []
        for dc, rc in self.walk():
            if dc in self.cfgs and rc in self.cfgs:
                pairs.append((self.cfgs[dc], self.cfgs[rc]))
        return pairs
110
+
111
+
112
class Graph:
    """Directed graph over integer node ids, backed by an adjacency matrix.

    A root node is created on construction; ``walk`` enumerates edges
    reachable from it in breadth-first order.
    """

    def __init__(self):
        # indices[src][dst] == 1 means there is an edge src -> dst.
        self.indices = []
        self.root_id = self.newnode()

    def newnode(self):
        """Allocate a new node and return its id."""
        node_id = len(self.indices)
        # Grow the square matrix: one new row plus one new column per row.
        self.indices.append([0] * len(self.indices))
        for row in self.indices:
            row.append(0)
        return node_id

    def connect(self, ridx, didx):
        """Add an edge from ``ridx`` to ``didx``."""
        self.indices[ridx][didx] = 1

    def walk(self):
        """Yield (src, dst) edges reachable from the root in BFS level order.

        Edges leaving the root itself and self-loops are filtered out.
        """
        def edges_from(src):
            return [(src, dst) for dst, flag in enumerate(self.indices[src]) if flag]

        seen = set()
        frontier = edges_from(self.root_id)
        while frontier:
            next_frontier = []
            for edge in frontier:
                if edge in seen:
                    continue
                seen.add(edge)
                src, dst = edge
                next_frontier.extend(edges_from(dst))
                if src != self.root_id and src != dst:
                    yield src, dst
            frontier = next_frontier