umnetdb-utils 0.1.4__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- umnetdb_utils/__init__.py +1 -1
- umnetdb_utils/base.py +28 -17
- umnetdb_utils/cli.py +114 -0
- umnetdb_utils/umnetdb.py +331 -31
- umnetdb_utils/umnetdisco.py +84 -64
- umnetdb_utils/umnetequip.py +90 -83
- umnetdb_utils/umnetinfo.py +3 -6
- umnetdb_utils/utils.py +293 -9
- {umnetdb_utils-0.1.4.dist-info → umnetdb_utils-0.2.0.dist-info}/METADATA +2 -1
- umnetdb_utils-0.2.0.dist-info/RECORD +12 -0
- umnetdb_utils-0.2.0.dist-info/entry_points.txt +3 -0
- umnetdb_utils-0.1.4.dist-info/RECORD +0 -10
- {umnetdb_utils-0.1.4.dist-info → umnetdb_utils-0.2.0.dist-info}/WHEEL +0 -0
umnetdb_utils/__init__.py
CHANGED
umnetdb_utils/base.py
CHANGED
@@ -1,4 +1,3 @@
-
 from typing import Union
 from os import getenv
 import re
@@ -10,14 +9,16 @@ from sqlalchemy.orm import Session
 
 logger = logging.getLogger(__name__)
 
+
 class UMnetdbBase:
     """
     Base helper class
     """
+
     # set in child classes - you can use environment variables within curly braces here
     URL = None
 
-    def __init__(self, env_file:str=".env"):
+    def __init__(self, env_file: str = ".env"):
         """
         Initiate a umnetdb object. Optionally provide a path to a file with environment variables
         containing the credentials for the database. If no file is provided and there's no ".env",
@@ -73,14 +74,24 @@ class UMnetdbBase:
     def __exit__(self, fexc_type, exc_val, exc_tb):
         self.close()
 
-    def __getattr__(self, val:str):
+    def __getattr__(self, val: str):
         if self.session:
             return getattr(self.session, val)
 
         raise AttributeError(self)
 
-    def _build_select(
-
+    def _build_select(
+        self,
+        select,
+        table,
+        joins=None,
+        where=None,
+        order_by=None,
+        limit=None,
+        group_by=None,
+        distinct=False,
+    ) -> str:
+        """
         Generic 'select' query string builder built from standard query components as input.
         The user is required to generate substrings for the more complex inputs
         (eg joins, where statements), this function just puts all the components
@@ -93,7 +104,7 @@
         ex: "node_ip nip"
         :joins: a list of strings representing join statements. Include the actual 'join' part!
         ex: ["join node n on nip.mac = n.mac", "join device d on d.ip = n.switch"]
-        :where: For a single where statement, provide a string. For multiple provide a list.
+        :where: For a single where statement, provide a string. For multiple provide a list.
         The list of statements are "anded". If you need "or", embed it in one of your list items
         DO NOT provide the keyword 'where' - it is auto-added.
         ex: ["node_ip.ip = '1.2.3.4'", "node.switch = '10.233.0.5'"]
@@ -101,10 +112,10 @@
         :group_by: A string representing a column name (or names) to group by
         :limit: An integer
 
-
+        """
 
         # First part of the sql statement is the 'select'
-        distinct =
+        distinct = "distinct " if distinct else ""
         sql = f"select {distinct}" + ", ".join(select) + "\n"
 
         # Next is the table
@@ -118,7 +129,6 @@
 
         # Next are the filters. They are 'anded'
         if where and isinstance(where, list):
-
             sql += "where\n"
             sql += " and\n".join(where) + "\n"
         elif where:
@@ -133,20 +143,20 @@
 
         if limit:
             sql += f"limit {limit}\n"
-
+
         logger.debug(f"Generated SQL command:\n****\n{sql}\n****\n")
 
         return sql
 
-    def _execute(self, sql:str, rows_as_dict:bool=True, fetch_one:bool=False):
-
+    def _execute(self, sql: str, rows_as_dict: bool = True, fetch_one: bool = False):
+        """
         Generic sqlalchemy "open a session, execute this sql command and give me all the results"
 
         NB This function is defined for legacy database classes that came from umnet-scripts.
         It's encouraged to use "self.session.execute" in other child methods, allowing
         scripts that import the child class to use the context manager and execute multiple
         mehtods within the same session.
-
+        """
         with self.engine.begin() as c:
             r = c.execute(text(sql))
 
@@ -158,8 +168,9 @@
         else:
             return []
 
-
-
+    def execute(
+        self, sql: str, rows_as_dict: bool = True, fetch_one: bool = False
+    ) -> Union[list[dict], dict]:
        """
         Executes a sqlalchemy command and gives all the results as a list of dicts, or as a dict
         if 'fetch_one' is set to true.
@@ -172,5 +183,5 @@
 
         if fetch_one:
             return dict(result.fetchone())
-
-        return [dict(r) for r in result.fetchall()]
+
+        return [dict(r) for r in result.fetchall()]
umnetdb_utils/cli.py
ADDED
@@ -0,0 +1,114 @@
+import typer
+from umnetdb_utils import UMnetdb
+import inspect
+from functools import wraps
+
+from rich.console import Console
+from rich.table import Table
+
+from typing import Callable, List
+from typing_extensions import Annotated
+
+app = typer.Typer()
+
+def print_result(result:List[dict]):
+    """
+    Takes the result of a umnetdb call and prints it as a table
+    """
+    if len(result) == 0:
+        print("No results found")
+        return
+
+    if isinstance(result, dict):
+        result = [result]
+
+    # instantiate table with columns based on entry dict keys
+    table = Table(*result[0].keys())
+    for row in result:
+        table.add_row(*[str(i) for i in row.values()])
+
+    console = Console()
+    console.print(table)
+
+
+def command_generator(method_name:str, method:Callable):
+    """
+    Generates a typer command function for an arbitrary method
+    in the umnetdb class. The generated function opens a connection with
+    the database, executes the method, and prints out the results.
+
+    Note that the docstring of each method is interrogated to generate
+    help text for each typer command.
+
+    :method_name: The name of the method
+    :method: The method itself
+    """
+
+    # first we're going to tease out the 'help' portions of the method
+    # from the docstring.
+    docstr = method.__doc__
+    docstr_parts = docstr.split("\n:")
+
+    # first section of the docstring is always a generic 'this is what the method does'.
+    cmd_help = docstr_parts.pop(0)
+
+    # next sections are details on the specific arguments that we want to pass to typer as
+    # special annotated type hints
+    arg_help = {}
+    for arg_str in docstr_parts:
+        if ":" in arg_str:
+            arg, help = arg_str.split(":")
+            arg_help[arg] = help.strip()
+
+    sig = inspect.signature(method)
+
+    # going through the method's arguments and augmenting the 'help' section for each one
+    # from the docstring if applicable
+    new_params = []
+    for p_name, p in sig.parameters.items():
+
+        # need to skip self
+        if p_name == "self":
+            continue
+
+        # if there wasn't any helper text then just append the parameter as is
+        if p_name not in arg_help:
+            new_params.append(p)
+            continue
+
+        # params without default values should be typer 'arguments'
+        if p.default == inspect._empty:
+            new_params.append(p.replace(annotation=Annotated[p.annotation, typer.Argument(help=arg_help[p_name])]))
+            continue
+
+        # params with default values should be typer 'options'
+        new_params.append(p.replace(annotation=Annotated[p.annotation, typer.Option(help=arg_help[p_name])]))
+
+    new_sig = sig.replace(parameters=new_params)
+
+
+    # new munged function based on the origional method, with a new signature
+    # and docstring for typer
+    @wraps(method)
+    def wrapper(*args, **kwargs):
+        with UMnetdb() as db:
+            result = getattr(db, method_name)(*args, **kwargs)
+        print_result(result)
+
+    wrapper.__signature__ = new_sig
+    wrapper.__doc__ = cmd_help
+
+    return wrapper
+
+
+def main():
+    for f_name,f in UMnetdb.__dict__.items():
+        if not(f_name.startswith("_")) and callable(f):
+            app.command()(command_generator(f_name, f))
+
+    app()
+
+if __name__ == "__main__":
+    main()
+
+
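
The `main()` loop above turns every public `UMnetdb` method into a typer sub-command. A hand-rolled equivalent for a single method is sketched below; the installed console-script name comes from the new `entry_points.txt`, which this diff does not show, so no command name is assumed here.

```python
# Sketch: what main() does for one method, spelled out by hand.
import typer
from umnetdb_utils import UMnetdb
from umnetdb_utils.cli import command_generator

app = typer.Typer()

# Wraps get_neighbors so it opens a session, runs the query, and prints a rich table.
# typer derives the sub-command name ("get-neighbors") and its --help text from the
# wrapped function's name and docstring.
app.command()(command_generator("get_neighbors", UMnetdb.get_neighbors))

if __name__ == "__main__":
    app()
```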
umnetdb_utils/umnetdb.py
CHANGED
@@ -1,20 +1,18 @@
-
-from typing import List
+from typing import List, Optional
 import logging
 import re
+import ipaddress
+from copy import deepcopy
 
-
-from sqlalchemy import text
 from .base import UMnetdbBase
+from .utils import is_ip_address, Packet, Hop, Path, LOCAL_PROTOCOLS
 
+logger = logging.getLogger(__name__)
 
 class UMnetdb(UMnetdbBase):
+    URL = "postgresql+psycopg://{UMNETDB_USER}:{UMNETDB_PASSWORD}@wintermute.umnet.umich.edu/umnetdb"
 
-
-
-    def get_neighbors(
-        self, device: str, known_devices_only: bool = True
-    ) -> List[dict]:
+    def get_neighbors(self, device: str, known_devices_only: bool = True, interface:Optional[str]=None) -> List[dict]:
         """
         Gets a list of the neighbors of a particular device. If the port
         has a parent in the LAG table that is included as well.
@@ -24,9 +22,12 @@ class UMnetdb(UMnetdbBase):
 
         Setting 'known_devices_only' to true only returns neighbors that are found
         in umnet_db's device table. Setting it to false will return all lldp neighbors
-        and will include things like phones and APs
+        and will include things like phones and APs.
 
         Returns results as a list of dictionary entries keyed on column names.
+        :device: Name of the device
+        :known_devices_only: If set to true, will only return neighbors found in umnetdb's device table.
+        :interface: If supplied, restrict to only find neighbors on a particular interface on the device
         """
 
         if known_devices_only:
@@ -35,12 +36,12 @@
                 "n_d.name as remote_device",
                 "n.remote_port",
                 "l.parent",
-                "n_l.parent as remote_parent"
-
+                "n_l.parent as remote_parent",
+            ]
             joins = [
-
-
-
+                "join device n_d on n_d.hostname=n.remote_device",
+                "left outer join lag l on l.device=n.device and l.member=n.port",
+                "left outer join lag n_l on n_l.device=n_d.name and n_l.member=n.remote_port",
             ]
         else:
             select = [
@@ -48,7 +49,7 @@
                 "coalesce(n_d.name, n.remote_device) as remote_device",
                 "n.remote_port",
                 "l.parent",
-                "n_l.parent as remote_parent"
+                "n_l.parent as remote_parent",
             ]
             joins = [
                 "left outer join device n_d on n_d.hostname=n.remote_device",
@@ -59,38 +60,43 @@
         table = "neighbor n"
 
         where = [f"n.device='{device}'"]
+        if interface:
+            where.append(f"n.port='{interface}'")
 
         query = self._build_select(select, table, joins, where)
 
         return self.execute(query)
 
-
-
-    def get_dlzone(self, zone_name:str) -> List[dict]:
+    def get_dlzone(self, zone_name: str) -> List[dict]:
         """
         Gets all devices within a DL zone based on walking the 'neighbors'
         table.
-
+
         For each device, the following attributes are returned:
         "name", "ip", "version", "vendor", "model", "serial"
+
+        :zone_name: Name of the DL zone
         """
         device_cols = ["name", "ip", "version", "vendor", "model", "serial"]
 
         # step 1 is to find DLs in the database - we'll seed our zone with them
-        query = self._build_select(
+        query = self._build_select(
+            select=device_cols,
+            table="device",
+            where=f"name similar to '(d-|dl-){zone_name}-(1|2)'",
+        )
         dls = self.execute(query)
 
         if not dls:
             raise ValueError(f"No DLs found in umnetdb for zone {zone_name}")
 
-        devices_by_name = {d[
+        devices_by_name = {d["name"]: d for d in dls}
 
         # now we'll look for neighbors on each device within the zone.
         # Note that outside of the DLs we only expect to find devices that start with
         # "s-" anything else is considered 'outside the zone'
         todo = list(devices_by_name.keys())
-        while
-
+        while len(todo) != 0:
             device = todo.pop()
 
             # note that by default this method only returns neighbors in the 'device' table,
@@ -99,20 +105,314 @@ class UMnetdb(UMnetdbBase):
             devices_by_name[device]["neighbors"] = {}
             for neigh in neighs:
 
-                # only want 'd- or 'dl-' or 's-' devices
-                if re.match(r"(dl
-
+                # only want 'd- or 'dl-' or 's-' devices, and we don't want out of band devices
+                if re.match(r"(dl?-|s-)", neigh["remote_device"]) and not re.match(
+                    r"s-oob-", neigh["remote_device"]
+                ):
                     # adding neighbor to local device's neighbor list
-                    devices_by_name[device]["neighbors"][neigh["port"]] = {
+                    devices_by_name[device]["neighbors"][neigh["port"]] = {
+                        k: v for k, v in neigh.items() if k != "port"
+                    }
 
                     # if we haven't seen this neighbor yet, pull data from our device table for it, and
                     # add it to our 'to do' list to pull its neighbors.
                     if neigh["remote_device"] not in devices_by_name:
-
-
+                        query = self._build_select(
+                            select=device_cols,
+                            table="device",
+                            where=f"name = '{neigh['remote_device']}'",
+                        )
                         neigh_device = self.execute(query, fetch_one=True)
                         devices_by_name[neigh_device["name"]] = neigh_device
-
+
                         todo.append(neigh_device["name"])
 
         return list(devices_by_name.values())
+
+
+    def l3info(self, search_str:str, detail:bool=False, num_results:int=10, exact:bool=False)->list[dict]:
+        """
+        Does a search of the umnetdb ip_interface table.
+
+        :search_str: Can be an IP address, 'VlanX', or a full or partial netname
+        :detail: Adds admin/oper status, primary/secondary, helpers, and timestamps to output.
+        :num_results: Limits number of results printed out.
+        :exact: Only return exact matches, either for IP addresses or for string matches.
+        """
+
+        cols = ["device", "ip_address", "interface", "description", "vrf"]
+        if detail:
+            cols.extend(["admin_up", "oper_up", "secondary", "helpers", "first_seen", "last_updated"])
+
+        # 'is contained within' IP search - reference:
+        # https://www.postgresql.org/docs/9.3/functions-net.html
+
+        # VlanX based searches are always 'exact'
+        if re.match(r"Vlan\d+$", search_str):
+            where = [f"interface = '{search_str}'"]
+
+        # ip or description based searches can be 'exact' or inexact
+        elif exact:
+            if is_ip_address(search_str):
+                where = [f"host(ip_address) = '{search_str}'"]
+            else:
+                where = [f"description = '{search_str}'"]
+
+        else:
+            if is_ip_address(search_str):
+                where = [f"ip_address >>= '{search_str}'"]
+            else:
+                where = [f"description like '{search_str}%'"]
+
+        # removing IPs assigned to mgmt interfaces
+        where.append("vrf != 'management'")
+
+        query = self._build_select(
+            select=cols,
+            table="ip_interface",
+            where=where,
+            limit=num_results,
+        )
+        return self.execute(query)
+
+
+    def route(self, router:str, prefix:str, vrf:str, resolve_nh:bool=True, details:bool=False) -> list[dict]:
+        """
+        Does an lpm query on a particular router for a particular prefix
+        in a particular VRF.
+
+        :router: Name of the router to query
+        :prefix: Prefix to query for
+        :vrf: Name of the VRF to query against
+        :resolve_nh: If no nh_interface is present in the database, recursively resolve for it.
+        :details: Set to true to get output of all columns in the route table.
+        """
+        if details:
+            cols = ["*"]
+        else:
+            cols = ["device", "vrf", "prefix", "nh_ip", "nh_table", "nh_interface"]
+
+        lpms = self.lpm_query(router, prefix, vrf, columns=cols)
+
+        if not lpms:
+            return []
+
+        if not resolve_nh:
+            return lpms
+
+        resolved_results = []
+        for route in lpms:
+            if route["nh_interface"]:
+                resolved_results.append(route)
+            else:
+                self._resolve_nh(route, resolved_results, 0)
+
+        return resolved_results
+
+    def lpm_query(self, router:str, prefix:str, vrf:str, columns:Optional[str]=None)->list[dict]:
+        """
+        Does an lpm query against a particular router, prefix, and vrf. Optionally specify
+        which columns you want to limit the query to.
+        """
+
+        select = columns if columns else ["*"]
+
+        query = self._build_select(
+            select=select,
+            table="route",
+            where=[f"device='{router}'", f"prefix >>= '{prefix}'", f"vrf='{vrf}'"],
+            order_by="prefix"
+        )
+
+        result = self.execute(query)
+
+        if not result:
+            return None
+
+        if len(result) == 1:
+            return result
+
+        # peeling the longest matching prefix of the end of the results, which
+        # are ordered by ascending prefixlength
+        lpm_results = [result.pop()]
+
+        # finding any other equivalent lpms. As soon as we run into one that
+        # doesn't match we know we're done.
+        result.reverse()
+        for r in result:
+            if r["prefix"] == lpm_results[0]["prefix"]:
+                lpm_results.append(r)
+            else:
+                break
+
+        return lpm_results
+
+    def mpls_label(self, router:str, label:str) -> list[dict]:
+        """
+        Looks up a particular label for a particular device in the mpls table.
+        :router: device name
+        :label: label value
+        """
+        query = self._build_select(
+            select=["*"],
+            table="mpls",
+            where=[f"device='{router}'", f"in_label='{label}'"]
+        )
+        return self.execute(query)
+
+    def vni(self, router:str, vni:int) -> dict:
+        """
+        Looks up a particular vni on the router and returns
+        the VRF or vlan_id it's associated with
+        """
+
+        query = self._build_select(
+            select=["*"],
+            table="vni",
+            where=[f"device='{router}'", f"vni='{vni}'"]
+        )
+        return self.execute(query, fetch_one=True)
+
+
+    def _resolve_nh(self, route:dict, resolved_routes:list[dict], depth:int) -> dict:
+        """
+        Recursively resolves next hop of a route till we find a nh interface.
+        If we hit a recursion depth of 4 then an exception is thrown - the max depth on our
+        network I've seen is like 2 (for a static route to an indirect next hop)
+        """
+        if route["nh_interface"]:
+            return route
+
+        depth += 1
+        if depth == 4:
+            raise RecursionError(f"Reached max recursion depth of 4 trying to resolve route {route}")
+
+        nh_ip = route["nh_ip"]
+        nh_table = route["nh_table"]
+        router = route["device"]
+
+        nh_routes = self.lpm_query(router, nh_ip, nh_table)
+
+        for nh_route in nh_routes:
+            r = self._resolve_nh(nh_route, resolved_routes, depth)
+            resolved_route = deepcopy(route)
+            resolved_route["nh_interface"] = r["nh_interface"]
+            if r["mpls_label"] and resolved_route["mpls_label"] is not None:
+                resolved_route["mpls_label"].extend(r["mpls_label"])
+            elif r["mpls_label"]:
+                resolved_route["mpls_label"] = r["mpls_label"]
+            resolved_route["nh_ip"] = r["nh_ip"]
+            resolved_routes.append(resolved_route)
+
+
+    def get_all_paths(self, src_ip:str, dst_ip:str):
+        """
+        Traces the path between a particular source and destination IP
+        :src_ip: A source IP address, must be somewhere on our network
+        :dst_ip: A destination IP address, does not have to be on our network.
+        """
+        for ip_name, ip in [("source", src_ip), ("destination", dst_ip)]:
+            if not is_ip_address(ip):
+                raise ValueError(f"invalid {ip_name} IP address {ip}")
+
+        src_l3info = self.l3info(src_ip, num_results=1)
+        if not src_l3info:
+            raise ValueError(f"Could not find where {src_ip} is routed")
+
+        packet = Packet(dst_ip=ipaddress.ip_address(dst_ip))
+        hop = Hop(src_l3info[0]["device"], src_l3info[0]["vrf"], src_l3info[0]["interface"], packet)
+        path = Path(hop)
+
+        self._walk_path(path, hop, hop.router, hop.vrf, packet)
+
+        return path.get_path()
+
+
+    def _walk_path(self, path:Path, curr_hop:Hop, nh_router:str, nh_table:str, packet:Packet):
+
+        logger.debug(f"\n******* walking path - current hop: {curr_hop}, nh_route: {nh_router}, nh_table {nh_table} *******")
+        logger.debug(f"Known hops: {path.hops.keys()}")
+
+        # mpls-based lookup
+        if packet.label_stack:
+            logger.debug("")
+            routes = self.mpls_label(router=nh_router, label=packet.label_stack[-1])
+
+        # otherwise we want to do an ip based lokup
+        else:
+            routes = self.route(router=nh_router, prefix=packet.dst_ip, vrf=nh_table, details=True)
+
+        if not routes:
+            raise ValueError(f"No route found for {curr_hop}")
+
+        for idx, route in zip(range(1,len(routes)+1), routes):
+
+            logger.debug(f"*** Processing route {idx} of {len(routes)}:{route} ***")
+            nh_router = None
+            nh_table = None
+            new_packet = deepcopy(packet)
+
+            # if the packet is not encapsulated and the route is local, we have reached our destination
+            if not packet.is_encapped() and route.get("protocol") in LOCAL_PROTOCOLS:
+                logger.debug(f"Destination reached at {curr_hop}")
+                final_hop = Hop(route["device"], vrf=route["vrf"], interface=route["nh_interface"], packet=new_packet)
+                path.add_hop(curr_hop, final_hop)
+                continue
+
+            # VXLAN decap - requires local lookup in the vrf that maps to the packet's VNI
+            if route.get("protocol") == "direct" and packet.vni and curr_hop.router == route["device"]:
+                vni = self.vni(router=route["device"], vni=packet.vni)
+                new_packet.vxlan_decap()
+                nh_table = vni["vrf"]
+                nh_router = route["device"]
+                logger.debug(f"vxlan-decapping packet, new packet {new_packet}")
+
+            # MPLS decap - requires local lookup in the vrf indicated by 'nh_interface' field
+            # of this aggregate route
+            elif route.get("aggregate"):
+                new_packet.mpls_pop()
+                nh_table = route["nh_interface"]
+                nh_router = route["device"]
+                logger.debug(f"mpls aggregate route, new packet {new_packet}")
+
+            # VXLAN encap - requires local lookup of encapped packet in the nh table
+            elif route.get("vxlan_vni") and not packet.is_encapped():
+                new_packet.vxlan_encap(route["vxlan_vni"], route["vxlan_endpoint"])
+                logger.debug(f"vxlan-encapping packet, new packet {new_packet}")
+
+            # MPLS encap for an IP route. Resolved routes will have both transport and vrf
+            # labels if applicable - this 'push' will add both to the packet.
+            elif route.get("mpls_label"):
+                new_packet.mpls_push(route["mpls_label"])
+                logger.debug(f"mpls-encapping packet, new packet {new_packet}")
+
+            # MPLS route - note in our environment we don't have anything that requires a
+            # push on an already-labeled packet (!)
+            elif route.get("in_label"):
+
+                if route["out_label"] == ["pop"]:
+                    new_packet.mpls_pop()
+                else:
+                    new_packet.mpls_swap(route["out_label"])
+                logger.debug(f"mpls push or swap, new packet {new_packet}")
+
+            # if the next hop isn't local we need to figure out which router it's on. In our environment
+            # the easiest way to do that is to use l3info against the nh_ip of the route.
+            if not nh_router and route["nh_ip"]:
+
+                l3i_router = self.l3info(str(route["nh_ip"]), exact=True)
+                if l3i_router:
+                    nh_router = l3i_router[0]["device"]
+                    nh_table = nh_table if nh_table else l3i_router[0]["vrf"]
+                    logger.debug(f"found router {nh_router} for nh ip {route['nh_ip']}")
+
+            if not nh_router:
+                raise ValueError(f"Unknown next hop for {curr_hop} route {route}")
+
+            # add this hop to our path and if it's a new hop, keep waking
+            new_hop = Hop(route["device"], vrf=route.get("nh_table", "default"), interface=route["nh_interface"], packet=new_packet)
+            logger.debug(f"new hop generated: {new_hop}")
+            new_path = path.add_hop(curr_hop, new_hop)
+            if new_path:
+                logger.debug("New path detected - still walking")
+                self._walk_path(path, new_hop, nh_router, nh_table, new_packet)