ipfabric_netbox 3.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ipfabric_netbox might be problematic. Click here for more details.
- ipfabric_netbox/__init__.py +42 -0
- ipfabric_netbox/api/__init__.py +2 -0
- ipfabric_netbox/api/nested_serializers.py +99 -0
- ipfabric_netbox/api/serializers.py +160 -0
- ipfabric_netbox/api/urls.py +21 -0
- ipfabric_netbox/api/views.py +111 -0
- ipfabric_netbox/choices.py +226 -0
- ipfabric_netbox/filtersets.py +125 -0
- ipfabric_netbox/forms.py +1063 -0
- ipfabric_netbox/jobs.py +95 -0
- ipfabric_netbox/migrations/0001_initial.py +342 -0
- ipfabric_netbox/migrations/0002_ipfabricsnapshot_status.py +17 -0
- ipfabric_netbox/migrations/0003_ipfabricsource_type_and_more.py +49 -0
- ipfabric_netbox/migrations/0004_ipfabricsync_auto_merge.py +17 -0
- ipfabric_netbox/migrations/0005_alter_ipfabricrelationshipfield_source_model_and_more.py +64 -0
- ipfabric_netbox/migrations/0006_alter_ipfabrictransformmap_target_model.py +48 -0
- ipfabric_netbox/migrations/__init__.py +0 -0
- ipfabric_netbox/models.py +874 -0
- ipfabric_netbox/navigation.py +62 -0
- ipfabric_netbox/signals.py +68 -0
- ipfabric_netbox/tables.py +208 -0
- ipfabric_netbox/template_content.py +13 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/diff.html +72 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/json.html +20 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/logs_pending.html +6 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/merge_form.html +22 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/site_topology_button.html +70 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/site_topology_modal.html +61 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/snapshotdata.html +60 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/sync_delete.html +19 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/transform_map_field_map.html +11 -0
- ipfabric_netbox/templates/ipfabric_netbox/inc/transform_map_relationship_map.html +11 -0
- ipfabric_netbox/templates/ipfabric_netbox/ipfabric_table.html +55 -0
- ipfabric_netbox/templates/ipfabric_netbox/ipfabricbranch.html +141 -0
- ipfabric_netbox/templates/ipfabric_netbox/ipfabricsnapshot.html +105 -0
- ipfabric_netbox/templates/ipfabric_netbox/ipfabricsource.html +111 -0
- ipfabric_netbox/templates/ipfabric_netbox/ipfabricsync.html +103 -0
- ipfabric_netbox/templates/ipfabric_netbox/ipfabrictransformmap.html +41 -0
- ipfabric_netbox/templates/ipfabric_netbox/ipfabrictransformmap_list.html +17 -0
- ipfabric_netbox/templates/ipfabric_netbox/ipfabrictransformmap_restore.html +59 -0
- ipfabric_netbox/templates/ipfabric_netbox/partials/branch_all.html +10 -0
- ipfabric_netbox/templates/ipfabric_netbox/partials/branch_progress.html +19 -0
- ipfabric_netbox/templates/ipfabric_netbox/partials/branch_status.html +1 -0
- ipfabric_netbox/templates/ipfabric_netbox/partials/job_logs.html +53 -0
- ipfabric_netbox/templates/ipfabric_netbox/partials/sync_last_branch.html +1 -0
- ipfabric_netbox/templates/ipfabric_netbox/sync_list.html +126 -0
- ipfabric_netbox/templates/static/ipfabric_netbox/css/rack.css +9 -0
- ipfabric_netbox/tests/__init__.py +0 -0
- ipfabric_netbox/tests/test_models.py +1340 -0
- ipfabric_netbox/urls.py +141 -0
- ipfabric_netbox/utilities/__init__.py +0 -0
- ipfabric_netbox/utilities/ipfutils.py +591 -0
- ipfabric_netbox/utilities/logging.py +93 -0
- ipfabric_netbox/utilities/nbutils.py +105 -0
- ipfabric_netbox/utilities/transform_map.py +35 -0
- ipfabric_netbox/views.py +845 -0
- ipfabric_netbox-3.1.2.dist-info/METADATA +88 -0
- ipfabric_netbox-3.1.2.dist-info/RECORD +59 -0
- ipfabric_netbox-3.1.2.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,591 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
import uuid
|
|
4
|
+
from importlib import metadata
|
|
5
|
+
|
|
6
|
+
from core.exceptions import SyncError
|
|
7
|
+
from dcim.models import Device
|
|
8
|
+
from django.conf import settings
|
|
9
|
+
from django.core.exceptions import ObjectDoesNotExist
|
|
10
|
+
from django.shortcuts import get_object_or_404
|
|
11
|
+
from django.utils.text import slugify
|
|
12
|
+
from django_tables2 import Column
|
|
13
|
+
from ipfabric import IPFClient
|
|
14
|
+
from jinja2.sandbox import SandboxedEnvironment
|
|
15
|
+
from netbox.config import get_config
|
|
16
|
+
from netutils.utils import jinja2_convenience_function
|
|
17
|
+
|
|
18
|
+
from ..choices import IPFabricSourceTypeChoices
|
|
19
|
+
from .nbutils import device_serial_max_length
|
|
20
|
+
from .nbutils import order_devices
|
|
21
|
+
from .nbutils import order_members
|
|
22
|
+
from .nbutils import order_pn
|
|
23
|
+
from .nbutils import order_vrf
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger("ipfabric_netbox.utilities.ipf_utils")
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def slugify_text(value):
    """Jinja2 filter: return *value* slugified via Django's ``slugify``."""
    return slugify(value)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def serial(value):
    """Jinja2 filter: pick a serial number for a NetBox device record.

    Returns the IP Fabric serial number (``"sn"``) when it is present and
    short enough to fit NetBox's device serial field; otherwise falls back
    to the IP Fabric internal device id (``"id"``).

    Fix: the original called ``len(value.get("sn"))``, which raises
    ``TypeError`` when ``"sn"`` is missing or ``None``; treat that case as
    "no serial" and fall back to ``"id"``.
    """
    sn = value.get("sn") or ""  # tolerate a missing/None serial number
    if 0 < len(sn) < device_serial_max_length:
        return sn
    return value.get("id")
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
IPF_JINJA_FILTERS = {"slugify": slugify_text, "serial": serial}
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def render_jinja2(template_code, context):
    """Render *template_code* as a sandboxed Jinja2 template with *context*.

    The sandbox is loaded, in order, with NetBox's configured Jinja2
    filters, this plugin's own filters (``slugify``/``serial``) and the
    netutils convenience filters, then the template is rendered with the
    context unpacked as keyword arguments.
    """
    env = SandboxedEnvironment()
    for filter_set in (
        get_config().JINJA2_FILTERS,
        IPF_JINJA_FILTERS,
        jinja2_convenience_function(),
    ):
        env.filters.update(filter_set)
    template = env.from_string(source=template_code)
    return template.render(**context)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class IPFabric(object):
    """Thin wrapper around the IP Fabric SDK client (``IPFClient``).

    Provides snapshot/site discovery and per-device table lookups used by
    the sync machinery.

    :param parameters: optional dict of keyword arguments for ``IPFClient``;
        when omitted, connection settings come from
        ``settings.PLUGINS_CONFIG["ipfabric_netbox"]``.
    :param transform_map: optional transform-map reference, stored for callers.
    """

    def __init__(self, parameters=None, transform_map=None) -> None:
        if parameters:
            self.ipf = IPFClient(**parameters, unloaded=True)
        else:
            self.ipf = IPFClient(
                **settings.PLUGINS_CONFIG["ipfabric_netbox"], unloaded=True
            )
        # Advertise this plugin and its version in the SDK's HTTP user agent.
        self.ipf._client.headers[
            "user-agent"
        ] += f'; ipfabric-netbox/{metadata.version("ipfabric-netbox")}'  # noqa: E702
        self.transform_map = transform_map

    def get_snapshots(self) -> dict:
        """Return usable snapshots as ``{ref: (description, snapshot_id)}``.

        Snapshots that are not fully processed, plus the ``$prev`` and
        ``$lastLocked`` aliases, are skipped. The description is the snapshot
        name (when set) followed by its end timestamp.
        """
        formatted_snapshots = {}
        if self.ipf:
            for snapshot_ref, snapshot in self.ipf.snapshots.items():
                if snapshot.status != "done" and snapshot.finish_status != "done":
                    continue
                if snapshot_ref in ["$prev", "$lastLocked"]:
                    continue
                end_stamp = snapshot.end.strftime("%d-%b-%y %H:%M:%S")
                if snapshot.name:
                    description = snapshot.name + " - " + end_stamp
                else:
                    description = end_stamp
                formatted_snapshots[snapshot_ref] = (description, snapshot.snapshot_id)
        return formatted_snapshots

    def get_sites(self, snapshot=None) -> list:
        """Return the list of site names in *snapshot* (or the default snapshot).

        Fix: the original annotation was ``-> dict()`` — it instantiated a
        dict at definition time and misstated the return type; the method
        returns a list of site-name strings.
        """
        if snapshot:
            raw_sites = self.ipf.inventory.sites.all(snapshot_id=snapshot)
        else:
            raw_sites = self.ipf.inventory.sites.all()
        return [item["siteName"] for item in raw_sites]

    def get_table_data(self, table, device):
        """Fetch rows and django-tables2 columns for one IP Fabric table.

        :param table: dotted table path. Two segments address a technology
            table (e.g. ``"platforms.stacks_members"``), except
            ``"serial_ports"`` which lives directly under ``technology``;
            a single segment is looked up on ``inventory``.
        :param device: NetBox device whose serial number filters the rows.
        :returns: ``(data, columns)`` where ``columns`` is a list of
            ``(name, Column())`` pairs with the first SDK column dropped.
        """
        # Renamed from `filter`/`table` locals to avoid shadowing the builtin
        # and the parameter.
        filters = {"sn": ["eq", device.serial]}
        parts = table.split(".")

        if len(parts) == 2:
            if parts[1] == "serial_ports":
                # serial_ports is attached directly to the technology client.
                endpoint = getattr(self.ipf.technology, parts[1])
            else:
                tech = getattr(self.ipf.technology, parts[0])
                endpoint = getattr(tech, parts[1])
        else:
            endpoint = getattr(self.ipf.inventory, parts[0])

        columns = self.ipf.get_columns(endpoint.endpoint)
        # Drop the first column returned by the SDK.
        columns.pop(0)
        columns = [(k, Column()) for k in columns]
        data = endpoint.all(
            filters=filters,
        )
        return data, columns
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class IPFabricSyncRunner(object):
    """Drives a single IP Fabric -> NetBox ingestion run.

    Collects snapshot data (from the IP Fabric API for local sources, or
    from previously-pushed ``ipf_data`` rows for remote sources) and
    creates/updates NetBox objects through the configured transform maps.
    """

    def __init__(
        self, transform_map, sync=None, client: IPFabric = None, settings: dict = None
    ) -> None:
        # NOTE: the `settings` parameter shadows the module-level Django
        # `settings` import inside this method; here it is the ingestion
        # options dict (keyed by model name, plus "snapshot_id"/"sites").
        self.client = client
        self.settings = settings
        self.transform_map = transform_map
        self.sync = sync
        # Maps a per-device UUID -> {model class: instance} so later
        # transform maps can resolve relationships to earlier objects.
        self.relationship_store = {}
        # NOTE(review): `siteUUID` is not referenced anywhere else in this
        # module — possibly dead state; confirm before removing.
        self.siteUUID = {}
        # self.logger is only bound when the sync object provides one;
        # methods below assume it exists.
        if hasattr(self.sync, "logger"):
            self.logger = self.sync.logger
        # Progress counters for statistics reporting.
        self.interface_count_total = 0
        self.interface_count = 1
        self.inventoryitem_count = 1
        self.inventoryitem_count_total = 0

        if self.sync.snapshot_data.status != "loaded":
            raise SyncError("Snapshot not loaded in IP Fabric.")

    def get_model_or_update(self, app, model, data, uuid=None):
        """Create/update (or just fetch) one NetBox object via its transform map.

        When ingestion of *model* is enabled in settings, the transform map
        creates or updates the instance from *data*; otherwise the existing
        object is looked up by the map's coalesce fields. Either way the
        object is recorded in the relationship store under *uuid*.

        :raises SystemError: when no transform map exists for app/model.
        """
        transform_map = self.transform_map.objects.filter(
            target_model__app_label=app, target_model__model=model
        ).first()

        if not transform_map:
            raise SystemError(f"No transform map available for {app}: {model}")

        model_settings = self.settings.get(model, False)
        # NOTE: `object` shadows the builtin within this method.
        object = None

        if model_settings:
            logger.info(f"Creating {model}")
            object = transform_map.update_or_create_instance(
                data=data,
                uuid=uuid,
                relationship_store=self.relationship_store,
                tags=self.sync.tags.all(),
                logger=self.logger,
            )
        else:
            # Ingestion disabled for this model: only resolve the existing
            # object (404s if it does not exist).
            logger.info(f"Getting {model}")
            coalesce_fields = transform_map.get_coalesce_fields(data)
            object = get_object_or_404(
                transform_map.target_model.model_class().objects.all(),
                **coalesce_fields,
            )

        # Register the object in the per-UUID relationship store.
        store = self.relationship_store.get(uuid)

        if store:
            store[object._meta.model] = object
        else:
            self.relationship_store[uuid] = {object._meta.model: object}

        return object

    def create_interface(
        self, device_interface, device_uuid, managed_ips, device_object, device
    ):
        """Create one interface (and optionally its IP address) for a device.

        Copies the device's login IP onto the interface row, creates the
        interface via its transform map, then — when IP address ingestion is
        enabled — creates the managed IP, steals primary_ip4 from any other
        device that holds it, and sets it as this device's primary IP when it
        matches the device's login IP.
        """
        device_interface["loginIp"] = device.get("loginIp")
        interface_object = self.get_model_or_update(
            "dcim", "interface", device_interface, uuid=device_uuid
        )

        self.logger.increment_statistics(
            model="interface",
            current=self.interface_count,
            total=self.interface_count_total,
        )
        self.interface_count += 1

        if self.settings.get("ipaddress"):
            # managed_ips is keyed by device serial, then interface name.
            managed_ip = managed_ips.get(device_object.serial, {}).get(
                interface_object.name
            )
            if managed_ip:
                ip_address_obj = self.get_model_or_update(
                    "ipam",
                    "ipaddress",
                    managed_ip,
                )
                try:
                    # If another device currently claims this IP as primary,
                    # release it so it can be reassigned here.
                    other_device = Device.objects.get(primary_ip4=ip_address_obj)
                    if other_device and device_object != other_device:
                        other_device.primary_ip4 = None
                        other_device.save()
                except ObjectDoesNotExist:
                    pass

                # The interface carrying the login IP becomes the primary IP.
                if device.get("loginIp") == device_interface.get("primaryIp"):
                    device_object.primary_ip4 = ip_address_obj
                    device_object.save()

        return True

    def collect_data(self):
        """Gather all snapshot data needed for the sync.

        Remote sources read the pre-pushed ``ipf_data`` rows; local sources
        query the IP Fabric API (excluding aws/azure vendors and optionally
        restricting to the configured sites). Returns an 8-tuple:
        ``(site_dict, devices, interface_dict, part_numbers, vrfs, vlans,
        networks, managed_ips)``.

        :raises SyncError: when collection fails or a remote source has no
            pushed snapshot data.
        """
        try:
            self.logger.log_info(
                "Collecting information from IP Fabric",
                obj=self.sync.snapshot_data.source,
            )
            data = {}
            if self.sync.snapshot_data.source.type == IPFabricSourceTypeChoices.REMOTE:
                self.logger.log_info(
                    "Remote collector checking for snapshot data.", obj=self.sync
                )
                if not self.sync.snapshot_data.ipf_data.count() > 0:
                    raise SyncError(
                        "No snapshot data available. This is a remote sync. Push data to NetBox first."
                    )
                # Each dataset was pushed as typed rows; pull the raw `data`
                # payloads back out per type.
                data["site"] = list(
                    self.sync.snapshot_data.ipf_data.filter(type="site").values_list(
                        "data", flat=True
                    )
                )
                data["device"] = list(
                    self.sync.snapshot_data.ipf_data.filter(type="device").values_list(
                        "data", flat=True
                    )
                )
                data["virtualchassis"] = list(
                    self.sync.snapshot_data.ipf_data.filter(
                        type="virtualchassis"
                    ).values_list("data", flat=True)
                )
                data["interface"] = list(
                    self.sync.snapshot_data.ipf_data.filter(
                        type="interface"
                    ).values_list("data", flat=True)
                )
                data["inventoryitem"] = list(
                    self.sync.snapshot_data.ipf_data.filter(
                        type="inventoryitem"
                    ).values_list("data", flat=True)
                )
                data["vlan"] = list(
                    self.sync.snapshot_data.ipf_data.filter(type="vlan").values_list(
                        "data", flat=True
                    )
                )
                data["vrf"] = list(
                    self.sync.snapshot_data.ipf_data.filter(type="vrf").values_list(
                        "data", flat=True
                    )
                )
                data["prefix"] = list(
                    self.sync.snapshot_data.ipf_data.filter(type="prefix").values_list(
                        "data", flat=True
                    )
                )
                data["ipaddress"] = list(
                    self.sync.snapshot_data.ipf_data.filter(
                        type="ipaddress"
                    ).values_list("data", flat=True)
                )
            else:
                self.logger.log_info(
                    "Local collector being used for snapshot data.", obj=self.sync
                )
                # Cloud "devices" from these vendors are excluded from the
                # device inventory query.
                excluded_vendors = ["aws", "azure"]

                query_filter = {
                    "and": [{"vendor": ["neq", vendor]} for vendor in excluded_vendors]
                }
                # filter = {"and": [{"vendor": ["neq", "aws"]}, {"vendor": ["neq", "azure"]}]}

                # Optional restriction to the sites selected for ingestion.
                if ingestion_sites := self.settings.get("sites"):
                    site_filter = {
                        "or": [{"siteName": ["eq", site]} for site in ingestion_sites]
                    }
                    query_filter["and"].append(site_filter)

                    self.logger.log_info(
                        f"Creating site filter {json.dumps(site_filter)}", obj=self.sync
                    )
                else:
                    site_filter = {}

                data["site"] = self.client.inventory.sites.all(
                    snapshot_id=self.settings["snapshot_id"]
                )

                data["device"] = self.client.inventory.devices.all(
                    snapshot_id=self.settings["snapshot_id"], filters=query_filter
                )

                data[
                    "virtualchassis"
                ] = self.client.technology.platforms.stacks_members.all(
                    snapshot_id=self.settings["snapshot_id"], filters=site_filter
                )

                data["interface"] = self.client.inventory.interfaces.all(
                    snapshot_id=self.settings["snapshot_id"]
                )

                # Part numbers: only rows with both a serial and a name.
                data["inventoryitem"] = self.client.inventory.pn.all(
                    snapshot_id=self.settings["snapshot_id"],
                    filters={
                        "and": [{"sn": ["empty", False]}, {"name": ["empty", False]}]
                    },
                )

                data["vlan"] = self.client.technology.vlans.site_summary.all(
                    snapshot_id=self.settings["snapshot_id"], filters=site_filter
                )

                data["vrf"] = self.client.technology.routing.vrf_detail.all(
                    snapshot_id=self.settings["snapshot_id"], filters=site_filter
                )

                # Networks: require a non-empty `net`, combined with the site
                # filter when one is configured.
                if site_filter:
                    networks_filter = {
                        "and": [site_filter, {"and": [{"net": ["empty", False]}]}]
                    }
                else:
                    networks_filter = {"and": [{"net": ["empty", False]}]}
                self.logger.log_info(f"Creating network filter: `{networks_filter}`")
                data["prefix"] = self.client.technology.managed_networks.networks.all(
                    snapshot_id=self.settings["snapshot_id"], filters=networks_filter
                )

                data[
                    "ipaddress"
                ] = self.client.technology.addressing.managed_ip_ipv4.all(
                    snapshot_id=self.settings["snapshot_id"]
                )
        except Exception as e:
            self.logger.log_failure(
                f"Error collecting data from IP Fabric: {e}", obj=self.sync
            )
            raise SyncError(f"Error collecting data from IP Fabric: {e}")

        self.logger.log_info(
            f"{len(data['site'])} sites collected", obj=self.sync.snapshot_data.source
        )
        self.logger.log_info(
            f"{len(data['device'])} devices collected",
            obj=self.sync.snapshot_data.source,
        )
        self.logger.log_info(
            f"{len(data['virtualchassis'])} stack members collected",
            obj=self.sync.snapshot_data.source,
        )

        self.logger.log_info(
            f"{len(data['interface'])} interfaces collected",
            obj=self.sync.snapshot_data.source,
        )

        self.logger.log_info(
            f"{len(data.get('inventoryitem', []))} part numbers collected",
            obj=self.sync.snapshot_data.source,
        )

        self.logger.log_info(
            f"{len(data.get('vlan', []))} VLANs collected",
            obj=self.sync.snapshot_data.source,
        )

        self.logger.log_info(
            f"{len(data.get('vrf', []))} VRFs collected",
            obj=self.sync.snapshot_data.source,
        )

        self.logger.log_info(
            f"{len(data.get('prefix', []))} networks collected",
            obj=self.sync.snapshot_data.source,
        )

        self.logger.log_info(
            f"{len(data.get('ipaddress', []))} management IP's collected",
            obj=self.sync.snapshot_data.source,
        )

        self.logger.log_info("Ordering devices", obj=self.sync)

        # Reshape the flat API rows into lookup structures for the sync loop.
        members = order_members(data.get("virtualchassis", []))
        devices = order_devices(data.get("device", []), members)

        self.logger.log_info("Ordering Part Numbers", obj=self.sync)

        part_numbers = order_pn(data.get("inventoryitem", []))

        self.logger.log_info("Ordering VRF's", obj=self.sync)

        vrfs = order_vrf(data["vrf"])

        managed_ips = {}
        site_dict = {}
        interface_dict = {}
        # Sites keyed by name for direct lookup during device sync.
        for site in data["site"]:
            site_dict[site["siteName"]] = site

        # Interfaces grouped by device serial number.
        for interface in data["interface"]:
            if int_sn := interface.get("sn"):
                if interface_dict.get(int_sn):
                    interface_dict[int_sn].append(interface)
                else:
                    interface_dict[int_sn] = [interface]

        # Work out which source column the interface transform map uses for
        # the NetBox interface name, so managed IPs can be keyed the same way.
        interface_key = "nameOriginal"
        try:
            int_transform_map = self.transform_map.objects.filter(
                target_model__app_label="dcim", target_model__model="interface"
            ).first()
            int_name_field_map = int_transform_map.field_maps.filter(
                target_field="name"
            ).first()
            interface_key = int_name_field_map.source_field
        except Exception as e:
            self.logger.log_failure(
                f"Error collecting information about transform map for interface name: {e}",
                obj=self.sync,
            )
            raise SyncError(f"Error collecting source column name for interface: {e}")

        for ip in data["ipaddress"]:
            # Find corresponding interface list by serial number (sn)
            device_interfaces = interface_dict.get(ip["sn"], [])

            # Use filter to find the interface with the matching intName
            filtered_interface = list(
                filter(lambda d: d["intName"] == ip["intName"], device_interfaces)
            )

            if filtered_interface:
                ip["nameOriginal"] = filtered_interface[0]["nameOriginal"]
            if ip[interface_key]:
                int_name = ip[interface_key]
            else:
                int_name = ip["intName"]

            # managed_ips: device serial -> {interface name: ip row}.
            if managed_ips.get(ip["sn"]):
                managed_ips[ip["sn"]][int_name] = ip
            else:
                managed_ips[ip["sn"]] = {int_name: ip}

        return (
            site_dict,
            devices,
            interface_dict,
            part_numbers,
            vrfs,
            data["vlan"],
            data["prefix"],
            managed_ips,
        )

    def sync_devices(self, branch=None):
        """Run the full ingestion: devices and their related objects.

        Per device: site, manufacturer, device type, platform, role, the
        device itself, optional virtual chassis membership, interfaces (and
        managed IPs), and VRFs. Afterwards: inventory items, VLANs and
        prefixes, each gated by its settings flag.
        """
        self.logger.log_info("Starting device sync", obj=self.sync)

        (
            site_dict,
            devices,
            interface_dict,
            part_numbers,
            vrfs,
            vlans,
            networks,
            managed_ips,
        ) = self.collect_data()
        vlan_count = 1
        vrf_count = 1
        network_count = 1
        device_vrfs_total = 0

        for device_count, device in enumerate(devices, start=1):
            logger.info(f"Device {device_count} out of {len(devices)}")
            self.logger.increment_statistics(
                model="device", current=device_count, total=len(devices)
            )

            # One UUID per device groups all related objects in the
            # relationship store.
            device_uuid = str(uuid.uuid4())

            site_object = self.get_model_or_update(
                "dcim", "site", site_dict[device["siteName"]], uuid=device_uuid
            )

            self.get_model_or_update("dcim", "manufacturer", device, uuid=device_uuid)
            self.get_model_or_update("dcim", "devicetype", device, uuid=device_uuid)

            self.get_model_or_update("dcim", "platform", device, uuid=device_uuid)

            self.get_model_or_update("dcim", "devicerole", device, uuid=device_uuid)

            device_object = self.get_model_or_update(
                "dcim", "device", device, uuid=device_uuid
            )

            # Stamp provenance custom fields on site and device.
            site_object.custom_field_data[
                "ipfabric_source"
            ] = self.sync.snapshot_data.source.pk

            device_object.custom_field_data[
                "ipfabric_source"
            ] = self.sync.snapshot_data.source.pk
            if branch:
                site_object.custom_field_data["ipfabric_branch"] = branch.pk
                device_object.custom_field_data["ipfabric_branch"] = branch.pk

            site_object.save()
            device_object.save()

            if self.settings.get("virtualchassis"):
                if member := device.get("virtual_chassis"):
                    self.get_model_or_update("dcim", "virtualchassis", member)
                    # Re-run the device map so it picks up VC membership.
                    device_object = self.get_model_or_update(
                        "dcim", "device", device, uuid=device_uuid
                    )

            if device_object and self.settings.get("interface"):
                device_interfaces = interface_dict.get(device.get("sn"), [])
                self.interface_count_total += len(device_interfaces)
                for device_interface in device_interfaces:
                    self.create_interface(
                        device_interface,
                        device_uuid,
                        managed_ips,
                        device_object,
                        device,
                    )
                    # x = threading.Thread(target=self.create_interface, args=((device_interface, device_uuid, managed_ips, device_object, device)))
                    # threads.append(x)
                    # x.start()

            if self.settings.get("vrf"):
                device_vrfs = vrfs.get(device_object.serial, [])
                device_vrfs_total += len(device_vrfs)
                for vrf in device_vrfs:
                    self.get_model_or_update("ipam", "vrf", vrf, uuid=device_uuid)
                    self.logger.increment_statistics(
                        model="vrf", current=vrf_count, total=device_vrfs_total
                    )
                    vrf_count += 1

            # NOTE(review): redundant — enumerate() reassigns device_count on
            # the next iteration, so this increment has no effect.
            device_count += 1

        if self.settings.get("inventoryitem"):
            # Inventory items are attached to ALL devices in NetBox, not just
            # the ones processed above.
            devices = Device.objects.all()
            for device in devices:
                device_parts = part_numbers.get(device.serial, [])
                self.inventoryitem_count_total += len(device_parts)
                for part in device_parts:
                    self.get_model_or_update("dcim", "inventoryitem", part)
                    self.logger.increment_statistics(
                        model="inventory_item",
                        current=self.inventoryitem_count,
                        total=self.inventoryitem_count_total,
                    )
                    self.inventoryitem_count += 1

        if self.settings.get("vlan"):
            for vlan in vlans:
                self.get_model_or_update("ipam", "vlan", vlan)
                self.logger.increment_statistics(
                    model="vlan", current=vlan_count, total=len(vlans)
                )
                vlan_count += 1

        if self.settings.get("prefix"):
            for network in networks:
                self.get_model_or_update("ipam", "prefix", network)
                self.logger.increment_statistics(
                    model="prefix", current=network_count, total=len(networks)
                )
                network_count += 1

    def sync(self):
        # NOTE(review): the instance attribute `self.sync` (set in __init__)
        # shadows this method on instances, so `runner.sync()` resolves to
        # the stored sync object, not this method — confirm intended usage.
        self.sync_devices()
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
|
|
3
|
+
from django.core.cache import cache
|
|
4
|
+
from django.utils import timezone
|
|
5
|
+
from extras.choices import LogLevelChoices
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class SyncLogging:
    """Accumulates sync log entries and progress statistics in the Django cache.

    Each logged entry is a ``(timestamp, level, object-repr, object-url,
    message)`` tuple; the whole ``log_data`` payload is re-written to the
    cache on every log or statistics update.
    """

    def __init__(self, key_prefix="ipfabric_sync", job=None, cache_timeout=3600):
        self.key_prefix = key_prefix
        self.job_id = job
        self.cache_key = f"{self.key_prefix}_{job}"
        self.cache_timeout = cache_timeout
        self.log_data = {"logs": [], "statistics": {}}
        self.logger = logging.getLogger("ipfabric.sync")

    def _log(self, obj, message, level=LogLevelChoices.LOG_DEFAULT):
        """Append one entry and persist to the cache.

        Do not call directly; use the log_* wrappers below.
        """
        if level not in LogLevelChoices.values():
            raise Exception(f"Unknown logging level: {level}")
        obj_url = obj.get_absolute_url() if hasattr(obj, "get_absolute_url") else None
        entry = (
            timezone.now().isoformat(),
            level,
            str(obj) if obj else None,
            obj_url,
            message,
        )
        self.log_data["logs"].append(entry)
        cache.set(self.cache_key, self.log_data, self.cache_timeout)

    def log(self, message):
        """Record a message not tied to any particular object."""
        self._log(None, message, level=LogLevelChoices.LOG_DEFAULT)
        self.logger.info(message)

    def log_success(self, message, obj=None):
        """Record a success against *obj*."""
        self._log(obj, message, level=LogLevelChoices.LOG_SUCCESS)
        self.logger.info(f"Success | {obj}: {message}")

    def log_info(self, message: str, obj=None):
        """Record an informational message against *obj*."""
        self._log(obj, message, level=LogLevelChoices.LOG_INFO)
        self.logger.info(f"Info | {obj}: {message}")

    def log_warning(self, message, obj=None):
        """Record a warning against *obj*."""
        self._log(obj, message, level=LogLevelChoices.LOG_WARNING)
        self.logger.info(f"Warning | {obj}: {message}")

    def log_failure(self, message, obj=None):
        """Record a failure against *obj*."""
        self._log(obj, message, level=LogLevelChoices.LOG_FAILURE)
        self.logger.info(f"Failure | {obj}: {message}")

    def increment_statistics(self, model, current=None, total=None):
        """Update the current/total progress counters for *model* and persist."""
        statistics = self.log_data.get("statistics")
        entry = statistics.get(model)
        if not entry:
            statistics[model] = {"current": current, "total": total}
        else:
            if total:
                entry["total"] = total
            if current:
                entry["current"] = current
        cache.set(self.cache_key, self.log_data, self.cache_timeout)

    def clear_log(self):
        """Drop accumulated log entries (statistics kept; cache not rewritten)."""
        self.log_data["logs"] = []

    @classmethod
    def retrieve_from_cache(cls, key_prefix="log"):
        """Rebuild a SyncLogging instance from cached log data, if any.

        NOTE(review): this reads ``f"{key_prefix}_log"`` while __init__
        writes under ``f"{key_prefix}_{job}"`` — confirm the intended key
        scheme against callers.
        """
        cache_key = f"{key_prefix}_log"
        cached = cache.get(cache_key)
        instance = cls(key_prefix)
        if cached is not None:
            instance.log_data = cached
        return instance
|