ipfabric_netbox 3.2.0__py3-none-any.whl → 3.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ipfabric_netbox has been flagged as possibly problematic.
- ipfabric_netbox/__init__.py +2 -2
- ipfabric_netbox/data/transform_map.json +16 -16
- ipfabric_netbox/exceptions.py +24 -0
- ipfabric_netbox/forms.py +2 -2
- ipfabric_netbox/migrations/0009_transformmap_changes_for_netbox_v4_2.py +0 -3
- ipfabric_netbox/migrations/0010_remove_uuid_from_get_or_create.py +95 -0
- ipfabric_netbox/models.py +58 -63
- ipfabric_netbox/signals.py +29 -0
- ipfabric_netbox/tests/test_models.py +10 -26
- ipfabric_netbox/utilities/ipfutils.py +336 -217
- ipfabric_netbox/utilities/logging.py +12 -7
- ipfabric_netbox/utilities/nbutils.py +0 -26
- {ipfabric_netbox-3.2.0.dist-info → ipfabric_netbox-3.2.2.dist-info}/METADATA +12 -19
- {ipfabric_netbox-3.2.0.dist-info → ipfabric_netbox-3.2.2.dist-info}/RECORD +15 -12
- {ipfabric_netbox-3.2.0.dist-info → ipfabric_netbox-3.2.2.dist-info}/WHEEL +1 -1
ipfabric_netbox/utilities/ipfutils.py

@@ -1,13 +1,18 @@
 import json
 import logging
-import uuid
 from importlib import metadata
+from typing import Callable
+from typing import TYPE_CHECKING

+from core.choices import DataSourceStatusChoices
 from core.exceptions import SyncError
 from dcim.models import Device
+from dcim.models import Interface
+from dcim.models import MACAddress
 from django.conf import settings
+from django.core.exceptions import MultipleObjectsReturned
 from django.core.exceptions import ObjectDoesNotExist
-from django.
+from django.db.models import Model
 from django.utils.text import slugify
 from django_tables2 import Column
 from ipfabric import IPFClient
@@ -16,11 +21,14 @@ from netbox.config import get_config
 from netutils.utils import jinja2_convenience_function

 from ..choices import IPFabricSourceTypeChoices
+from ..exceptions import SearchError
+from ..exceptions import SyncDataError
 from .nbutils import device_serial_max_length
 from .nbutils import order_devices
 from .nbutils import order_members
-
-
+
+if TYPE_CHECKING:
+    from ..models import IPFabricBranch

 logger = logging.getLogger("ipfabric_netbox.utilities.ipf_utils")

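The new TYPE_CHECKING block imports IPFabricBranch for type checkers only, so the `branch: "IPFabricBranch"` annotation added later in this file needs no runtime import of the models module; this is the usual way to sidestep circular or costly imports, although the diff itself does not state the motivation. A generic sketch of the pattern, not part of the package diff, with Decimal standing in for the lazily imported type:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers; never executed at runtime.
    from decimal import Decimal


def format_total(amount: "Decimal") -> str:
    # The annotation is a string, so the name does not have to exist at runtime.
    return f"{amount:.2f}"


print(format_total(19.99))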
@@ -126,19 +134,45 @@ class IPFabricSyncRunner(object):
         self.settings = settings
         self.transform_map = transform_map
         self.sync = sync
-        self.relationship_store = {}
-        self.siteUUID = {}
         if hasattr(self.sync, "logger"):
             self.logger = self.sync.logger
-        self.interface_count_total = 0
-        self.interface_count = 1
-        self.inventoryitem_count = 1
-        self.inventoryitem_count_total = 0

         if self.sync.snapshot_data.status != "loaded":
             raise SyncError("Snapshot not loaded in IP Fabric.")

-
+    @staticmethod
+    def handle_errors(func: Callable):
+        def wrapper(*args, **kwargs):
+            try:
+                return func(*args, **kwargs)
+            except Exception as err:
+                # Log the error to logger outside of job - console/file
+                logger.error(err, exc_info=True)
+                # Logging section for logs inside job - facing user
+                self = args[0]
+                if isinstance(err, SearchError):
+                    if self.settings.get(err.model):
+                        self.logger.log_failure(
+                            f"Aborting syncing {err.model} instance due to above error, please check your transform maps and/or existing data.",
+                            obj=self.sync,
+                        )
+                    else:
+                        self.logger.log_failure(
+                            f"Syncing {err.model} is disabled in settings, but hit above error trying to find the correct item. Please check your transform maps and/or existing data.",
+                            obj=self.sync,
+                        )
+                else:
+                    self.logger.log_failure(
+                        f"Syncing failed with: {err}. See above error for more details.",
+                        obj=self.sync,
+                    )
+                # Make sure the whole sync is failed when we encounter error
+                self.sync.status = DataSourceStatusChoices.FAILED
+                return None
+
+        return wrapper
+
+    def get_model_or_update(self, app, model, data):
         transform_map = self.transform_map.objects.filter(
             target_model__app_label=app, target_model__model=model
         ).first()
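The new handle_errors helper wraps each sync step so a failure on one record is logged to the console/file logger and to the user-facing job log, the run is marked failed, and the wrapped method returns None instead of raising. A minimal, self-contained sketch of that decorator pattern, not part of the package diff; SyncRunner, the messages list and this SearchError are illustrative stand-ins, and unlike the plugin the decorator here takes self explicitly rather than reading args[0]:

import logging
from functools import wraps

logger = logging.getLogger(__name__)


class SearchError(Exception):
    """Raised when an existing object cannot be found unambiguously (illustrative)."""

    def __init__(self, model, context):
        super().__init__(f"lookup failed for {model} with {context}")
        self.model = model
        self.context = context


def handle_errors(func):
    """Log any exception from a sync step, mark the run failed, and return None."""

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as err:
            logger.error(err, exc_info=True)  # console/file log
            if isinstance(err, SearchError):
                self.messages.append(f"aborting {err.model}: {err}")  # user-facing log
            else:
                self.messages.append(f"sync failed: {err}")
            self.status = "failed"  # one bad record fails the whole run
            return None

    return wrapper


class SyncRunner:
    def __init__(self):
        self.status = "running"
        self.messages = []

    @handle_errors
    def sync_model(self, model, data):
        if not data:
            raise SearchError(model, data)
        return {"model": model, **data}


runner = SyncRunner()
print(runner.sync_model("device", {"name": "r1"}))  # {'model': 'device', 'name': 'r1'}
print(runner.sync_model("device", {}))              # None; run marked failed
print(runner.status, runner.messages)

Returning None instead of re-raising is what lets callers skip only the failing record while the overall run still ends up reported as failed.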
@@ -147,84 +181,57 @@ class IPFabricSyncRunner(object):
             raise SystemError(f"No transform map available for {app}: {model}")

         model_settings = self.settings.get(model, False)
-
-
-
-
-
-
-
-
-
-
-            )
-        else:
-            logger.info(f"Getting {model}")
-            coalesce_fields = transform_map.get_coalesce_fields(data)
-            object = get_object_or_404(
-                transform_map.target_model.model_class().objects.all(),
-                **coalesce_fields,
+        try:
+            context = transform_map.get_context(data)
+        except Exception as err:
+            message = f"Error getting context for `{model}`."
+            if isinstance(err, ObjectDoesNotExist):
+                message += " Could not find related object using on template in transform maps."
+            elif isinstance(err, MultipleObjectsReturned):
+                message += " Multiple objects returned using on template in transform maps, the template is not strict enough."
+            self.logger.log_failure(
+                f"<b>{message}</b><br/>data: `{data}`<br/>error: `{err}`", obj=self.sync
             )
+            raise SyncError(err) from err

-
-
-        if store:
-            store[object._meta.model] = object
-        else:
-            self.relationship_store[uuid] = {object._meta.model: object}
+        queryset = transform_map.target_model.model_class().objects

-
-
-
-
-
-
-
-                "dcim", "interface", device_interface, uuid=device_uuid
-            )
-
-            self.logger.increment_statistics(
-                model="interface",
-                current=self.interface_count,
-                total=self.interface_count_total,
-            )
-            self.interface_count += 1
-
-            if self.settings.get("ipaddress"):
-                managed_ip = managed_ips.get(device_object.serial, {}).get(
-                    interface_object.name
-                )
-                if managed_ip:
-                    ip_address_obj = self.get_model_or_update(
-                        "ipam",
-                        "ipaddress",
-                        managed_ip,
+        object = None
+        try:
+            if model_settings:
+                logger.info(f"Creating {model}")
+                object = transform_map.update_or_create_instance(
+                    context=context,
+                    tags=self.sync.tags.all(),
                 )
-
-
-
-
-
-
-
-
-                    if device.get("loginIp") == device_interface.get("primaryIp"):
-                        device_object.primary_ip4 = ip_address_obj
-                        device_object.save()
-
-            if self.settings.get("macaddress") and device_interface.get("mac"):
-                # Need to create MAC Address object before we can assign it to Interface
-                macaddress_data = {
-                    "mac": device_interface.get("mac"),
-                    "id": interface_object.id,
-                }
-                macaddress_object = self.get_model_or_update(
-                    "dcim", "macaddress", macaddress_data, uuid=device_uuid
+            else:
+                logger.info(f"Getting {model}")
+                context.pop("defaults", None)
+                object = queryset.get(**context)
+        except queryset.model.DoesNotExist as err:
+            self.logger.log_failure(
+                f"<b>`{model}` with these keys not found: `{context}`.</b><br/>Original data: `{data}`.",
+                obj=self.sync,
             )
-
-
+            raise SearchError(model=model, context=context, data=data) from err
+        except queryset.model.MultipleObjectsReturned as err:
+            self.logger.log_failure(
+                f"<b>Multiple `{model}` with these keys found: `{context}`.</b><br/>Original data: `{data}`.",
+                obj=self.sync,
+            )
+            raise SearchError(model=model, context=context, data=data) from err
+        except Exception as err:
+            self.logger.log_failure(
+                f"""Failed to create instance of `{model}`:<br/>
+                message: `{err}`<br/>
+                raw data: `{data}`<br/>
+                context: `{context}`<br/>
+                """,
+                obj=self.sync,
+            )
+            raise SyncDataError(model=model, context=context, data=data) from err

-        return
+        return object

     def collect_data(self):
         try:
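get_model_or_update now builds a context from the transform map, then either creates/updates the object (when that model type is enabled in the sync settings) or performs a strict lookup with the "defaults" key stripped, raising SearchError on a missing or ambiguous match and SyncDataError on anything else. A rough, Django-free sketch of that lookup contract, not part of the package diff; the in-memory list stands in for a queryset and the create/update branch is simplified:

class SearchError(Exception):
    pass


def get_or_update(store, enabled, context):
    """Create/update when the model type is enabled, otherwise do a strict lookup."""
    defaults = context.pop("defaults", {}) or {}
    matches = [row for row in store if all(row.get(k) == v for k, v in context.items())]
    if enabled:
        if matches:
            matches[0].update(defaults)
            return matches[0]
        row = {**context, **defaults}
        store.append(row)
        return row
    if not matches:
        raise SearchError(f"not found: {context}")
    if len(matches) > 1:
        raise SearchError(f"multiple objects for: {context}")
    return matches[0]


devices = [{"name": "r1", "site": "hq", "status": "active"}]
print(get_or_update(devices, True, {"name": "r2", "defaults": {"site": "dc1"}}))   # created
print(get_or_update(devices, False, {"name": "r1", "defaults": {"site": "hq"}}))   # strict get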
@@ -295,7 +302,6 @@ class IPFabricSyncRunner(object):
             query_filter = {
                 "and": [{"vendor": ["neq", vendor]} for vendor in excluded_vendors]
             }
-            # filter = {"and": [{"vendor": ["neq", "aws"]}, {"vendor": ["neq", "azure"]}]}

             if ingestion_sites := self.settings.get("sites"):
                 site_filter = {
@@ -304,13 +310,14 @@ class IPFabricSyncRunner(object):
                 query_filter["and"].append(site_filter)

                 self.logger.log_info(
-                    f"Creating site filter {json.dumps(site_filter)}",
+                    f"Creating site filter `{json.dumps(site_filter)}`",
+                    obj=self.sync,
                 )
             else:
                 site_filter = {}

             data["site"] = self.client.inventory.sites.all(
-                snapshot_id=self.settings["snapshot_id"]
+                snapshot_id=self.settings["snapshot_id"], filters=site_filter
             )

             data["device"] = self.client.inventory.devices.all(
@@ -324,13 +331,17 @@ class IPFabricSyncRunner(object):
             )

             data["interface"] = self.client.inventory.interfaces.all(
-                snapshot_id=self.settings["snapshot_id"]
+                snapshot_id=self.settings["snapshot_id"], filters=site_filter
             )

             data["inventoryitem"] = self.client.inventory.pn.all(
                 snapshot_id=self.settings["snapshot_id"],
                 filters={
-                    "and": [
+                    "and": [
+                        site_filter,
+                        {"sn": ["empty", False]},
+                        {"name": ["empty", False]},
+                    ]
                 },
             )

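The collection step now passes the same site_filter to the site, interface, managed-IP and part-number queries, and the part-number query additionally drops rows with an empty serial or name. The hunks do not show how site_filter itself is built, so the "or"/"siteName" shape below is an assumption; the vendor and empty-field expressions mirror the diff. A small sketch, not part of the package diff, of assembling such a filter dictionary before it is passed as filters=:

import json

excluded_vendors = ["aws", "azure"]    # made-up values
ingestion_sites = ["HQ", "BRANCH-01"]  # made-up values

# Exclude unwanted vendors from the device query (shape taken from the diff).
query_filter = {"and": [{"vendor": ["neq", vendor]} for vendor in excluded_vendors]}

# Optionally restrict every table to the selected sites (shape assumed, not shown in the diff).
site_filter = {"or": [{"siteName": ["eq", site]} for site in ingestion_sites]} if ingestion_sites else {}
if site_filter:
    query_filter["and"].append(site_filter)

# Part numbers additionally require a non-empty serial and name (shape taken from the diff).
pn_filter = {"and": [site_filter, {"sn": ["empty", False]}, {"name": ["empty", False]}]}

print(json.dumps(query_filter, indent=2))
print(json.dumps(pn_filter, indent=2))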
@@ -356,7 +367,7 @@ class IPFabricSyncRunner(object):
             data[
                 "ipaddress"
             ] = self.client.technology.addressing.managed_ip_ipv4.all(
-                snapshot_id=self.settings["snapshot_id"]
+                snapshot_id=self.settings["snapshot_id"], filters=site_filter
             )
         except Exception as e:
             self.logger.log_failure(
@@ -413,18 +424,7 @@ class IPFabricSyncRunner(object):

         self.logger.log_info("Ordering Part Numbers", obj=self.sync)

-        part_numbers = order_pn(data.get("inventoryitem", []))
-
-        self.logger.log_info("Ordering VRF's", obj=self.sync)
-
-        vrfs = order_vrf(data["vrf"])
-
-        managed_ips = {}
-        site_dict = {}
         interface_dict = {}
-        for site in data["site"]:
-            site_dict[site["siteName"]] = site
-
         for interface in data["interface"]:
             if int_sn := interface.get("sn"):
                 if interface_dict.get(int_sn):
@@ -448,6 +448,7 @@ class IPFabricSyncRunner(object):
             )
             raise SyncError(f"Error collecting source column name for interface: {e}")

+        managed_ips = {}
         for ip in data["ipaddress"]:
             # Find corresponding interface list by serial number (sn)
             device_interfaces = interface_dict.get(ip["sn"], [])
@@ -463,141 +464,259 @@ class IPFabricSyncRunner(object):
                 int_name = ip[interface_key]
             else:
                 int_name = ip["intName"]
-
-
-
+            if ip["sn"] not in managed_ips:
+                managed_ips[ip["sn"]] = {int_name: [ip]}
+            elif int_name not in managed_ips.get(ip["sn"]):
+                managed_ips[ip["sn"]][int_name] = [ip]
             else:
-                managed_ips[ip["sn"]]
+                managed_ips[ip["sn"]][int_name].append(ip)
+
+        for model, item_count in [
+            ("site", len(data.get("site", []))),
+            ("device", len(devices)),
+            ("interface", len(data.get("interface", []))),
+            ("inventoryitem", len(data.get("inventoryitem", []))),
+            ("vlan", len(data.get("vlan", []))),
+            ("vrf", len(data.get("vrf", []))),
+            ("prefix", len(data.get("prefix", []))),
+            # TODO: Since we sync only those assigned to interfaces, we are skipping some IPs
+            # TODO: This is fixable by syncing IPs separately from interface and only assign them on interfaces
+            ("ipaddress", len(data.get("ipaddress", []))),
+        ]:
+            if self.settings.get(model):
+                self.logger.init_statistics(model, item_count)

         return (
-
+            data["site"],
             devices,
             interface_dict,
-
-
+            data["inventoryitem"],
+            data["vrf"],
             data["vlan"],
             data["prefix"],
             managed_ips,
         )

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                logger.info(f"Device {device_count} out of {len(devices)}")
-                self.logger.increment_statistics(
-                    model="device", current=device_count, total=len(devices)
+    @handle_errors
+    def sync_model(self, app_label: str, model: str, data: dict | None) -> Model | None:
+        """Sync a single item to NetBox."""
+        if not data:
+            return None
+        return self.get_model_or_update(app_label, model, data)
+
+    def sync_items(
+        self,
+        items,
+        app_label: str,
+        model: str,
+        cf: bool = False,
+        branch: "IPFabricBranch" = None,
+    ) -> None:
+        """Sync list of items to NetBox."""
+        if not self.settings.get(model):
+            self.logger.log_info(
+                f"Did not ask to sync {model}s, skipping.", obj=self.sync
             )
+            return
+
+        for item in items:
+            synced_object = self.sync_model(app_label=app_label, model=model, data=item)
+            if synced_object is None:
+                continue
+            # Only log when we successfully synced the item
+            self.logger.increment_statistics(model=model)
+
+            if cf:
+                synced_object.custom_field_data[
+                    "ipfabric_source"
+                ] = self.sync.snapshot_data.source.pk
+                if branch:
+                    synced_object.custom_field_data["ipfabric_branch"] = branch.pk
+                synced_object.save()
+
+    @handle_errors
+    def sync_devices(
+        self,
+        branch,
+        devices,
+        interface_dict,
+        managed_ips,
+    ):
+        for model, name in [
+            ("manufacturer", "manufacturers"),
+            ("devicetype", "device types"),
+            ("platform", "platforms"),
+            ("devicerole", "device roles"),
+            ("virtualchassis", "virtual chassis"),
+            ("device", "devices"),
+            ("inventoryitem", "device inventory items"),
+        ]:
+            if not self.settings.get(model):
+                self.logger.log_info(
+                    f"Did not ask to sync {name}, skipping", obj=self.sync
+                )

-
-
-
-
+        devices_total = len(devices)
+
+        for device in devices:
+            if self.sync_model("dcim", "manufacturer", device) is None:
+                continue
+            if self.sync_model("dcim", "devicetype", device) is None:
+                continue
+            if self.sync_model("dcim", "platform", device) is None:
+                continue
+            if self.sync_model("dcim", "devicerole", device) is None:
+                continue
+
+            virtual_chassis = device.get("virtual_chassis", {})
+            self.sync_model("dcim", "virtualchassis", virtual_chassis)
+
+            if (device_object := self.sync_model("dcim", "device", device)) is None:
+                continue
+
+            if self.settings.get("device"):
+                device_object.custom_field_data[
+                    "ipfabric_source"
+                ] = self.sync.snapshot_data.source.pk
+                if branch:
+                    device_object.custom_field_data["ipfabric_branch"] = branch.pk
+                device_object.save()
+
+            self.logger.increment_statistics(model="device")
+            logger.info(
+                f"Device {self.logger.log_data.get('statistics', {}).get('device', {}).get('current')} out of {devices_total}"
             )

-
-
+            # The Device exists now, so we can update master of the VC.
+            # The logic is handled in transform maps.
+            self.sync_model("dcim", "virtualchassis", virtual_chassis)

-
-
-
+            device_interfaces = interface_dict.get(device.get("sn"), [])
+            for device_interface in device_interfaces:
+                interface_object = self.sync_interface(
+                    device_interface, managed_ips, device_object, device
+                )
+                if interface_object is None:
+                    continue
+                self.logger.increment_statistics(model="interface")
+
+    @handle_errors
+    def sync_ipaddress(
+        self,
+        managed_ip: dict | None,
+        device_object: Device | None,
+        primary_ip: str | None,
+        login_ip: str | None,
+    ):
+        if not self.settings.get("ipaddress") or not managed_ip:
+            return
+        ip_address_obj = self.get_model_or_update(
+            "ipam",
+            "ipaddress",
+            managed_ip,
+        )
+        if ip_address_obj is None:
+            return
+        self.logger.increment_statistics(model="ipaddress")

-
-
+        try:
+            # Removing other IP is done in .signals.clear_other_primary_ip
+            # But do it here too so the change is shown in StagedChange diff
+            other_device = Device.objects.get(primary_ip4=ip_address_obj)
+            if other_device and device_object != other_device:
+                other_device.primary_ip4 = None
+                other_device.save()
+        except ObjectDoesNotExist:
+            pass
+
+        if login_ip == primary_ip:
+            try:
+                device_object.primary_ip4 = ip_address_obj
+                device_object.save()
+            except ValueError as err:
+                self.logger.log_failure(
+                    f"Error assigning primary IP to device: {err}", obj=self.sync
+                )
+                return None
+        return ip_address_obj
+
+    @handle_errors
+    def sync_macaddress(
+        self, data: dict | None, interface_object: Interface
+    ) -> MACAddress | None:
+        if not self.settings.get("macaddress") or not data:
+            return None
+        # Need to create MAC Address object before we can assign it to Interface
+        # TODO: Figure out how to do this using transform maps
+        macaddress_data = {
+            "mac": data,
+            "id": interface_object.pk,
+        }
+        macaddress_object = self.get_model_or_update(
+            "dcim", "macaddress", macaddress_data
+        )
+        try:
+            interface_object.primary_mac_address = macaddress_object
+            interface_object.save()
+        except ValueError as err:
+            self.logger.log_failure(
+                f"Error assigning MAC Address to interface: {err}", obj=self.sync
+            )
+            return None
+        return macaddress_object
+
+    @handle_errors
+    def sync_interface(
+        self,
+        device_interface: dict,
+        managed_ips: dict,
+        device_object: Device | None,
+        device: dict,
+    ):
+        device_interface["loginIp"] = device.get("loginIp")
+        interface_object = self.get_model_or_update(
+            "dcim", "interface", device_interface
+        )
+        if interface_object is None:
+            return None
+
+        for ipaddress in managed_ips.get(device_object.serial, {}).get(
+            interface_object.name, []
+        ):
+            self.sync_ipaddress(
+                ipaddress,
+                device_object,
+                device_interface.get("primaryIp"),
+                device.get("loginIp"),
             )

-
-            "ipfabric_source"
-            ] = self.sync.snapshot_data.source.pk
-
-            device_object.custom_field_data[
-                "ipfabric_source"
-            ] = self.sync.snapshot_data.source.pk
-            if branch:
-                site_object.custom_field_data["ipfabric_branch"] = branch.pk
-                device_object.custom_field_data["ipfabric_branch"] = branch.pk
-
-            site_object.save()
-            device_object.save()
-
-            if self.settings.get("virtualchassis"):
-                if member := device.get("virtual_chassis"):
-                    self.get_model_or_update("dcim", "virtualchassis", member)
-            device_object = self.get_model_or_update(
-                "dcim", "device", device, uuid=device_uuid
-            )
+        self.sync_macaddress(device_interface.get("mac"), interface_object)

-
-            device_interfaces = interface_dict.get(device.get("sn"), [])
-            self.interface_count_total += len(device_interfaces)
-            for device_interface in device_interfaces:
-                self.create_interface(
-                    device_interface,
-                    device_uuid,
-                    managed_ips,
-                    device_object,
-                    device,
-                )
-                # x = threading.Thread(target=self.create_interface, args=((device_interface, device_uuid, managed_ips, device_object, device)))
-                # threads.append(x)
-                # x.start()
-
-            if self.settings.get("vrf"):
-                device_vrfs = vrfs.get(device_object.serial, [])
-                device_vrfs_total += len(device_vrfs)
-                for vrf in device_vrfs:
-                    self.get_model_or_update("ipam", "vrf", vrf, uuid=device_uuid)
-                    self.logger.increment_statistics(
-                        model="vrf", current=vrf_count, total=device_vrfs_total
-                    )
-                    vrf_count += 1
-
-            device_count += 1
-
-        if self.settings.get("inventoryitem"):
-            devices = Device.objects.all()
-            for device in devices:
-                device_parts = part_numbers.get(device.serial, [])
-                self.inventoryitem_count_total += len(device_parts)
-                for part in device_parts:
-                    self.get_model_or_update("dcim", "inventoryitem", part)
-                    self.logger.increment_statistics(
-                        model="inventory_item",
-                        current=self.inventoryitem_count,
-                        total=self.inventoryitem_count_total,
-                    )
-                    self.inventoryitem_count += 1
-
-        if self.settings.get("vlan"):
-            for vlan in vlans:
-                self.get_model_or_update("ipam", "vlan", vlan)
-                self.logger.increment_statistics(
-                    model="vlan", current=vlan_count, total=len(vlans)
-                )
-                vlan_count += 1
+        return interface_object

-
-
-
-
-
-
-
+    def collect_and_sync(self, branch=None) -> None:
+        self.logger.log_info("Starting data sync.", obj=self.sync)
+        (
+            sites,
+            devices,
+            interface_dict,
+            inventory_items,
+            vrfs,
+            vlans,
+            networks,
+            managed_ips,
+        ) = self.collect_data()

-
-
+        self.sync_items(
+            app_label="dcim", model="site", items=sites, cf=True, branch=branch
+        )
+        self.sync_devices(
+            branch,
+            devices,
+            interface_dict,
+            managed_ips,
+        )
+        self.sync_items(app_label="dcim", model="inventoryitem", items=inventory_items)
+        self.sync_items(app_label="ipam", model="vlan", items=vlans)
+        self.sync_items(app_label="ipam", model="vrf", items=vrfs)
+        self.sync_items(app_label="ipam", model="prefix", items=networks)
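The rewritten collection loop groups every managed IP under its device serial number and then its interface name, keeping a list per interface instead of a single entry, and sync_interface later walks managed_ips.get(serial, {}).get(name, []). The grouping, reduced to a standalone sketch with made-up records (not part of the package diff):

ip_rows = [
    {"sn": "SN1", "intName": "Gi0/0", "ip": "10.0.0.1"},
    {"sn": "SN1", "intName": "Gi0/0", "ip": "10.0.0.2"},  # second IP on the same interface
    {"sn": "SN1", "intName": "Gi0/1", "ip": "10.0.1.1"},
    {"sn": "SN2", "intName": "eth0", "ip": "192.0.2.1"},
]

managed_ips = {}
for ip in ip_rows:
    int_name = ip["intName"]
    if ip["sn"] not in managed_ips:
        managed_ips[ip["sn"]] = {int_name: [ip]}
    elif int_name not in managed_ips[ip["sn"]]:
        managed_ips[ip["sn"]][int_name] = [ip]
    else:
        managed_ips[ip["sn"]][int_name].append(ip)

# A device's interface list is then looked up as managed_ips.get(serial, {}).get(name, []).
print(managed_ips["SN1"]["Gi0/0"])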
|