ipfabric_netbox-4.3.2b10-py3-none-any.whl → ipfabric_netbox-4.3.2b11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- ipfabric_netbox/__init__.py +2 -2
- ipfabric_netbox/choices.py +2 -0
- ipfabric_netbox/data/endpoint.json +5 -0
- ipfabric_netbox/data/filters.json +1 -1
- ipfabric_netbox/data/transform_map.json +3 -3
- ipfabric_netbox/forms.py +10 -5
- ipfabric_netbox/jobs.py +10 -3
- ipfabric_netbox/migrations/0023_populate_filters_data.py +24 -0
- ipfabric_netbox/migrations/0025_add_vss_chassis_endpoint.py +166 -0
- ipfabric_netbox/models.py +72 -29
- ipfabric_netbox/templatetags/ipfabric_netbox_helpers.py +7 -4
- ipfabric_netbox/tests/test_forms.py +93 -0
- ipfabric_netbox/tests/test_views.py +1 -1
- ipfabric_netbox/utilities/endpoint.py +53 -0
- ipfabric_netbox/utilities/ipfutils.py +252 -174
- ipfabric_netbox/utilities/transform_map.py +18 -5
- {ipfabric_netbox-4.3.2b10.dist-info → ipfabric_netbox-4.3.2b11.dist-info}/METADATA +3 -4
- {ipfabric_netbox-4.3.2b10.dist-info → ipfabric_netbox-4.3.2b11.dist-info}/RECORD +19 -18
- {ipfabric_netbox-4.3.2b10.dist-info → ipfabric_netbox-4.3.2b11.dist-info}/WHEEL +1 -1
ipfabric_netbox/utilities/ipfutils.py

@@ -14,7 +14,12 @@ from dcim.models import Device
 from dcim.models import Site
 from dcim.models import VirtualChassis
 from dcim.signals import assign_virtualchassis_master
-from dcim.signals import sync_cached_scope_fields
+
+try:
+    # Got added in NetBox 4.4.9
+    from dcim.signals import sync_cached_scope_fields
+except ImportError:
+    sync_cached_scope_fields = None
 from django.conf import settings
 from django.core.exceptions import MultipleObjectsReturned
 from django.core.exceptions import ObjectDoesNotExist
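The guarded import in the hunk above is the usual pattern for depending on a symbol that only exists in newer NetBox releases: import it when available, otherwise bind the name to a None sentinel and have callers check it (the sync code later in this diff does exactly that before connecting or disconnecting the signal). A minimal, self-contained sketch of the same pattern follows; `optional_feature` and `use_feature` are illustrative stand-ins, not names from the plugin.

# Sketch of the version-guarded import pattern used above.
# `optional_feature` stands in for `dcim.signals.sync_cached_scope_fields`.
try:
    from math import sumprod as optional_feature  # only exists on Python >= 3.12
except ImportError:
    optional_feature = None

def use_feature(values: list[float]) -> float:
    # Callers must tolerate the sentinel, just as the plugin checks
    # `if sync_cached_scope_fields is not None:` before using the signal.
    if optional_feature is None:
        return sum(v * v for v in values)  # fallback for older runtimes
    return optional_feature(values, values)

print(use_feature([1.0, 2.0, 3.0]))  # 14.0 on either runtime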
@@ -224,8 +229,11 @@ class DataRecord:
         self._hash = None # Invalidate cached hash


-def order_members(members: list[dict]) -> dict[str, list[dict]]:
-    """Order
+def order_members(members: list[dict], sn_column: str) -> dict[str, list[dict]]:
+    """Order stack members to dict, where key is master serial number and values are all members.
+    For stack members table we get `memberSn` column for member serial number.
+    For VSS chassis table we get `chassisSn` column for member serial number.
+    """
     devices = {}

     for member in members:
@@ -233,7 +241,7 @@ def order_members(members: list[dict]) -> dict[str, list[dict]]:
         # This can be spotted by checking if `sn` is different from `memberSn` for the master device
         # Plus `sn` will be IP of loopback in hex...
         master_serial = member.get("sn")
-        if master_serial and member.get("memberSn"):
+        if master_serial and member.get(sn_column):
             if master_serial in devices:
                 devices[master_serial].append(member)
             else:
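To make the new `sn_column` parameter concrete, the sketch below restates the grouping in simplified form with invented sample rows. It is an illustration of the behavior, not the package's exact implementation.

def order_members(members: list[dict], sn_column: str) -> dict[str, list[dict]]:
    # Simplified re-statement of the grouping logic, for illustration only.
    devices: dict[str, list[dict]] = {}
    for member in members:
        master_serial = member.get("sn")
        if master_serial and member.get(sn_column):
            devices.setdefault(master_serial, []).append(member)
    return devices

# Stack member rows carry `memberSn`; VSS chassis rows carry `chassisSn`:
stack_rows = [
    {"sn": "MASTER1", "memberSn": "MASTER1", "member": 1},
    {"sn": "MASTER1", "memberSn": "MEMBER2", "member": 2},
]
vss_rows = [{"sn": "VSS1", "chassisSn": "CH1", "chassisId": 1}]

members = order_members(stack_rows, "memberSn")
members.update(order_members(vss_rows, "chassisSn"))
print(members)  # {'MASTER1': [<2 rows>], 'VSS1': [<1 row>]}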
@@ -265,17 +273,20 @@ def prepare_devices(
         if child_members := members.get(device.get("sn")):
             # This device is the VC master, and we're iterating over all it's members
             for child_member in child_members:
+                member_id = child_member.get("member") or child_member.get("chassisId")
+                member_sn = child_member.get("memberSn") or child_member.get(
+                    "chassisSn"
+                )
                 # There is physically no device with hostname matching the virtual chassis
                 # There are only members, so "hostname/1", "hostname/2", etc.
                 new_device = deepcopy(device)
-                new_device[
-                    "hostname"
-                ] = f"{device['hostname']}/{child_member.get('member')}"
+                new_device["hostname"] = f"{device['hostname']}/{member_id}"
                 new_device["virtual_chassis"] = child_member
-                if device.get("sn") != child_member.get("memberSn"):
+                if device.get("sn") != member_sn:
                     # VC members (non-master) are not in Device table, need to add them as new Device
-
-                    new_device["sn"] = child_member.get("memberSn")
+                    # VSS Chassis have no model in the table, get it from master device
+                    new_device["model"] = child_member.get("pn") or device["model"]
+                    new_device["sn"] = member_sn
                     all_devices.append(new_device)
                 else:
                     # Master device, create the virtual chassis based on it
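The two `or`-coalescing lines added above are what let one loop consume both stack-member rows and VSS-chassis rows. A small hedged illustration with invented values; the field names `member`, `memberSn`, `chassisId`, `chassisSn`, and `pn` follow the diff:

# Illustrative rows only; values are made up.
stack_member = {"member": 2, "memberSn": "MEM2"}
vss_member = {"chassisId": 1, "chassisSn": "CH1", "pn": "WS-C4500X"}

for child_member in (stack_member, vss_member):
    # Same coalescing the diff introduces: stack columns first, VSS second.
    member_id = child_member.get("member") or child_member.get("chassisId")
    member_sn = child_member.get("memberSn") or child_member.get("chassisSn")
    print(member_id, member_sn)
# 2 MEM2
# 1 CH1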
@@ -577,7 +588,16 @@ class IPFabricSyncRunner(object):
             enabled_models.append("dcim.interface")
         transform_maps = self.sync.get_transform_maps()
         for endpoint in IPFabricEndpoint.objects.all():
-            tms_for_endpoint = transform_maps.filter(source_endpoint=endpoint)
+            # FIXME: Dirty hack to sync VSS, remove when IN-68 is getting done
+            if endpoint.endpoint == "/technology/platforms/vss/chassis":
+                tms_for_endpoint = transform_maps.filter(
+                    source_endpoint__endpoint="/technology/platforms/stack/members"
+                )
+            else:
+                tms_for_endpoint = transform_maps.filter(
+                    source_endpoint=endpoint
+                )
+
             if not tms_for_endpoint.exists():
                 raise SyncError(
                     f"No transform map found for endpoint `{endpoint.endpoint}`."
@@ -612,30 +632,46 @@ class IPFabricSyncRunner(object):
         return data

     @cache
-    def get_transform_map(self, app: str, model: str) -> "IPFabricTransformMap":
+    def get_transform_map(
+        self, app: str, model: str, endpoint: str = None
+    ) -> "IPFabricTransformMap":
         """Get transform map for given app and model. Cached to improve performance."""
-        return self.transform_maps.get(
-            target_model__app_label=app, target_model__model=model
-        )
+        filter_kwargs = {
+            "target_model__app_label": app,
+            "target_model__model": model,
+        }
+        if endpoint:
+            filter_kwargs["source_endpoint__endpoint"] = endpoint
+        return self.transform_maps.get(**filter_kwargs)

     def create_new_data_records(
-        self, app: str, model: str, data: list[dict] | dict[str, list[dict]]
+        self,
+        app: str,
+        model: str,
+        data: list[dict] | dict[str, list[dict]],
+        endpoint: str = None,
     ) -> set[DataRecord]:
         """Create DataRecord objects for given app, model and data list if enabled in settings."""
         if not self.sync.parameters.get(f"{app}.{model}"):
             return set()
         if isinstance(data, dict):
             # Data is either already list or full data where we have to choose the list
-            transform_map = self.get_transform_map(app=app, model=model)
+            transform_map = self.get_transform_map(
+                app=app, model=model, endpoint=endpoint
+            )
             data = data.get(transform_map.source_endpoint.endpoint, [])
         return set(
-            self.create_new_data_record(app=app, model=model, data=item)
+            self.create_new_data_record(
+                app=app, model=model, data=item, endpoint=endpoint
+            )
             for item in data
         )

-    def create_new_data_record(self, app: str, model: str, data: dict) -> DataRecord:
+    def create_new_data_record(
+        self, app: str, model: str, data: dict, endpoint: str = None
+    ) -> DataRecord:
         """Extract only relevant source data according to transform map configuration."""
-        transform_map = self.get_transform_map(app=app, model=model)
+        transform_map = self.get_transform_map(app=app, model=model, endpoint=endpoint)
         model_string = f"{app}.{model}"
         try:
             source_data = transform_map.strip_source_data(data)
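A note on the `@cache` decorator kept on `get_transform_map`: `functools.cache` memoizes on the full argument tuple including `self`, so adding the optional `endpoint` argument simply widens the cache key. A toy sketch of that behavior, using a stand-in class rather than the plugin's runner:

from functools import cache

class TransformMapRegistry:
    """Toy stand-in for the sync runner; not the plugin's actual class."""

    def __init__(self, maps: dict[tuple[str, str, str | None], str]):
        self._maps = maps

    @cache  # keyed on (self, app, model, endpoint), so repeat lookups are free
    def get_transform_map(self, app: str, model: str, endpoint: str | None = None) -> str:
        print(f"querying for {(app, model, endpoint)}")  # fires once per key
        return self._maps[(app, model, endpoint)]

registry = TransformMapRegistry({("dcim", "device", None): "device-map"})
registry.get_transform_map("dcim", "device")  # prints, then returns "device-map"
registry.get_transform_map("dcim", "device")  # cache hit: no print

One caveat with `functools.cache` on instance methods is that the cache holds a reference to every `self` it has seen for the life of the process; for a short-lived sync runner that is typically acceptable.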
@@ -647,66 +683,75 @@ class IPFabricSyncRunner(object):
             model_string=model_string, data=source_data, transform_map=transform_map
         )

-    def preprocess_data(self, data: dict) -> dict[str, set[DataRecord]]:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            "dcim.
-
-
-
-            ),
-
-
-
-
-            ),
-
-
-
-
-
-
-
-
-                data=data,
-            ),
-            "ipam.ipaddress": set(),
+    def _get_custom_preprocessor(
+        self, model_string: str, endpoint: str
+    ) -> Callable | None:
+        """
+        Get custom preprocessing function for a model+endpoint combination if one exists.
+
+        Args:
+            model_string: The model in 'app.model' format (e.g., 'dcim.virtualchassis')
+            endpoint: The source endpoint (e.g., '/technology/platforms/stack/members')
+
+        Returns:
+            Callable preprocessor function or None if no custom preprocessor exists
+        """
+        preprocessors = {
+            # Devices and related models processed together (from /inventory/devices)
+            ("dcim.device", "/inventory/devices"): self._preprocess_devices,
+            ("dcim.manufacturer", "/inventory/devices"): self._preprocess_devices,
+            ("dcim.devicetype", "/inventory/devices"): self._preprocess_devices,
+            ("dcim.platform", "/inventory/devices"): self._preprocess_devices,
+            ("dcim.devicerole", "/inventory/devices"): self._preprocess_devices,
+            # Virtual chassis from stack members
+            (
+                "dcim.virtualchassis",
+                "/technology/platforms/stack/members",
+            ): self._preprocess_devices,
+            # Interfaces and MAC addresses processed together
+            ("dcim.interface", "/inventory/interfaces"): self._preprocess_interfaces,
+            ("dcim.macaddress", "/inventory/interfaces"): self._preprocess_interfaces,
+            # IP addresses need interface data
+            (
+                "ipam.ipaddress",
+                "/technology/addressing/managed-ip/ipv4",
+            ): self._preprocess_ipaddresses,
         }

-        if self.sync.parameters.get("dcim.virtualchassis"):
-            self.logger.log_info("Preparing virtual chassis members", obj=self.sync)
-            members = order_members(data.get("/technology/platforms/stack/members", []))
-        else:
-            members = []
+        return preprocessors.get((model_string, endpoint))

-
-
+    def _preprocess_devices(self, data: dict, records: dict) -> None:
+        """Custom preprocessing for devices - single iteration for efficiency."""
+        if not (
+            self.sync.parameters.get("dcim.device")
+            or self.sync.parameters.get("dcim.virtualchassis")
+            or self.sync.parameters.get("dcim.interface")
+            or self.sync.parameters.get("dcim.macaddress")
+            or self.sync.parameters.get("ipam.ipaddress")
         ):
-
-
-
+            return
+
+        # Prepare virtual chassis members once
+        members = {}
+        if self.sync.parameters.get("dcim.virtualchassis"):
+            self.logger.log_info("Preparing virtual chassis members", obj=self.sync)
+            members = order_members(
+                data.get("/technology/platforms/stack/members", []), "memberSn"
             )
-
-
-
+            members.update(
+                order_members(
+                    data.get("/technology/platforms/vss/chassis", []), "chassisSn"
+                )
+            )
+
+        self.logger.log_info("Preparing devices", obj=self.sync)
+        devices, virtualchassis = prepare_devices(
+            data.get("/inventory/devices", []), members
+        )

-        # We need to store primary IPs of Devices to assign them later
-        # since they are not stored on Device object directly
-        # TODO: This will be later replaced when we are able to sync from multiple API tables to 1 model
         device_primary_ips = {}

+        # Single iteration creating records for all related models
         for device in devices:
             if self.sync.parameters.get("dcim.manufacturer"):
                 records["dcim.manufacturer"].add(
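The `_get_custom_preprocessor` method added above is a dispatch-table (registry) pattern: several `(model, endpoint)` keys may map to the same handler, and a miss returns None so the caller can fall back to generic record creation. A condensed, runnable sketch of the idea, with stub names not taken from the plugin:

from typing import Callable

class Preprocessors:
    """Minimal sketch of the (model, endpoint) dispatch used above."""

    def _preprocess_devices(self, data: dict, records: dict) -> None:
        records.setdefault("dcim.device", set()).update(
            d.get("sn") for d in data.get("/inventory/devices", [])
        )

    def get(self, model_string: str, endpoint: str) -> Callable | None:
        # Several (model, endpoint) pairs may share one handler, exactly as
        # dcim.device and dcim.manufacturer both route to _preprocess_devices.
        table = {
            ("dcim.device", "/inventory/devices"): self._preprocess_devices,
            ("dcim.manufacturer", "/inventory/devices"): self._preprocess_devices,
        }
        return table.get((model_string, endpoint))

p = Preprocessors()
records: dict = {}
handler = p.get("dcim.device", "/inventory/devices")
if handler:
    handler({"/inventory/devices": [{"sn": "ABC123"}]}, records)
print(records)  # {'dcim.device': {'ABC123'}}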
@@ -714,40 +759,55 @@ class IPFabricSyncRunner(object):
                         app="dcim", model="manufacturer", data=device
                     )
                 )
+
             if self.sync.parameters.get("dcim.devicetype"):
                 records["dcim.devicetype"].add(
                     self.create_new_data_record(
                         app="dcim", model="devicetype", data=device
                     )
                 )
+
             if self.sync.parameters.get("dcim.platform"):
                 records["dcim.platform"].add(
                     self.create_new_data_record(
                         app="dcim", model="platform", data=device
                     )
                 )
+
             if self.sync.parameters.get("dcim.devicerole"):
                 records["dcim.devicerole"].add(
                     self.create_new_data_record(
                         app="dcim", model="devicerole", data=device
                     )
                 )
-
+
             if "virtual_chassis" not in device:
                 device["virtual_chassis"] = None
+
             if self.sync.parameters.get("dcim.device"):
                 records["dcim.device"].add(
                     self.create_new_data_record(app="dcim", model="device", data=device)
                 )
+
             device_primary_ips[device.get("sn")] = device.get("loginIp")

+        self._device_primary_ips = device_primary_ips
         records["dcim.virtualchassis"] = self.create_new_data_records(
             app="dcim", model="virtualchassis", data=virtualchassis
         )

-
-
-
+    def _preprocess_interfaces(self, data: dict, records: dict) -> None:
+        """Custom preprocessing for interfaces and MAC addresses."""
+        if not (
+            self.sync.parameters.get("dcim.interface")
+            or self.sync.parameters.get("dcim.macaddress")
+            or self.sync.parameters.get("ipam.ipaddress")
+        ):
+            return
+
+        if not hasattr(self, "_device_primary_ips"):
+            self._preprocess_devices(data=data, records=records)
+
         interface_key = "nameOriginal"
         try:
             int_transform_map = self.get_transform_map(app="dcim", model="interface")
@@ -755,61 +815,127 @@ class IPFabricSyncRunner(object):
             interface_key = int_name_field_map.source_field
         except Exception as e:
             self.logger.log_failure(
-                f"Error collecting
-                obj=self.sync,
+                f"Error collecting transform map info: {e}", obj=self.sync
             )
             raise SyncError(f"Error collecting source column name for interface: {e}")

         # Store human-readable interface names to use them later for IP Addresses
         readable_int_names = {}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                ]
-
-
-                    self.create_new_data_record(
-                        app="dcim", model="macaddress", data=interface
-                    )
+        self.logger.log_info("Preparing Interfaces", obj=self.sync)
+
+        for interface in data.get("/inventory/interfaces", []):
+            if self.sync.parameters.get("dcim.interface"):
+                interface_record = self.create_new_data_record(
+                    app="dcim", model="interface", data=interface
+                )
+                interface_record.data["loginIp"] = self._device_primary_ips.get(
+                    interface.get("sn")
+                )
+                records["dcim.interface"].add(interface_record)
+
+            readable_int_names[
+                f"{interface.get('sn')}_{interface.get('intName')}"
+            ] = interface.get(interface_key)
+
+            if self.sync.parameters.get("dcim.macaddress"):
+                records["dcim.macaddress"].add(
+                    self.create_new_data_record(
+                        app="dcim", model="macaddress", data=interface
                     )
+                )
+
+        self._readable_int_names = readable_int_names
+
+    def _preprocess_ipaddresses(self, data: dict, records: dict) -> None:
+        """Custom preprocessing for IP addresses."""
+        if not self.sync.parameters.get("ipam.ipaddress"):
+            return
+
+        if not hasattr(self, "_readable_int_names"):
+            self._preprocess_interfaces(data=data, records=records)
+
+        self.logger.log_info("Preparing IP Addresses", obj=self.sync)
+        for ip in data.get("/technology/addressing/managed-ip/ipv4", []):
+            ip["nameOriginal"] = self._readable_int_names.get(
+                f"{ip.get('sn')}_{ip.get('intName')}"
+            )
+            if not ip["nameOriginal"]:
+                continue
+
+            ipaddress_record = self.create_new_data_record(
+                app="ipam", model="ipaddress", data=ip
+            )
+            ipaddress_record.data["is_primary"] = ip.get(
+                "sn"
+            ) in self._device_primary_ips and self._device_primary_ips.get(
+                ip.get("sn")
+            ) == ipaddress_record.data.get(
+                "ip"
+            )
+            records["ipam.ipaddress"].add(ipaddress_record)

-
-
-
-
-
-
+    def preprocess_data(self, data: dict) -> dict[str, set[DataRecord]]:
+        """
+        Preprocess data using hierarchical transform map order.
+
+        Iterates over transform maps in dependency order (parents before children),
+        using custom preprocessing functions where available or direct record creation.
+        Each transform map is processed exactly once, avoiding duplicate iterations.
+        """
+        # Get transform maps in hierarchical order (now returns transform maps directly!)
+        group_ids = self.sync.parameters.get("groups", [])
+        transform_maps_hierarchy = self.sync.get_model_hierarchy(group_ids=group_ids)
+
+        # Initialize records dict for all models (extract unique model strings)
+        records = {}
+        seen_models = set()
+        for transform_map in transform_maps_hierarchy:
+            model_string = f"{transform_map.target_model.app_label}.{transform_map.target_model.model}"
+            if model_string not in seen_models:
+                records[model_string] = set()
+                seen_models.add(model_string)
+
+        # Track which custom preprocessors have run (to avoid duplication)
+        # Use (preprocessor, endpoint) tuple to allow same preprocessor for different endpoints
+        preprocessors_run = set()
+
+        # Process each transform map in hierarchical order
+        for transform_map in transform_maps_hierarchy:
+            app = transform_map.target_model.app_label
+            model = transform_map.target_model.model
+            model_string = f"{app}.{model}"
+            endpoint = transform_map.source_endpoint.endpoint
+
+            # Skip if not enabled in sync parameters
+            if not self.sync.parameters.get(model_string):
+                continue
+
+            # Check for custom preprocessor (pass endpoint for endpoint-specific preprocessing)
+            preprocessor = self._get_custom_preprocessor(model_string, endpoint)
+
+            # Create unique key combining preprocessor function and endpoint
+            preprocessor_key = (preprocessor, endpoint) if preprocessor else None
+
+            if preprocessor and preprocessor_key not in preprocessors_run:
+                # Run custom preprocessor (may populate multiple models)
+                self.logger.log_info(
+                    f"Running custom preprocessor for `{model_string}` from `{endpoint}`",
+                    obj=self.sync,
                 )
-
-
-
-
-
+                preprocessor(data, records)
+                preprocessors_run.add(preprocessor_key)
+            elif not preprocessor:
+                # Standard processing: use the transform map directly (no need to filter!)
+                self.logger.log_info(
+                    f"Preparing `{model_string}` from `{endpoint}`", obj=self.sync
                 )
-
-
-
-
-                    ip.get("sn")
-                ) == ipaddress_record.data.get(
-                    "ip"
+                records[model_string].update(
+                    self.create_new_data_records(
+                        app=app, model=model, data=data, endpoint=endpoint
+                    )
                 )
-                records["ipam.ipaddress"].add(ipaddress_record)

+        # Log statistics
         for model_string, records_set in records.items():
             if self.settings.get(model_string) and len(records_set):
                 self.logger.init_statistics(model_string, len(records_set))
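Because several models share one preprocessor, the new `preprocess_data` dedupes on the `(function, endpoint)` tuple so a shared handler runs only once per endpoint. The following standalone sketch reproduces just that bookkeeping with stub handlers; names here are illustrative:

def run_plan(plan: list[tuple[str, str]]) -> list[str]:
    """Sketch of the dedup bookkeeping in preprocess_data; handlers are stubs."""
    calls: list[str] = []

    def preprocess_devices() -> None:
        calls.append("devices")

    registry = {
        ("dcim.device", "/inventory/devices"): preprocess_devices,
        ("dcim.manufacturer", "/inventory/devices"): preprocess_devices,
    }
    preprocessors_run: set = set()
    for model_string, endpoint in plan:
        preprocessor = registry.get((model_string, endpoint))
        # Key on (function, endpoint): a handler shared by several models
        # still runs only once per endpoint.
        key = (preprocessor, endpoint) if preprocessor else None
        if preprocessor and key not in preprocessors_run:
            preprocessor()
            preprocessors_run.add(key)
    return calls

print(run_plan([
    ("dcim.device", "/inventory/devices"),
    ("dcim.manufacturer", "/inventory/devices"),
]))  # ['devices'] -- the shared handler ran exactly once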
@@ -883,56 +1009,6 @@ class IPFabricSyncRunner(object):
                 self.sync_item(item, cf, ingestion, stats)
                 self.events_clearer.increment()

-    @handle_errors
-    def sync_devices(
-        self,
-        devices: set[DataRecord],
-        cf: bool = False,
-        ingestion: "IPFabricIngestion" = None,
-    ) -> None:
-        """Sync devices separately to handle resetting primary IP."""
-        if not self.settings.get("dcim.device"):
-            self.logger.log_info(
-                "Did not ask to sync devices, skipping.", obj=self.sync
-            )
-            return
-
-        for device in devices:
-            device_obj: "Device | None" = self.sync_item(
-                record=device, cf=cf, ingestion=ingestion
-            )
-
-            if (
-                device_obj is None
-                or device_obj.primary_ip4 is None
-                or device.data.get("loginIp") is not None
-            ):
-                self.events_clearer.increment()
-                continue
-
-            # If device has primary IP assigned in NetBox, but not in IP Fabric, remove it
-            try:
-                connection_name = self.get_db_connection_name()
-                device_obj.refresh_from_db(using=connection_name)
-                device_obj.snapshot()
-                device_obj.primary_ip4 = None
-                device_obj.save(using=connection_name)
-            except Exception as err:
-                _, issue = self.create_or_get_sync_issue(
-                    exception=err,
-                    ingestion=self.ingestion,
-                    message="Error removing primary IP current device.",
-                    model_string=device.model_string,
-                    data=device.data,
-                )
-                self.events_clearer.increment()
-                raise IPAddressPrimaryRemovalError(
-                    data=device.data,
-                    model_string=device.model_string,
-                    issue_id=issue.pk,
-                ) from err
-            self.events_clearer.increment()
-
     @handle_errors
     def sync_ipaddress(self, ip_address: DataRecord) -> "IPAddress | None":
         """Sync a single IP Address to NetBox, separated to use @handle_errors."""
@@ -1037,14 +1113,16 @@ class IPFabricSyncRunner(object):
         self.logger.log_info("Starting data sync.", obj=self.sync)
         try:
             # This signal does not call for snapshot(), causing issue with branching plugin
-            signals.post_save.disconnect(sync_cached_scope_fields, sender=Site)
+            if sync_cached_scope_fields is not None:
+                signals.post_save.disconnect(sync_cached_scope_fields, sender=Site)
             self.sync_items(
                 items=records["dcim.site"],
                 cf=self.sync.update_custom_fields,
                 ingestion=ingestion,
             )
         finally:
-            signals.post_save.connect(sync_cached_scope_fields, sender=Site)
+            if sync_cached_scope_fields is not None:
+                signals.post_save.connect(sync_cached_scope_fields, sender=Site)
         self.sync_items(items=records["dcim.manufacturer"])
         self.sync_items(items=records["dcim.devicetype"])
         self.sync_items(items=records["dcim.platform"])
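Temporarily detaching a `post_save` receiver around a bulk operation, as the try/finally in the hunk above does, can also be packaged as a context manager. The sketch below is generic and hedged: it assumes the receiver was connected without a custom `dispatch_uid`, and it is not code from the plugin.

from contextlib import contextmanager

@contextmanager
def signal_disconnected(signal, receiver, sender):
    # Detach the receiver; Django's Signal.disconnect returns True if it was attached.
    was_connected = signal.disconnect(receiver, sender=sender)
    try:
        yield
    finally:
        # Reattach even if the guarded block raised, mirroring the diff's finally.
        if was_connected:
            signal.connect(receiver, sender=sender)

# Hypothetical usage inside a configured Django/NetBox environment:
#     from django.db.models import signals
#     from dcim.models import Site
#     with signal_disconnected(signals.post_save, sync_cached_scope_fields, Site):
#         ...sync all sites without triggering the receiver...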
@@ -1055,8 +1133,8 @@ class IPFabricSyncRunner(object):
             assign_virtualchassis_master, sender=VirtualChassis
         )
         self.sync_items(items=records["dcim.virtualchassis"])
-        self.sync_devices(
-            devices=records["dcim.device"],
+        self.sync_items(
+            items=records["dcim.device"],
             cf=self.sync.update_custom_fields,
             ingestion=ingestion,
         )
ipfabric_netbox/utilities/transform_map.py

@@ -181,12 +181,15 @@ class RelationshipRecord(Record):
 class TransformMapRecord:
     def __init__(
         self,
-        source_model: str,
         target_model: str,
         fields: tuple[FieldRecord, ...] = tuple(),
         relationships: tuple[RelationshipRecord, ...] = tuple(),
+        # Support both source_model string and source_endpoint string for backward compatibility
+        source_model: str | None = None,
+        source_endpoint: str | None = None,
     ):
         self.source_model = source_model
+        self.source_endpoint = source_endpoint
         self.target_model = target_model
         self.fields = fields
         self.relationships = relationships
@@ -208,10 +211,20 @@ def do_change(
     for change in changes:
         app, model = change.target_model.split(".")
         try:
-            transform_map = IPFabricTransformMap.objects.get(
-                source_model=change.source_model,
-                target_model=ContentType.objects.get(app_label=app, model=model),
-            )
+            if change.source_model:
+                transform_map = IPFabricTransformMap.objects.get(
+                    source_model=change.source_model,
+                    target_model=ContentType.objects.get(
+                        app_label=app, model=model
+                    ),
+                )
+            else:
+                transform_map = IPFabricTransformMap.objects.get(
+                    source_endpoint__endpoint=change.source_endpoint,
+                    target_model=ContentType.objects.get(
+                        app_label=app, model=model
+                    ),
+                )
         except IPFabricTransformMap.DoesNotExist:
             continue
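The two lookup branches added to `do_change` differ only in one filter key, so the same query can be phrased by building the filter kwargs conditionally. A hedged sketch, assuming a `change` object with the `source_model`/`source_endpoint` attributes shown in the `TransformMapRecord` hunk above; `build_lookup` is an illustrative helper, not part of the package:

# Sketch only: collapses the two .get() branches in the diff into one query.
def build_lookup(change, target_ct) -> dict:
    lookup = {"target_model": target_ct}
    if change.source_model:
        lookup["source_model"] = change.source_model
    else:
        lookup["source_endpoint__endpoint"] = change.source_endpoint
    return lookup

# transform_map = IPFabricTransformMap.objects.get(**build_lookup(change, ct))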
{ipfabric_netbox-4.3.2b10.dist-info → ipfabric_netbox-4.3.2b11.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ipfabric_netbox
-Version: 4.3.2b10
+Version: 4.3.2b11
 Summary: NetBox plugin to sync IP Fabric data into NetBox
 License: MIT
 Keywords: netbox,ipfabric,plugin,sync
@@ -25,7 +25,7 @@ Requires-Dist: ipfabric (>=7.0.0,<7.1.0) ; extra == "ipfabric_7_0" and extra !=
 Requires-Dist: ipfabric (>=7.2.0,<7.3.0) ; extra != "ipfabric_7_0" and extra == "ipfabric_7_2" and extra != "ipfabric_7_3" and extra != "ipfabric_7_5"
 Requires-Dist: ipfabric (>=7.3.0,<7.4.0) ; extra != "ipfabric_7_0" and extra != "ipfabric_7_2" and extra == "ipfabric_7_3" and extra != "ipfabric_7_5"
 Requires-Dist: ipfabric (>=7.5.0,<7.6.0) ; extra != "ipfabric_7_0" and extra != "ipfabric_7_2" and extra != "ipfabric_7_3" and extra == "ipfabric_7_5"
-Requires-Dist: netboxlabs-netbox-branching (
+Requires-Dist: netboxlabs-netbox-branching (>=0.7.0)
 Requires-Dist: netutils
 Project-URL: Bug Tracker, https://gitlab.com/ip-fabric/integrations/ipfabric-netbox-sync/-/issues
 Project-URL: Homepage, https://gitlab.com/ip-fabric/integrations/ipfabric-netbox-sync
@@ -66,8 +66,7 @@ These are the required NetBox versions for corresponding plugin version. Any oth

 | Netbox Version | Plugin Version |
 |----------------|----------------|
-| 4.4.
-| 4.4.0 - 4.4.9 | 4.3.0 - 4.3.2 |
+| 4.4.0 and up | 4.3.0 and up |
 | 4.3.0 - 4.3.7 | 4.2.2 |
 | 4.3.0 - 4.3.6 | 4.0.0 - 4.2.1 |
 | 4.2.4 - 4.2.9 | 3.2.2 - 3.2.4 |