ipfabric_netbox 4.3.2b9__py3-none-any.whl → 4.3.2b11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ipfabric_netbox might be problematic.

Files changed (50)
  1. ipfabric_netbox/__init__.py +1 -1
  2. ipfabric_netbox/api/serializers.py +112 -7
  3. ipfabric_netbox/api/urls.py +6 -0
  4. ipfabric_netbox/api/views.py +23 -0
  5. ipfabric_netbox/choices.py +74 -40
  6. ipfabric_netbox/data/endpoint.json +52 -0
  7. ipfabric_netbox/data/filters.json +51 -0
  8. ipfabric_netbox/data/transform_map.json +190 -176
  9. ipfabric_netbox/exceptions.py +7 -5
  10. ipfabric_netbox/filtersets.py +310 -41
  11. ipfabric_netbox/forms.py +330 -80
  12. ipfabric_netbox/graphql/__init__.py +6 -0
  13. ipfabric_netbox/graphql/enums.py +5 -5
  14. ipfabric_netbox/graphql/filters.py +56 -4
  15. ipfabric_netbox/graphql/schema.py +28 -0
  16. ipfabric_netbox/graphql/types.py +61 -1
  17. ipfabric_netbox/jobs.py +12 -1
  18. ipfabric_netbox/migrations/0022_prepare_for_filters.py +182 -0
  19. ipfabric_netbox/migrations/0023_populate_filters_data.py +303 -0
  20. ipfabric_netbox/migrations/0024_finish_filters.py +29 -0
  21. ipfabric_netbox/migrations/0025_add_vss_chassis_endpoint.py +166 -0
  22. ipfabric_netbox/models.py +432 -17
  23. ipfabric_netbox/navigation.py +98 -24
  24. ipfabric_netbox/tables.py +194 -9
  25. ipfabric_netbox/templates/ipfabric_netbox/htmx_list.html +5 -0
  26. ipfabric_netbox/templates/ipfabric_netbox/inc/combined_expressions.html +59 -0
  27. ipfabric_netbox/templates/ipfabric_netbox/inc/combined_expressions_content.html +39 -0
  28. ipfabric_netbox/templates/ipfabric_netbox/inc/endpoint_filters_with_selector.html +54 -0
  29. ipfabric_netbox/templates/ipfabric_netbox/ipfabricendpoint.html +39 -0
  30. ipfabric_netbox/templates/ipfabric_netbox/ipfabricfilter.html +51 -0
  31. ipfabric_netbox/templates/ipfabric_netbox/ipfabricfilterexpression.html +39 -0
  32. ipfabric_netbox/templates/ipfabric_netbox/ipfabricfilterexpression_edit.html +150 -0
  33. ipfabric_netbox/templates/ipfabric_netbox/ipfabricsync.html +1 -1
  34. ipfabric_netbox/templates/ipfabric_netbox/ipfabrictransformmap.html +16 -2
  35. ipfabric_netbox/templatetags/ipfabric_netbox_helpers.py +68 -0
  36. ipfabric_netbox/tests/api/test_api.py +333 -13
  37. ipfabric_netbox/tests/test_filtersets.py +2592 -0
  38. ipfabric_netbox/tests/test_forms.py +1349 -74
  39. ipfabric_netbox/tests/test_models.py +242 -34
  40. ipfabric_netbox/tests/test_views.py +2031 -26
  41. ipfabric_netbox/urls.py +35 -0
  42. ipfabric_netbox/utilities/endpoint.py +83 -0
  43. ipfabric_netbox/utilities/filters.py +88 -0
  44. ipfabric_netbox/utilities/ipfutils.py +393 -377
  45. ipfabric_netbox/utilities/logging.py +7 -7
  46. ipfabric_netbox/utilities/transform_map.py +144 -5
  47. ipfabric_netbox/views.py +719 -5
  48. {ipfabric_netbox-4.3.2b9.dist-info → ipfabric_netbox-4.3.2b11.dist-info}/METADATA +2 -2
  49. {ipfabric_netbox-4.3.2b9.dist-info → ipfabric_netbox-4.3.2b11.dist-info}/RECORD +50 -33
  50. {ipfabric_netbox-4.3.2b9.dist-info → ipfabric_netbox-4.3.2b11.dist-info}/WHEEL +1 -1
@@ -2,11 +2,8 @@ import json
  import logging
  from collections import Counter
  from copy import deepcopy
- from enum import Enum
  from functools import cache
- from functools import partial
  from importlib import metadata
- from typing import Any
  from typing import Callable
  from typing import TYPE_CHECKING
  from typing import TypeVar
@@ -14,8 +11,15 @@ from typing import TypeVar
  from core.exceptions import SyncError
  from core.signals import clear_events
  from dcim.models import Device
+ from dcim.models import Site
  from dcim.models import VirtualChassis
  from dcim.signals import assign_virtualchassis_master
+
+ try:
+     # Added in NetBox 4.4.9
+     from dcim.signals import sync_cached_scope_fields
+ except ImportError:
+     sync_cached_scope_fields = None
  from django.conf import settings
  from django.core.exceptions import MultipleObjectsReturned
  from django.core.exceptions import ObjectDoesNotExist
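
Note: the try/except block added above is the standard guarded-import pattern for version-dependent APIs; the module still loads on NetBox releases older than 4.4.9, and later code gates on a None check before touching the signal. A generic sketch of the pattern, using a hypothetical module name:

    try:
        # Optional dependency, present only on newer releases
        from some_dependency import optional_feature
    except ImportError:
        optional_feature = None

    def use_feature() -> None:
        if optional_feature is None:
            return  # degrade gracefully where the feature is missing
        optional_feature()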
@@ -136,26 +140,6 @@ class IPFabric(object):
              "user-agent"
          ] += f'; ipfabric-netbox/{metadata.version("ipfabric-netbox")}'  # noqa: E702

-     def get_snapshots(self) -> dict:
-         formatted_snapshots = {}
-         if self.ipf:
-             for snapshot_ref, snapshot in self.ipf.snapshots.items():
-                 if snapshot.status != "done" and snapshot.finish_status != "done":
-                     continue
-                 if snapshot_ref in ["$prev", "$lastLocked"]:
-                     continue
-                 if snapshot.name:
-                     description = (
-                         snapshot.name
-                         + " - "
-                         + snapshot.end.strftime("%d-%b-%y %H:%M:%S")
-                     )
-                 else:
-                     description = snapshot.end.strftime("%d-%b-%y %H:%M:%S")
-
-                 formatted_snapshots[snapshot_ref] = (description, snapshot.snapshot_id)
-         return formatted_snapshots
-
      def get_table_data(self, table, device):
          filter = {"sn": ["eq", device.serial]}
          split = table.split(".")
@@ -194,15 +178,13 @@ class DataRecord:

      def __init__(
          self,
-         app: str,
-         model: str,
+         model_string: str,
          data: dict,
          # These values are filled later as the record is passed down the pipeline
         context: dict | None = None,
         transform_map: "IPFabricTransformMap | None" = None,
      ):
-         self.app = app
-         self.model = model
+         self.model_string = model_string
          self.data = data
          self.context = context or dict()
          self.transform_map = transform_map
@@ -212,8 +194,7 @@ class DataRecord:
          try:
              self._hash = hash(
                  (
-                     self.app,
-                     self.model,
+                     self.model_string,
                      # Since the dicts are already ordered, it is safe to hash them
                      # .values() are mutable, this is fixed by tuple() to get same hash every time
                      make_hashable(self.data),
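
Note: hashing the (model string, frozen payload) tuple is what lets identical records collapse when they are collected into sets during preprocessing. A standalone sketch of the idea; this `make_hashable` is a simplified stand-in for the NetBox utility the module imports:

    def make_hashable(value):
        # Recursively freeze dicts/lists into tuples so they become hashable
        if isinstance(value, dict):
            return tuple((k, make_hashable(v)) for k, v in value.items())
        if isinstance(value, (list, tuple)):
            return tuple(make_hashable(v) for v in value)
        return value

    a = ("dcim.device", make_hashable({"sn": "ABC123", "hostname": "edge1"}))
    b = ("dcim.device", make_hashable({"sn": "ABC123", "hostname": "edge1"}))
    assert hash(a) == hash(b)  # identical payloads dedupe in a set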
@@ -248,119 +229,11 @@ class DataRecord:
          self._hash = None  # Invalidate cached hash


- # TODO: 1) Store this as model in DB linked to Transform map
- # TODO: 2) Each table will be added to template data with customizable name (instead of just `object`)
- # TODO: and linked using common column (like `sn` for Device and Interface)
- # TODO: 3) Only pull required tables according to sync settings
- class Endpoints(Enum):
-     SITE = "inventory.sites"
-     DEVICE = "inventory.devices"
-     VIRTUALCHASSIS = "technology.platforms.stacks_members"
-     INTERFACE = "inventory.interfaces"
-     INVENTORYITEM = "inventory.pn"
-     VLAN = "technology.vlans.site_summary"
-     VRF = "technology.routing.vrf_detail"
-     PREFIX = "technology.managed_networks.networks"
-     IPADDRESS = "technology.addressing.managed_ip_ipv4"
-
-
- class Filters(Enum):
-     SITE = "site"
-     DEVICE = "device"
-     VIRTUALCHASSIS = "virtualchassis"
-     INTERFACE = "interface"
-     INVENTORYITEM = "inventoryitem"
-     VLAN = "vlan"
-     VRF = "vrf"
-     PREFIX = "prefix"
-     IPADDRESS = "ipaddress"
-
-     @staticmethod
-     def _site_filter(sites: dict | None) -> dict:
-         if sites:
-             return {"or": [{"siteName": ["eq", site]} for site in sites]}
-         return {}
-
-     @staticmethod
-     def _device_filter(sites: dict | None, child_table: bool = False) -> dict:
-         key = "vendor" if not child_table else "device.vendor"
-         excluded_vendors = ["aws", "azure"]
-         device_filter = {"and": [{key: ["neq", vendor]} for vendor in excluded_vendors]}
-         if sites:
-             site_filter = Filters._site_filter(sites)
-             device_filter["and"].append(site_filter)
-         return device_filter
-
-     @staticmethod
-     def _virtualchassis_filter(sites: dict | None) -> dict:
-         return Filters._device_filter(sites, child_table=True)
-
-     @staticmethod
-     def _interface_filter(sites: dict | None) -> dict:
-         return Filters._device_filter(sites, child_table=True)
-
-     @staticmethod
-     def _inventoryitem_filter(sites: dict | None) -> dict:
-         inventory_item_filter = {
-             "and": [
-                 {"sn": ["empty", False]},
-                 {"name": ["empty", False]},
-             ]
-         }
-         if site_filter := Filters._device_filter(sites, child_table=True):
-             inventory_item_filter["and"].append(site_filter)
-         return inventory_item_filter
-
-     @staticmethod
-     def _vlan_filter(sites: dict | None) -> dict:
-         # Remove VLANs with ID 0, minimum VLAN ID in NetBox is 1
-         vlan_filter = {"and": [{"vlanId": ["neq", 0]}]}
-         if site_filter := Filters._site_filter(sites):
-             vlan_filter["and"].append(site_filter)
-         return vlan_filter
-
-     @staticmethod
-     def _vrf_filter(sites: dict | None) -> dict:
-         return Filters._device_filter(sites, child_table=True)
-
-     @staticmethod
-     def _prefix_filter(sites: dict | None) -> dict:
-         if site_filter := Filters._device_filter(sites, child_table=True):
-             return {"and": [site_filter, {"and": [{"net": ["empty", False]}]}]}
-         else:
-             return {"and": [{"net": ["empty", False]}]}
-
-     @staticmethod
-     def _ipaddress_filter(sites: dict | None) -> dict:
-         return Filters._device_filter(sites, child_table=True)
-
-     @staticmethod
-     def get_filter(endpoint: str, sites: dict | None) -> dict:
-         method_name = f"_{getattr(Filters, endpoint).value}_filter"
-         filter_func = getattr(Filters, method_name, None)
-         if filter_func:
-             return filter_func(sites)
-         return {}
-
-
- # TODO: Store also hierarchy of models (e.g. Device required Device and virtual chassis endpoints)
-
-
- class EndpointHandler:
-     def __init__(self, client: IPFClient, snapshot_id: str) -> None:
-         self.client = client
-         self.snapshot_id = snapshot_id
-
-     def resolve_endpoint(self, endpoint: str) -> Callable[..., Any]:
-         """Resolve a dot-separated endpoint string to the corresponding IPFClient attribute."""
-         obj = self.client
-         for attr in endpoint.split("."):
-             obj = getattr(obj, attr)
-         return partial(obj.all, snapshot_id=self.snapshot_id)
-
-
- def order_members(members: list[dict]) -> dict[str, list[dict]]:
-     """Order VC members to dict, where key is master serial number and values are all members."""
+ def order_members(members: list[dict], sn_column: str) -> dict[str, list[dict]]:
+     """Order stack members to dict, where key is master serial number and values are all members.
+     For stack members table we get `memberSn` column for member serial number.
+     For VSS chassis table we get `chassisSn` column for member serial number.
+     """
      devices = {}

      for member in members:
@@ -368,7 +241,7 @@ def order_members(members: list[dict]) -> dict[str, list[dict]]:
          # This can be spotted by checking if `sn` is different from `memberSn` for the master device
          # Plus `sn` will be IP of loopback in hex...
          master_serial = member.get("sn")
-         if master_serial and member.get("memberSn"):
+         if master_serial and member.get(sn_column):
              if master_serial in devices:
                  devices[master_serial].append(member)
              else:
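
Note: the new `sn_column` argument lets one grouping routine serve both source tables: the stack-members table carries the member serial in `memberSn`, the VSS chassis table in `chassisSn`. A simplified reimplementation with made-up rows, to show the resulting shape (`group_members` is illustrative, not the plugin's function):

    def group_members(members, sn_column):
        # Key rows by the master's `sn`, keeping rows that carry a member serial
        grouped = {}
        for member in members:
            if member.get("sn") and member.get(sn_column):
                grouped.setdefault(member["sn"], []).append(member)
        return grouped

    stack_rows = [
        {"sn": "STACK1", "memberSn": "STACK1", "member": 1},
        {"sn": "STACK1", "memberSn": "MEMBER2", "member": 2},
    ]
    vss_rows = [
        {"sn": "VSS1", "chassisSn": "VSS1", "chassisId": 1},
        {"sn": "VSS1", "chassisSn": "CHASSIS2", "chassisId": 2},
    ]
    members = group_members(stack_rows, "memberSn")
    members.update(group_members(vss_rows, "chassisSn"))
    # {'STACK1': [<2 rows>], 'VSS1': [<2 rows>]}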
@@ -400,17 +273,20 @@ def prepare_devices(
          if child_members := members.get(device.get("sn")):
              # This device is the VC master, and we're iterating over all its members
              for child_member in child_members:
+                 member_id = child_member.get("member") or child_member.get("chassisId")
+                 member_sn = child_member.get("memberSn") or child_member.get(
+                     "chassisSn"
+                 )
                  # There is physically no device with hostname matching the virtual chassis
                  # There are only members, so "hostname/1", "hostname/2", etc.
                  new_device = deepcopy(device)
-                 new_device[
-                     "hostname"
-                 ] = f"{device['hostname']}/{child_member.get('member')}"
+                 new_device["hostname"] = f"{device['hostname']}/{member_id}"
                  new_device["virtual_chassis"] = child_member
-                 if device.get("sn") != child_member.get("memberSn"):
+                 if device.get("sn") != member_sn:
                      # VC members (non-master) are not in Device table, need to add them as new Device
-                     new_device["model"] = child_member.get("pn")
-                     new_device["sn"] = child_member.get("memberSn")
+                     # VSS Chassis have no model in the table, get it from master device
+                     new_device["model"] = child_member.get("pn") or device["model"]
+                     new_device["sn"] = member_sn
                  all_devices.append(new_device)
          else:
              # Master device, create the virtual chassis based on it
@@ -465,7 +341,7 @@ class IPFabricSyncRunner(object):
          exception: Exception,
          ingestion: "IPFabricIngestion",
          message: str = None,
-         model: str = None,
+         model_string: str = None,
          context: dict = None,
          data: dict = None,
      ) -> "tuple[bool, IPFabricIngestionIssue]":
@@ -476,7 +352,7 @@ class IPFabricSyncRunner(object):

          error_serial = self.get_error_serial(context, data)
          # Ignore models that do not have any dependencies by serial number
-         if error_serial and model not in ["ipaddress", "macaddress"]:
+         if error_serial and model_string not in ["ipam.ipaddress", "dcim.macaddress"]:
              self.error_serials.add(error_serial)

          # TODO: This is to prevent circular import issues, clean it up later.
@@ -487,7 +363,7 @@ class IPFabricSyncRunner(object):
              ingestion=ingestion,
              exception=exception.__class__.__name__,
              message=message or getattr(exception, "message", str(exception)),
-             model=model,
+             model=model_string,
              coalesce_fields={
                  k: v for k, v in context.items() if k not in ["defaults"]
              },
@@ -515,19 +391,19 @@ class IPFabricSyncRunner(object):
          # Logging section for user-facing logs inside the job
          self = args[0]
          if isinstance(err, SearchError):
-             if self.settings.get(err.model):
+             if self.settings.get(err.model_string):
                  self.logger.log_failure(
-                     f"Aborting syncing `{err.model}` instance due to above error, please check your transform maps and/or existing data.",
+                     f"Aborting syncing `{err.model_string}` instance due to above error, please check your transform maps and/or existing data.",
                      obj=self.sync,
                  )
              else:
                  self.logger.log_failure(
-                     f"Syncing `{err.model}` is disabled in settings, but hit above error trying to find the correct item. Please check your transform maps and/or existing data.",
+                     f"Syncing `{err.model_string}` is disabled in settings, but hit above error trying to find the correct item. Please check your transform maps and/or existing data.",
                      obj=self.sync,
                  )
          if isinstance(err, IPAddressDuplicateError):
              self.logger.log_warning(
-                 f"IP Address `{err.data.get('address')}` already exists in `{err.model}` with coalesce fields: `{err.coalesce_fields}`. Please check your transform maps and/or existing data.",
+                 f"IP Address `{err.data.get('address')}` already exists in `{err.model_string}` with coalesce fields: `{err.coalesce_fields}`. Please check your transform maps and/or existing data.",
                  obj=self.sync,
              )
          else:
@@ -549,13 +425,11 @@ class IPFabricSyncRunner(object):

      def get_transform_context(self, record: DataRecord) -> DataRecord:
          if not record.transform_map:
-             raise SystemError(
-                 f"No transform map available for {record.app}: {record.model}"
-             )
+             raise SystemError(f"No transform map available for {record.model_string}")
          try:
              record.context = record.transform_map.get_context(record.data)
          except Exception as err:
-             message = f"Error getting context for `{record.model}`."
+             message = f"Error getting context for `{record.model_string}`."
              if isinstance(err, ObjectDoesNotExist):
                  message += (
                      " Could not find related object using template in transform maps."
@@ -566,11 +440,14 @@ class IPFabricSyncRunner(object):
                  exception=err,
                  ingestion=self.ingestion,
                  message=message,
-                 model=record.model,
+                 model_string=record.model_string,
                  data=record.data,
              )
              raise SearchError(
-                 message=message, data=record.data, model=record.model, issue_id=issue.pk
+                 message=message,
+                 data=record.data,
+                 model_string=record.model_string,
+                 issue_id=issue.pk,
              ) from err

          return record
@@ -581,67 +458,67 @@ class IPFabricSyncRunner(object):
          if error_serial and error_serial in self.error_serials:
              # We want to raise it as exception so it's shown in ingestion issues but can be filtered out.
              exception = RequiredDependencyFailedSkip(
-                 message=f"Skipping syncing of `{record.model}` with serial `{error_serial}` due to previous errors.",
-                 model=record.model,
+                 message=f"Skipping syncing of `{record.model_string}` with serial `{error_serial}` due to previous errors.",
+                 model_string=record.model_string,
                  context=record.context,
                  data=record.data,
              )
              _, issue = self.create_or_get_sync_issue(
                  exception=exception,
                  ingestion=self.ingestion,
-                 model=record.model,
+                 model_string=record.model_string,
                  context=record.context,
                  data=record.data,
              )
              raise exception
          record = self.get_transform_context(record)
          queryset = record.transform_map.target_model.model_class().objects
-         model_settings = self.settings.get(record.model, False)
+         model_settings = self.settings.get(f"{record.model_string}", False)

          obj = None
          try:
              connection_name = self.get_db_connection_name()
              if model_settings:
-                 logger.info(f"Creating {record.model}")
+                 logger.info(f"Creating {record.model_string}")
                  obj = record.transform_map.update_or_create_instance(
                      context=record.context,
                      tags=self.sync.tags.all(),
                      connection_name=connection_name,
                  )
              else:
-                 logger.info(f"Getting {record.model}")
+                 logger.info(f"Getting {record.model_string}")
                  record.context.pop("defaults", None)
                  obj = queryset.using(connection_name).get(**record.context)
          except queryset.model.DoesNotExist as err:
-             message = f"Instance of `{record.model}` not found."
+             message = f"Instance of `{record.model_string}` not found."
              _, issue = self.create_or_get_sync_issue(
                  exception=err,
                  ingestion=self.ingestion,
                  message=message,
-                 model=record.model,
+                 model_string=record.model_string,
                  context=record.context,
                  data=record.data,
              )
              raise SearchError(
                  message=message,
-                 model=record.model,
+                 model_string=record.model_string,
                  context=record.context,
                  data=record.data,
                  issue_id=issue.pk,
              ) from err
          except queryset.model.MultipleObjectsReturned as err:
-             message = f"Multiple instances of `{record.model}` found."
+             message = f"Multiple instances of `{record.model_string}` found."
              _, issue = self.create_or_get_sync_issue(
                  exception=err,
                  ingestion=self.ingestion,
                  message=message,
-                 model=record.model,
+                 model_string=record.model_string,
                  context=record.context,
                  data=record.data,
              )
              raise SearchError(
                  message=message,
-                 model=record.model,
+                 model_string=record.model_string,
                  context=record.context,
                  data=record.data,
                  issue_id=issue.pk,
@@ -650,12 +527,12 @@ class IPFabricSyncRunner(object):
              _, issue = self.create_or_get_sync_issue(
                  exception=err,
                  ingestion=self.ingestion,
-                 model=record.model,
+                 model_string=record.model_string,
                  context=record.context,
                  data=record.data,
              )
              raise SyncDataError(
-                 model=record.model,
+                 model_string=record.model_string,
                  context=record.context,
                  data=record.data,
                  issue_id=issue.pk,
@@ -664,12 +541,16 @@ class IPFabricSyncRunner(object):
          return obj

      def collect_data(self):
+         # Importing here to avoid circular import
+         from ..models import IPFabricEndpoint
+
          try:
              self.logger.log_info(
                  "Collecting information from IP Fabric",
                  obj=self.sync.snapshot_data.source,
              )
              data = {}
+             # TODO: Replace endpoint value in SnapshotData with Endpoint link
              if self.sync.snapshot_data.source.type == IPFabricSourceTypeChoices.REMOTE:
                  # This requires data already pushed to NetBox by user, does not connect to IPF directly
                  self.logger.log_info(
@@ -679,34 +560,68 @@ class IPFabricSyncRunner(object):
                      raise SyncError(
                          "No snapshot data available. This is a remote sync. Push data to NetBox first."
                      )
-                 for endpoint in Endpoints:
-                     data[endpoint.name.lower()] = list(
+                 for endpoint in IPFabricEndpoint.objects.all():
+                     data[endpoint.endpoint] = list(
                          self.sync.snapshot_data.ipf_data.filter(
-                             type=endpoint.name.lower()
+                             type=endpoint.endpoint
                          ).values_list("data", flat=True)
                      )
              else:
                  # This pulls data directly from IP Fabric instance
+                 # TODO: If certain columns are not enabled in IPF, sync will fail in odd ways
+                 # For example missing `nameOriginal` on Interface makes it unable to sync IPs
+                 # When pulling from endpoint, make sure to specify columns according to transform maps
+                 # This can be pulled using logic in IPFabricTransformMap.strip_source_data()
+                 # TODO: Also cache it to improve performance, no need to regex every time!
                  self.logger.log_info(
                      "Local collector being used for snapshot data.", obj=self.sync
                  )
-                 endpoint_handler = EndpointHandler(
-                     self.client,
-                     snapshot_id=self.settings["snapshot_id"],
-                 )
-                 ingestion_sites = self.settings.get("sites")
-                 for endpoint in Endpoints:
-                     filters = Filters.get_filter(endpoint.name, ingestion_sites)
+                 enabled_models = [
+                     param
+                     for param in self.sync.parameters.keys()
+                     if "." in param and self.sync.parameters.get(param) is True
+                 ]
+                 # Always pull devices for now since other models depend on them
+                 enabled_models.append("dcim.device")
+                 # Interfaces are required for IP Addresses' assigned interface names
+                 if "ipam.ipaddress" in enabled_models:
+                     enabled_models.append("dcim.interface")
+                 transform_maps = self.sync.get_transform_maps()
+                 for endpoint in IPFabricEndpoint.objects.all():
+                     # FIXME: Dirty hack to sync VSS, remove when IN-68 is done
+                     if endpoint.endpoint == "/technology/platforms/vss/chassis":
+                         tms_for_endpoint = transform_maps.filter(
+                             source_endpoint__endpoint="/technology/platforms/stack/members"
+                         )
+                     else:
+                         tms_for_endpoint = transform_maps.filter(
+                             source_endpoint=endpoint
+                         )
+
+                     if not tms_for_endpoint.exists():
+                         raise SyncError(
+                             f"No transform map found for endpoint `{endpoint.endpoint}`."
+                         )
+                     if not [
+                         True
+                         for tm in tms_for_endpoint
+                         if ".".join(tm.target_model.natural_key()) in enabled_models
+                     ]:
+                         logger.debug(
+                             f"Skipping endpoint `{endpoint.endpoint}` as no enabled models are using it."
+                         )
+                         continue
+                     filters = endpoint.combine_filters(sync=self.sync)
                      logger.debug(
-                         f"Collecting data from endpoint: `{endpoint.value}` using filter `{json.dumps(filters)}`."
+                         f"Collecting data from endpoint: `{endpoint.endpoint}` using filter `{json.dumps(filters)}`."
                      )
-                     data[endpoint.name.lower()] = endpoint_handler.resolve_endpoint(
-                         endpoint.value
-                     )(
+                     data[endpoint.endpoint] = self.client.fetch_all(
+                         url=endpoint.endpoint,
+                         snapshot_id=self.settings["snapshot_id"],
                          filters=filters,
                      )
                      self.logger.log_info(
-                         f"Collected {len(data[endpoint.name.lower()])} items from endpoint `{endpoint.value}`.",
+                         f"Collected {len(data[endpoint.endpoint])} items from endpoint `{endpoint.endpoint}`.",
                          obj=self.sync.snapshot_data.source,
                      )
          except Exception as e:
@@ -717,103 +632,182 @@ class IPFabricSyncRunner(object):
          return data

      @cache
-     def get_transform_map(self, app: str, model: str) -> "IPFabricTransformMap":
+     def get_transform_map(
+         self, app: str, model: str, endpoint: str = None
+     ) -> "IPFabricTransformMap":
          """Get transform map for given app and model. Cached to improve performance."""
-         return self.transform_maps.get(
-             target_model__app_label=app, target_model__model=model
+         filter_kwargs = {
+             "target_model__app_label": app,
+             "target_model__model": model,
+         }
+         if endpoint:
+             filter_kwargs["source_endpoint__endpoint"] = endpoint
+         return self.transform_maps.get(**filter_kwargs)
+
+     def create_new_data_records(
+         self,
+         app: str,
+         model: str,
+         data: list[dict] | dict[str, list[dict]],
+         endpoint: str = None,
+     ) -> set[DataRecord]:
+         """Create DataRecord objects for given app, model and data list if enabled in settings."""
+         if not self.sync.parameters.get(f"{app}.{model}"):
+             return set()
+         if isinstance(data, dict):
+             # Data is either already a list or the full data dict where we have to choose the list
+             transform_map = self.get_transform_map(
+                 app=app, model=model, endpoint=endpoint
+             )
+             data = data.get(transform_map.source_endpoint.endpoint, [])
+         return set(
+             self.create_new_data_record(
+                 app=app, model=model, data=item, endpoint=endpoint
+             )
+             for item in data
          )

-     def create_new_data_record(self, app: str, model: str, data: dict) -> DataRecord:
+     def create_new_data_record(
+         self, app: str, model: str, data: dict, endpoint: str = None
+     ) -> DataRecord:
          """Extract only relevant source data according to transform map configuration."""
-         transform_map = self.get_transform_map(app=app, model=model)
+         transform_map = self.get_transform_map(app=app, model=model, endpoint=endpoint)
+         model_string = f"{app}.{model}"
          try:
              source_data = transform_map.strip_source_data(data)
          except KeyError as err:
              raise SyncError(
-                 f"Missing key column {err.args[0]} in source data when preparing data for {app}_{model}."
+                 f"Missing key column {err.args[0]} in source data when preparing data for {model_string}."
              ) from err
          return DataRecord(
-             app=app, model=model, data=source_data, transform_map=transform_map
+             model_string=model_string, data=source_data, transform_map=transform_map
          )

-     def preprocess_data(self, data: dict) -> dict[str, set[DataRecord]]:
-         # TODO: Only process data according to settings to improve performance
-         # Set those records that can't be iterated separately
-         # Others are as empty set to define order which is shown in UI progress
-         records = {
-             "site": set(
-                 self.create_new_data_record(app="dcim", model="site", data=item)
-                 for item in data.get("site", [])
-             ),
-             "manufacturer": set(),
-             "devicetype": set(),
-             "platform": set(),
-             "devicerole": set(),
-             "device": set(),
-             "virtualchassis": set(),
-             "interface": set(),
-             "macaddress": set(),
-             "inventoryitem": set(
-                 self.create_new_data_record(
-                     app="dcim", model="inventoryitem", data=item
-                 )
-                 for item in data.get("inventoryitem", [])
-             ),
-             "vlan": set(
-                 self.create_new_data_record(app="ipam", model="vlan", data=item)
-                 for item in data.get("vlan", [])
-             ),
-             "vrf": set(
-                 self.create_new_data_record(app="ipam", model="vrf", data=item)
-                 for item in data.get("vrf", [])
-             ),
-             "prefix": set(
-                 self.create_new_data_record(app="ipam", model="prefix", data=item)
-                 for item in data.get("prefix", [])
-             ),
-             "ipaddress": set(),
+     def _get_custom_preprocessor(
+         self, model_string: str, endpoint: str
+     ) -> Callable | None:
+         """
+         Get custom preprocessing function for a model+endpoint combination if one exists.
+
+         Args:
+             model_string: The model in 'app.model' format (e.g., 'dcim.virtualchassis')
+             endpoint: The source endpoint (e.g., '/technology/platforms/stack/members')
+
+         Returns:
+             Callable preprocessor function or None if no custom preprocessor exists
+         """
+         preprocessors = {
+             # Devices and related models processed together (from /inventory/devices)
+             ("dcim.device", "/inventory/devices"): self._preprocess_devices,
+             ("dcim.manufacturer", "/inventory/devices"): self._preprocess_devices,
+             ("dcim.devicetype", "/inventory/devices"): self._preprocess_devices,
+             ("dcim.platform", "/inventory/devices"): self._preprocess_devices,
+             ("dcim.devicerole", "/inventory/devices"): self._preprocess_devices,
+             # Virtual chassis from stack members
+             (
+                 "dcim.virtualchassis",
+                 "/technology/platforms/stack/members",
+             ): self._preprocess_devices,
+             # Interfaces and MAC addresses processed together
+             ("dcim.interface", "/inventory/interfaces"): self._preprocess_interfaces,
+             ("dcim.macaddress", "/inventory/interfaces"): self._preprocess_interfaces,
+             # IP addresses need interface data
+             (
+                 "ipam.ipaddress",
+                 "/technology/addressing/managed-ip/ipv4",
+             ): self._preprocess_ipaddresses,
          }

+         return preprocessors.get((model_string, endpoint))
+
+     def _preprocess_devices(self, data: dict, records: dict) -> None:
+         """Custom preprocessing for devices - single iteration for efficiency."""
+         if not (
+             self.sync.parameters.get("dcim.device")
+             or self.sync.parameters.get("dcim.virtualchassis")
+             or self.sync.parameters.get("dcim.interface")
+             or self.sync.parameters.get("dcim.macaddress")
+             or self.sync.parameters.get("ipam.ipaddress")
+         ):
+             return
+
+         # Prepare virtual chassis members once
+         members = {}
+         if self.sync.parameters.get("dcim.virtualchassis"):
+             self.logger.log_info("Preparing virtual chassis members", obj=self.sync)
+             members = order_members(
+                 data.get("/technology/platforms/stack/members", []), "memberSn"
+             )
+             members.update(
+                 order_members(
+                     data.get("/technology/platforms/vss/chassis", []), "chassisSn"
+                 )
+             )
+
          self.logger.log_info("Preparing devices", obj=self.sync)
-         members = order_members(data.get("virtualchassis", []))
-         devices, virtualchassis = prepare_devices(data.get("device", []), members)
+         devices, virtualchassis = prepare_devices(
+             data.get("/inventory/devices", []), members
+         )

-         # We need to store primary IPs of Devices to assign them later
-         # since they are not stored on Device object directly
-         # TODO: This will be later replaced when we are able to sync from multiple API tables to 1 model
          device_primary_ips = {}

+         # Single iteration creating records for all related models
          for device in devices:
-             records["manufacturer"].add(
-                 self.create_new_data_record(
-                     app="dcim", model="manufacturer", data=device
+             if self.sync.parameters.get("dcim.manufacturer"):
+                 records["dcim.manufacturer"].add(
+                     self.create_new_data_record(
+                         app="dcim", model="manufacturer", data=device
+                     )
                  )
-             )
-             records["devicetype"].add(
-                 self.create_new_data_record(app="dcim", model="devicetype", data=device)
-             )
-             records["platform"].add(
-                 self.create_new_data_record(app="dcim", model="platform", data=device)
-             )
-             records["devicerole"].add(
-                 self.create_new_data_record(app="dcim", model="devicerole", data=device)
-             )
-             # This field is required by Device transform maps, but is set only when Device is part of VC.
+
+             if self.sync.parameters.get("dcim.devicetype"):
+                 records["dcim.devicetype"].add(
+                     self.create_new_data_record(
+                         app="dcim", model="devicetype", data=device
+                     )
+                 )
+
+             if self.sync.parameters.get("dcim.platform"):
+                 records["dcim.platform"].add(
+                     self.create_new_data_record(
+                         app="dcim", model="platform", data=device
+                     )
+                 )
+
+             if self.sync.parameters.get("dcim.devicerole"):
+                 records["dcim.devicerole"].add(
+                     self.create_new_data_record(
+                         app="dcim", model="devicerole", data=device
+                     )
+                 )
+
              if "virtual_chassis" not in device:
                  device["virtual_chassis"] = None
-             records["device"].add(
-                 self.create_new_data_record(app="dcim", model="device", data=device)
-             )
+
+             if self.sync.parameters.get("dcim.device"):
+                 records["dcim.device"].add(
+                     self.create_new_data_record(app="dcim", model="device", data=device)
+                 )
+
              device_primary_ips[device.get("sn")] = device.get("loginIp")

-         records["virtualchassis"] = set(
-             self.create_new_data_record(app="dcim", model="virtualchassis", data=item)
-             for item in virtualchassis
+         self._device_primary_ips = device_primary_ips
+         records["dcim.virtualchassis"] = self.create_new_data_records(
+             app="dcim", model="virtualchassis", data=virtualchassis
          )

-         # `nameOriginal` is human-readable interface name hidden column in IP Fabric
-         # This allows us to use it instead of the `intName`
-         # But it can be customized using transform maps, so we need to use the current value
+     def _preprocess_interfaces(self, data: dict, records: dict) -> None:
+         """Custom preprocessing for interfaces and MAC addresses."""
+         if not (
+             self.sync.parameters.get("dcim.interface")
+             or self.sync.parameters.get("dcim.macaddress")
+             or self.sync.parameters.get("ipam.ipaddress")
+         ):
+             return
+
+         if not hasattr(self, "_device_primary_ips"):
+             self._preprocess_devices(data=data, records=records)
+
          interface_key = "nameOriginal"
          try:
              int_transform_map = self.get_transform_map(app="dcim", model="interface")
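
Note: `_get_custom_preprocessor` dispatches on the `(model_string, endpoint)` pair, so multiple target models fed by one endpoint can share a single preprocessor while everything else falls through to the generic per-record path. A condensed sketch of the lookup, with hypothetical handler names:

    from typing import Callable, Optional

    def handle_devices() -> None:
        pass  # stands in for a shared preprocessor like _preprocess_devices

    DISPATCH: dict[tuple[str, str], Callable[[], None]] = {
        ("dcim.device", "/inventory/devices"): handle_devices,
        ("dcim.manufacturer", "/inventory/devices"): handle_devices,  # shared handler
    }

    def resolve(model_string: str, endpoint: str) -> Optional[Callable[[], None]]:
        return DISPATCH.get((model_string, endpoint))

    assert resolve("dcim.device", "/inventory/devices") is handle_devices
    assert resolve("dcim.device", "/other/endpoint") is None  # generic path instead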
@@ -821,58 +815,132 @@ class IPFabricSyncRunner(object):
              interface_key = int_name_field_map.source_field
          except Exception as e:
              self.logger.log_failure(
-                 f"Error collecting information about transform map for interface name: {e}",
-                 obj=self.sync,
+                 f"Error collecting transform map info: {e}", obj=self.sync
              )
              raise SyncError(f"Error collecting source column name for interface: {e}")

-         self.logger.log_info("Preparing Interfaces", obj=self.sync)
          # Store human-readable interface names to use them later for IP Addresses
          readable_int_names = {}
-         for interface in data["interface"]:
-             interface_record = self.create_new_data_record(
-                 app="dcim", model="interface", data=interface
-             )
-             interface_record.data["loginIp"] = device_primary_ips.get(
-                 interface.get("sn")
-             )
-             records["interface"].add(interface_record)
+         self.logger.log_info("Preparing Interfaces", obj=self.sync)
+
+         for interface in data.get("/inventory/interfaces", []):
+             if self.sync.parameters.get("dcim.interface"):
+                 interface_record = self.create_new_data_record(
+                     app="dcim", model="interface", data=interface
+                 )
+                 interface_record.data["loginIp"] = self._device_primary_ips.get(
+                     interface.get("sn")
+                 )
+                 records["dcim.interface"].add(interface_record)
+
              readable_int_names[
                  f"{interface.get('sn')}_{interface.get('intName')}"
              ] = interface.get(interface_key)
-             records["macaddress"].add(
-                 self.create_new_data_record(
-                     app="dcim", model="macaddress", data=interface
+
+             if self.sync.parameters.get("dcim.macaddress"):
+                 records["dcim.macaddress"].add(
+                     self.create_new_data_record(
+                         app="dcim", model="macaddress", data=interface
+                     )
                  )
-             )
+
+         self._readable_int_names = readable_int_names
+
+     def _preprocess_ipaddresses(self, data: dict, records: dict) -> None:
+         """Custom preprocessing for IP addresses."""
+         if not self.sync.parameters.get("ipam.ipaddress"):
+             return
+
+         if not hasattr(self, "_readable_int_names"):
+             self._preprocess_interfaces(data=data, records=records)

          self.logger.log_info("Preparing IP Addresses", obj=self.sync)
-         for ip in data["ipaddress"]:
-             # We get `nameOriginal` from Interface table to get human-readable name instead fo `intName`
-             ip["nameOriginal"] = readable_int_names.get(
+         for ip in data.get("/technology/addressing/managed-ip/ipv4", []):
+             ip["nameOriginal"] = self._readable_int_names.get(
                  f"{ip.get('sn')}_{ip.get('intName')}"
              )
-             # Let's skip IPs we cannot assign to an interface
              if not ip["nameOriginal"]:
                  continue
+
              ipaddress_record = self.create_new_data_record(
                  app="ipam", model="ipaddress", data=ip
              )
-             # Store whether this IP is primary for the device
              ipaddress_record.data["is_primary"] = ip.get(
                  "sn"
-             ) in device_primary_ips and device_primary_ips.get(
+             ) in self._device_primary_ips and self._device_primary_ips.get(
                  ip.get("sn")
              ) == ipaddress_record.data.get(
                  "ip"
              )
-             records["ipaddress"].add(ipaddress_record)
+             records["ipam.ipaddress"].add(ipaddress_record)
+
+     def preprocess_data(self, data: dict) -> dict[str, set[DataRecord]]:
+         """
+         Preprocess data using hierarchical transform map order.
+
+         Iterates over transform maps in dependency order (parents before children),
+         using custom preprocessing functions where available or direct record creation.
+         Each transform map is processed exactly once, avoiding duplicate iterations.
+         """
+         # Get transform maps in hierarchical order (now returns transform maps directly!)
+         group_ids = self.sync.parameters.get("groups", [])
+         transform_maps_hierarchy = self.sync.get_model_hierarchy(group_ids=group_ids)
+
+         # Initialize records dict for all models (extract unique model strings)
+         records = {}
+         seen_models = set()
+         for transform_map in transform_maps_hierarchy:
+             model_string = f"{transform_map.target_model.app_label}.{transform_map.target_model.model}"
+             if model_string not in seen_models:
+                 records[model_string] = set()
+                 seen_models.add(model_string)
+
+         # Track which custom preprocessors have run (to avoid duplication)
+         # Use (preprocessor, endpoint) tuple to allow same preprocessor for different endpoints
+         preprocessors_run = set()
+
+         # Process each transform map in hierarchical order
+         for transform_map in transform_maps_hierarchy:
+             app = transform_map.target_model.app_label
+             model = transform_map.target_model.model
+             model_string = f"{app}.{model}"
+             endpoint = transform_map.source_endpoint.endpoint
+
+             # Skip if not enabled in sync parameters
+             if not self.sync.parameters.get(model_string):
+                 continue
+
+             # Check for custom preprocessor (pass endpoint for endpoint-specific preprocessing)
+             preprocessor = self._get_custom_preprocessor(model_string, endpoint)
+
+             # Create unique key combining preprocessor function and endpoint
+             preprocessor_key = (preprocessor, endpoint) if preprocessor else None
+
+             if preprocessor and preprocessor_key not in preprocessors_run:
+                 # Run custom preprocessor (may populate multiple models)
+                 self.logger.log_info(
+                     f"Running custom preprocessor for `{model_string}` from `{endpoint}`",
+                     obj=self.sync,
+                 )
+                 preprocessor(data, records)
+                 preprocessors_run.add(preprocessor_key)
+             elif not preprocessor:
+                 # Standard processing: use the transform map directly (no need to filter!)
+                 self.logger.log_info(
+                     f"Preparing `{model_string}` from `{endpoint}`", obj=self.sync
+                 )
+                 records[model_string].update(
+                     self.create_new_data_records(
+                         app=app, model=model, data=data, endpoint=endpoint
+                     )
+                 )

-         for model, records_set in records.items():
-             if self.settings.get(model) and len(records_set):
-                 self.logger.init_statistics(model, len(records_set))
+         # Log statistics
+         for model_string, records_set in records.items():
+             if self.settings.get(model_string) and len(records_set):
+                 self.logger.init_statistics(model_string, len(records_set))
              self.logger.log_info(
-                 f"Prepared {len(records_set)} items for `{model}` to be synced.",
+                 f"Prepared {len(records_set)} items for `{model_string}` to be synced.",
                  obj=self.sync,
              )
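Note: since several transform maps can resolve to the same preprocessor, `preprocess_data` keys its bookkeeping on the `(function, endpoint)` pair rather than the function alone. A condensed illustration of why that key matters (names hypothetical):

    calls = []

    def run_once(tasks):
        # tasks: (handler, endpoint) pairs; each distinct pair runs exactly once
        seen = set()
        for handler, endpoint in tasks:
            if (handler, endpoint) in seen:
                continue  # e.g. dcim.device and dcim.manufacturer share a handler
            handler(endpoint)
            seen.add((handler, endpoint))

    handler = lambda ep: calls.append(ep)
    run_once([
        (handler, "/inventory/devices"),
        (handler, "/inventory/devices"),                    # duplicate pair: skipped
        (handler, "/technology/platforms/stack/members"),   # new endpoint: runs again
    ])
    assert calls == ["/inventory/devices", "/technology/platforms/stack/members"]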
@@ -883,13 +951,8 @@ class IPFabricSyncRunner(object):
          self,
          record: DataRecord,
          stats: bool = True,
-         sync: bool = False,
      ) -> ModelTypeVar | None:
          """Sync a single item to NetBox."""
-         # The `sync` param is a workaround since we need to get some models (Device...) even when not syncing them.
-         if not sync:
-             return None
-
          if not record.data:
              return None
@@ -897,7 +960,7 @@ class IPFabricSyncRunner(object):

          # Only log when we successfully synced the item and asked for it
          if stats and instance:
-             self.logger.increment_statistics(model=record.model)
+             self.logger.increment_statistics(model_string=record.model_string)

          return instance

@@ -909,11 +972,7 @@ class IPFabricSyncRunner(object):
          stats: bool = True,
      ) -> ModelTypeVar | None:
          """Sync a single item to NetBox."""
-         synced_object = self.sync_model(
-             record=record,
-             sync=self.settings.get(record.model),
-             stats=stats,
-         )
+         synced_object = self.sync_model(record=record, stats=stats)
          if synced_object is None:
              return None

@@ -939,10 +998,10 @@ class IPFabricSyncRunner(object):
          if not items:
              return

-         app, model = (lambda x: (x.app, x.model))(next(iter(items)))
-         if not self.settings.get(model):
+         model_string = (lambda record: record.model_string)(next(iter(items)))
+         if not self.settings.get(model_string):
              self.logger.log_info(
-                 f"Did not ask to sync {model}s, skipping.", obj=self.sync
+                 f"Did not ask to sync {model_string}s, skipping.", obj=self.sync
              )
              return

@@ -950,56 +1009,6 @@ class IPFabricSyncRunner(object):
              self.sync_item(item, cf, ingestion, stats)
              self.events_clearer.increment()

-     @handle_errors
-     def sync_devices(
-         self,
-         devices: set[DataRecord],
-         cf: bool = False,
-         ingestion: "IPFabricIngestion" = None,
-     ) -> None:
-         """Sync devices separately to handle resetting primary IP."""
-         if not self.settings.get("device"):
-             self.logger.log_info(
-                 "Did not ask to sync devices, skipping.", obj=self.sync
-             )
-             return
-
-         for device in devices:
-             device_obj: "Device | None" = self.sync_item(
-                 record=device, cf=cf, ingestion=ingestion
-             )
-
-             if (
-                 device_obj is None
-                 or device_obj.primary_ip4 is None
-                 or device.data.get("loginIp") is not None
-             ):
-                 self.events_clearer.increment()
-                 continue
-
-             # If device has primary IP assigned in NetBox, but not in IP Fabric, remove it
-             try:
-                 connection_name = self.get_db_connection_name()
-                 device_obj.refresh_from_db(using=connection_name)
-                 device_obj.snapshot()
-                 device_obj.primary_ip4 = None
-                 device_obj.save(using=connection_name)
-             except Exception as err:
-                 _, issue = self.create_or_get_sync_issue(
-                     exception=err,
-                     ingestion=self.ingestion,
-                     message="Error removing primary IP current device.",
-                     model=device.model,
-                     data=device.data,
-                 )
-                 self.events_clearer.increment()
-                 raise IPAddressPrimaryRemovalError(
-                     data=device.data,
-                     model=device.model,
-                     issue_id=issue.pk,
-                 ) from err
-             self.events_clearer.increment()
-
      @handle_errors
      def sync_ipaddress(self, ip_address: DataRecord) -> "IPAddress | None":
          """Sync a single IP Address to NetBox, separated to use @handle_errors."""
@@ -1032,13 +1041,13 @@ class IPFabricSyncRunner(object):
                  exception=err,
                  ingestion=self.ingestion,
                  message="Error removing primary IP from other device.",
-                 model=ip_address.model,
+                 model_string=ip_address.model_string,
                  data=ip_address.data,
              )
              self.events_clearer.increment()
              raise IPAddressPrimaryRemovalError(
                  data=ip_address.data,
-                 model=ip_address.model,
+                 model_string=ip_address.model_string,
                  issue_id=issue.pk,
              ) from err

@@ -1063,13 +1072,13 @@ class IPFabricSyncRunner(object):
                  exception=err,
                  ingestion=self.ingestion,
                  message="Error assigning primary IP to device.",
-                 model=ip_address.model,
+                 model_string=ip_address.model_string,
                  data=ip_address.data,
              )
              self.events_clearer.increment()
              raise IPAddressPrimaryAssignmentError(
                  data=ip_address.data,
-                 model=ip_address.model,
+                 model_string=ip_address.model_string,
                  issue_id=issue.pk,
              ) from err
          self.events_clearer.increment()
@@ -1086,7 +1095,7 @@ class IPFabricSyncRunner(object):
          Cleaning events queue happens during each cycle to make sure all required
          operations (primary IP assignment) happen during the same batch.
          """
-         if not self.settings.get("ipaddress"):
+         if not self.settings.get("ipam.ipaddress"):
              self.logger.log_info(
                  "Did not ask to sync ipaddresses, skipping.", obj=self.sync
              )
@@ -1102,40 +1111,47 @@ class IPFabricSyncRunner(object):
          records = self.preprocess_data(data=data)

          self.logger.log_info("Starting data sync.", obj=self.sync)
-         self.sync_items(
-             items=records["site"],
-             cf=self.sync.update_custom_fields,
-             ingestion=ingestion,
-         )
-         self.sync_items(items=records["manufacturer"])
-         self.sync_items(items=records["devicetype"])
-         self.sync_items(items=records["platform"])
-         self.sync_items(items=records["devicerole"])
+         try:
+             # This signal does not call for snapshot(), causing issue with branching plugin
+             if sync_cached_scope_fields is not None:
+                 signals.post_save.disconnect(sync_cached_scope_fields, sender=Site)
+             self.sync_items(
+                 items=records["dcim.site"],
+                 cf=self.sync.update_custom_fields,
+                 ingestion=ingestion,
+             )
+         finally:
+             if sync_cached_scope_fields is not None:
+                 signals.post_save.connect(sync_cached_scope_fields, sender=Site)
+         self.sync_items(items=records["dcim.manufacturer"])
+         self.sync_items(items=records["dcim.devicetype"])
+         self.sync_items(items=records["dcim.platform"])
+         self.sync_items(items=records["dcim.devicerole"])
          try:
              # This signal does not call for snapshot(), causing issue with branching plugin
              signals.post_save.disconnect(
                  assign_virtualchassis_master, sender=VirtualChassis
              )
-             self.sync_items(items=records["virtualchassis"])
-             self.sync_devices(
-                 devices=records["device"],
+             self.sync_items(items=records["dcim.virtualchassis"])
+             self.sync_items(
+                 items=records["dcim.device"],
                  cf=self.sync.update_custom_fields,
                  ingestion=ingestion,
              )
              # The Device exists now, so we can update the master of the VC.
              # The logic is handled in transform maps.
-             self.sync_items(items=records["virtualchassis"], stats=False)
+             self.sync_items(items=records["dcim.virtualchassis"], stats=False)
          finally:
              signals.post_save.connect(
                  assign_virtualchassis_master, sender=VirtualChassis
              )
-         self.sync_items(items=records["interface"])
-         self.sync_items(items=records["macaddress"])
-         self.sync_items(items=records["inventoryitem"])
-         self.sync_items(items=records["vlan"])
-         self.sync_items(items=records["vrf"])
-         self.sync_items(items=records["prefix"])
-         self.sync_ip_addresses(ip_addresses=records["ipaddress"])
+         self.sync_items(items=records["dcim.interface"])
+         self.sync_items(items=records["dcim.macaddress"])
+         self.sync_items(items=records["dcim.inventoryitem"])
+         self.sync_items(items=records["ipam.vlan"])
+         self.sync_items(items=records["ipam.vrf"])
+         self.sync_items(items=records["ipam.prefix"])
+         self.sync_ip_addresses(ip_addresses=records["ipam.ipaddress"])

          # Make sure to clean queue (and memory) at the end
          self.events_clearer.clear()
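
Note: both try/finally blocks above repeat the same disconnect/reconnect dance for different signals; they could be folded into one context manager. A hedged sketch of that refactoring (this helper is not part of the plugin):

    from contextlib import contextmanager

    @contextmanager
    def signal_disconnected(signal, receiver, sender):
        # Temporarily detach `receiver`; a no-op when the receiver is None
        # (e.g. sync_cached_scope_fields on NetBox < 4.4.9)
        if receiver is not None:
            signal.disconnect(receiver, sender=sender)
        try:
            yield
        finally:
            if receiver is not None:
                signal.connect(receiver, sender=sender)

    # Usage mirroring the site sync above:
    # with signal_disconnected(signals.post_save, sync_cached_scope_fields, Site):
    #     self.sync_items(items=records["dcim.site"], ...)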