ipfabric_netbox 4.3.2b1__py3-none-any.whl → 4.3.2b2__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.


This version of ipfabric_netbox might be problematic.

@@ -1,14 +1,18 @@
1
1
  import json
2
2
  import logging
3
+ from collections import Counter
4
+ from copy import deepcopy
5
+ from enum import Enum
6
+ from functools import cache
7
+ from functools import partial
3
8
  from importlib import metadata
9
+ from typing import Any
4
10
  from typing import Callable
5
11
  from typing import TYPE_CHECKING
6
12
  from typing import TypeVar
7
13
 
8
14
  from core.exceptions import SyncError
9
15
  from dcim.models import Device
10
- from dcim.models import Interface
11
- from dcim.models import MACAddress
12
16
  from django.conf import settings
13
17
  from django.core.exceptions import MultipleObjectsReturned
14
18
  from django.core.exceptions import ObjectDoesNotExist
@@ -22,17 +26,17 @@ from netutils.utils import jinja2_convenience_function
22
26
 
23
27
  from ..choices import IPFabricSourceTypeChoices
24
28
  from ..choices import IPFabricSyncStatusChoices
25
- from ..exceptions import create_or_get_sync_issue
26
29
  from ..exceptions import IPAddressDuplicateError
30
+ from ..exceptions import IPAddressPrimaryRemovalError
27
31
  from ..exceptions import SearchError
28
32
  from ..exceptions import SyncDataError
29
- from .nbutils import device_serial_max_length
30
- from .nbutils import order_devices
31
- from .nbutils import order_members
33
+
32
34
 
33
35
  if TYPE_CHECKING:
34
36
  from ..models import IPFabricIngestion
37
+ from ..models import IPFabricIngestionIssue
35
38
  from ipam.models import IPAddress
39
+ from ..models import IPFabricTransformMap
36
40
 
37
41
  logger = logging.getLogger("ipfabric_netbox.utilities.ipf_utils")
38
42
 
@@ -43,11 +47,14 @@ def slugify_text(value):
43
47
  return slugify(value)
44
48
 
45
49
 
46
- def serial(value):
47
- sn_length = len(value.get("sn"))
48
- serial_number = value.get("sn") if sn_length < device_serial_max_length else ""
50
+ device_serial_max_length = Device._meta.get_field("serial").max_length
51
+
52
+
53
+ def serial(data: dict) -> str:
54
+ sn_length = len(data.get("sn"))
55
+ serial_number = data.get("sn") if sn_length < device_serial_max_length else ""
49
56
  if not serial_number:
50
- serial_number = value.get("id")
57
+ serial_number = data.get("id")
51
58
  return serial_number
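# A minimal standalone sketch (not part of the package) of the fallback above:
# it assumes a hypothetical 50-character limit instead of the real
# Device._meta.get_field("serial").max_length looked up from NetBox.
def _serial_sketch(data: dict, max_length: int = 50) -> str:
    sn = data.get("sn") or ""
    # Fall back to the IP Fabric row id when the serial is empty or too long
    return sn if 0 < len(sn) < max_length else data.get("id")

assert _serial_sketch({"sn": "ABC123", "id": "row-1"}) == "ABC123"
assert _serial_sketch({"sn": "X" * 60, "id": "row-2"}) == "row-2"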
52
59
 
53
60
 
@@ -97,16 +104,6 @@ class IPFabric(object):
97
104
  formatted_snapshots[snapshot_ref] = (description, snapshot.snapshot_id)
98
105
  return formatted_snapshots
99
106
 
100
- def get_sites(self, snapshot=None) -> list:
101
- if snapshot:
102
- raw_sites = self.ipf.inventory.sites.all(snapshot_id=snapshot)
103
- else:
104
- raw_sites = self.ipf.inventory.sites.all()
105
- sites = []
106
- for item in raw_sites:
107
- sites.append(item["siteName"])
108
- return sites
109
-
110
107
  def get_table_data(self, table, device):
111
108
  filter = {"sn": ["eq", device.serial]}
112
109
  split = table.split(".")
@@ -131,11 +128,246 @@ class IPFabric(object):
131
128
  return data, columns
132
129
 
133
130
 
131
+ def make_hashable(obj):
132
+ if isinstance(obj, dict):
133
+ return tuple((k, make_hashable(v)) for k, v in obj.items())
134
+ elif isinstance(obj, list):
135
+ return tuple(make_hashable(x) for x in obj)
136
+ else:
137
+ return obj
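# A quick illustration (not part of the package) of what make_hashable() produces:
# nested dicts and lists become nested tuples, so the result can be passed to
# hash() or stored in a set.
example = {"sn": "ABC", "tags": ["core", "edge"], "site": {"name": "HQ"}}
hashable = make_hashable(example)
assert hashable == (("sn", "ABC"), ("tags", ("core", "edge")), ("site", (("name", "HQ"),)))
assert isinstance(hash(hashable), int)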
138
+
139
+
140
+ class DataRecord:
141
+ """Contains all data required to sync single object to NetBox."""
142
+
143
+ def __init__(
144
+ self,
145
+ app: str,
146
+ model: str,
147
+ data: dict,
148
+ # These values are filled later as the record is passed down the pipeline
149
+ context: dict | None = None,
150
+ transform_map: "IPFabricTransformMap | None" = None,
151
+ ):
152
+ self.app = app
153
+ self.model = model
154
+ self.data = data
155
+ self.context = context or dict()
156
+ self.transform_map = transform_map
157
+
158
+ def __hash__(self):
159
+ if self._hash is None:
160
+ try:
161
+ self._hash = hash(
162
+ (
163
+ self.app,
164
+ self.model,
165
+ # Since the dicts are already ordered, it is safe to hash them
166
+ # dict views such as .values() are not hashable; converting them to tuples keeps the hash stable across calls
167
+ make_hashable(self.data),
168
+ make_hashable(self.context),
169
+ )
170
+ )
171
+ except Exception as err:
172
+ raise Exception(f"DATA: {self.data}") from err
173
+ return self._hash
174
+
175
+ def __eq__(self, other):
176
+ return isinstance(other, DataRecord) and hash(self) == hash(other)
177
+
178
+ # Make sure data and context are sorted by keys when stored to speed up hash calculation
179
+ # This should be safe since they do not contain nested dicts
180
+ @property
181
+ def data(self):
182
+ return self._data
183
+
184
+ @data.setter
185
+ def data(self, value):
186
+ self._data = {k: v for k, v in sorted(value.items())}
187
+ self._hash = None # Invalidate cached hash
188
+
189
+ @property
190
+ def context(self):
191
+ return self._context
192
+
193
+ @context.setter
194
+ def context(self, value):
195
+ self._context = {k: v for k, v in sorted(value.items())}
196
+ self._hash = None # Invalidate cached hash
197
+
198
+
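# An illustrative sketch (not shipped code) of why DataRecord hashes
# app/model/data/context: identical rows collapse when collected into a set,
# which is how preprocess_data() deduplicates records further below.
r1 = DataRecord(app="dcim", model="site", data={"siteName": "HQ", "devicesCount": 3})
r2 = DataRecord(app="dcim", model="site", data={"devicesCount": 3, "siteName": "HQ"})
assert r1 == r2            # key order does not matter, data is sorted on assignment
assert len({r1, r2}) == 1  # duplicates collapse in a set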
199
+ # TODO: 1) Store this as model in DB linked to Transform map
200
+ # TODO: 2) Each table will be added to template data with customizable name (instead of just `object`)
201
+ # TODO: and linked using common column (like `sn` for Device and Interface)
202
+ # TODO: 3) Only pull required tables according to sync settings
203
+ class Endpoints(Enum):
204
+ SITE = "inventory.sites"
205
+ DEVICE = "inventory.devices"
206
+ VIRTUALCHASSIS = "technology.platforms.stacks_members"
207
+ INTERFACE = "inventory.interfaces"
208
+ INVENTORYITEM = "inventory.pn"
209
+ VLAN = "technology.vlans.site_summary"
210
+ VRF = "technology.routing.vrf_detail"
211
+ PREFIX = "technology.managed_networks.networks"
212
+ IPADDRESS = "technology.addressing.managed_ip_ipv4"
213
+
214
+
215
+ class Filters(Enum):
216
+ SITE = "site"
217
+ DEVICE = "device"
218
+ VIRTUALCHASSIS = "virtualchassis"
219
+ INTERFACE = "interface"
220
+ INVENTORYITEM = "inventoryitem"
221
+ VLAN = "vlan"
222
+ VRF = "vrf"
223
+ PREFIX = "prefix"
224
+ IPADDRESS = "ipaddress"
225
+
226
+ @staticmethod
227
+ def _site_filter(sites: dict | None) -> dict:
228
+ if sites:
229
+ return {"or": [{"siteName": ["eq", site]} for site in sites]}
230
+ return {}
231
+
232
+ @staticmethod
233
+ def _device_filter(sites: dict | None, child_table: bool = False) -> dict:
234
+ key = "vendor" if not child_table else "device.vendor"
235
+ excluded_vendors = ["aws", "azure"]
236
+ device_filter = {"and": [{key: ["neq", vendor]} for vendor in excluded_vendors]}
237
+ if sites:
238
+ site_filter = Filters._site_filter(sites)
239
+ device_filter["and"].append(site_filter)
240
+ return device_filter
241
+
242
+ @staticmethod
243
+ def _virtualchassis_filter(sites: dict | None) -> dict:
244
+ return Filters._device_filter(sites, child_table=True)
245
+
246
+ @staticmethod
247
+ def _interface_filter(sites: dict | None) -> dict:
248
+ return Filters._device_filter(sites, child_table=True)
249
+
250
+ @staticmethod
251
+ def _inventoryitem_filter(sites: dict | None) -> dict:
252
+ inventory_item_filter = {
253
+ "and": [
254
+ {"sn": ["empty", False]},
255
+ {"name": ["empty", False]},
256
+ ]
257
+ }
258
+ if site_filter := Filters._device_filter(sites, child_table=True):
259
+ inventory_item_filter["and"].append(site_filter)
260
+ return inventory_item_filter
261
+
262
+ @staticmethod
263
+ def _vlan_filter(sites: dict | None) -> dict:
264
+ # Remove VLANs with ID 0, minimum VLAN ID in NetBox is 1
265
+ vlan_filter = {"and": [{"vlanId": ["neq", 0]}]}
266
+ if site_filter := Filters._site_filter(sites):
267
+ vlan_filter["and"].append(site_filter)
268
+ return vlan_filter
269
+
270
+ @staticmethod
271
+ def _vrf_filter(sites: dict | None) -> dict:
272
+ return Filters._device_filter(sites, child_table=True)
273
+
274
+ @staticmethod
275
+ def _prefix_filter(sites: dict | None) -> dict:
276
+ if site_filter := Filters._device_filter(sites, child_table=True):
277
+ return {"and": [site_filter, {"and": [{"net": ["empty", False]}]}]}
278
+ else:
279
+ return {"and": [{"net": ["empty", False]}]}
280
+
281
+ @staticmethod
282
+ def _ipaddress_filter(sites: dict | None) -> dict:
283
+ return Filters._device_filter(sites, child_table=True)
284
+
285
+ @staticmethod
286
+ def get_filter(endpoint: str, sites: dict | None) -> dict:
287
+ method_name = f"_{getattr(Filters, endpoint).value}_filter"
288
+ filter_func = getattr(Filters, method_name, None)
289
+ if filter_func:
290
+ return filter_func(sites)
291
+ return {}
292
+
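# An illustrative call (not shipped code) showing the filter Filters.get_filter()
# builds for the DEVICE endpoint when two sites are selected: the cloud vendors
# are excluded and the site filter is appended to the "and" clause.
assert Filters.get_filter("DEVICE", ["HQ", "Branch"]) == {
    "and": [
        {"vendor": ["neq", "aws"]},
        {"vendor": ["neq", "azure"]},
        {"or": [{"siteName": ["eq", "HQ"]}, {"siteName": ["eq", "Branch"]}]},
    ]
}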
293
+
294
+ # TODO: Also store the hierarchy of models (e.g. Device requires the Device and virtual chassis endpoints)
295
+
296
+
297
+ class EndpointHandler:
298
+ def __init__(self, client: IPFClient, snapshot_id: str) -> None:
299
+ self.client = client
300
+ self.snapshot_id = snapshot_id
301
+
302
+ def resolve_endpoint(self, endpoint: str) -> Callable[..., Any]:
303
+ """Resolve a dot-separated endpoint string to the corresponding IPFClient attribute."""
304
+ obj = self.client
305
+ for attr in endpoint.split("."):
306
+ obj = getattr(obj, attr)
307
+ return partial(obj.all, snapshot_id=self.snapshot_id)
308
+
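# A self-contained sketch of what resolve_endpoint() does, using dummy objects
# instead of the real ipfabric.IPFClient: walk the dotted attribute path with
# getattr and pre-bind snapshot_id, so the returned callable only needs filters.
class _DummyTable:
    def all(self, snapshot_id=None, filters=None):
        return f"rows from snapshot {snapshot_id} with filters {filters}"


class _DummyInventory:
    devices = _DummyTable()


class _DummyClient:
    inventory = _DummyInventory()


handler = EndpointHandler(_DummyClient(), snapshot_id="$last")
fetch_devices = handler.resolve_endpoint("inventory.devices")
print(fetch_devices(filters={}))  # -> rows from snapshot $last with filters {}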
309
+
310
+ def order_members(members: list[dict]) -> dict[str, list[dict]]:
311
+ """Order VC members into a dict keyed by the master serial number, with all members as the values."""
312
+ devices = {}
313
+
314
+ for member in members:
315
+ # Caution: If the snapshot is created in development mode, the `sn` field is calculated from loopback IP
316
+ # This can be spotted by checking if `sn` is different from `memberSn` for the master device
317
+ # Plus `sn` will be the loopback IP in hex...
318
+ master_serial = member.get("sn")
319
+ if master_serial and member.get("memberSn"):
320
+ if master_serial in devices:
321
+ devices[master_serial].append(member)
322
+ else:
323
+ devices[master_serial] = [member]
324
+
325
+ return devices
326
+
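# A small worked example (synthetic data) of order_members(): stack members are
# grouped under the master's serial number, rows without memberSn are ignored.
members = [
    {"sn": "MASTER1", "memberSn": "MASTER1", "member": 1, "pn": "C9300-48P"},
    {"sn": "MASTER1", "memberSn": "CHILD2", "member": 2, "pn": "C9300-48P"},
    {"sn": "OTHER", "memberSn": None},  # ignored: no memberSn
]
grouped = order_members(members)
assert list(grouped) == ["MASTER1"]
assert [m["memberSn"] for m in grouped["MASTER1"]] == ["MASTER1", "CHILD2"]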
327
+
328
+ def order_devices(
329
+ devices: list[dict], members: dict[str, list[dict]]
330
+ ) -> tuple[list[dict], list[dict]]:
331
+ hostnames = [d["hostname"] for d in devices]
332
+ counter = Counter(hostnames)
333
+
334
+ new_devices = []
335
+ ordered_devices = []
336
+ virtualchassis = []
337
+
338
+ for device in devices[:]:
339
+ if counter[device["hostname"]] > 1:
340
+ device["hostname"] = f"{device['hostname']} - ({device['sn']})"
341
+ if child_members := members.get(device.get("sn")):
342
+ # This device is the VC master, and we're iterating over all its members
343
+ for child_member in child_members:
344
+ if device.get("sn") != child_member.get("memberSn"):
345
+ # VC members (non-master) are not in the Device table, so we need to add them as new Devices
346
+ new_device = deepcopy(device)
347
+ new_device[
348
+ "hostname"
349
+ ] = f"{device['hostname']}/{child_member.get('member')}"
350
+ new_device["model"] = child_member.get("pn")
351
+ new_device["sn"] = child_member.get("memberSn")
352
+ new_device["virtual_chassis"] = child_member
353
+ new_devices.append(new_device)
354
+ else:
355
+ # This is the master device
356
+ device["virtual_chassis"] = child_member
357
+ virtualchassis.append(child_member)
358
+ hostnames = [d["hostname"] for d in devices]
359
+ counter = Counter(hostnames)
360
+ else:
361
+ ordered_devices.append(device)
362
+ ordered_devices.extend(new_devices)
363
+ return ordered_devices, virtualchassis
364
+
365
+
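# A synthetic example (not shipped code) of the duplicate-hostname handling in
# order_devices(): two devices reporting the same hostname are disambiguated by
# appending their serial numbers.
devices = [
    {"hostname": "sw1", "sn": "SN-A", "model": "X"},
    {"hostname": "sw1", "sn": "SN-B", "model": "X"},
]
ordered, vcs = order_devices(devices, members={})
assert sorted(d["hostname"] for d in ordered) == ["sw1 - (SN-A)", "sw1 - (SN-B)"]
assert vcs == []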
134
366
  class IPFabricSyncRunner(object):
135
367
  def __init__(
136
368
  self,
137
369
  sync,
138
- client: IPFabric = None,
370
+ client: IPFClient = None,
139
371
  ingestion=None,
140
372
  settings: dict = None,
141
373
  ) -> None:
@@ -150,6 +382,62 @@ class IPFabricSyncRunner(object):
150
382
  if self.sync.snapshot_data.status != "loaded":
151
383
  raise SyncError("Snapshot not loaded in IP Fabric.")
152
384
 
385
+ # Some objects depend on others being synced first; store errors to avoid duplicate issues
386
+ # We should store all dependent objects, but that is very hard to do
387
+ # For now, store only serial numbers since that is the largest dependency chain
388
+ self.error_serials = set()
389
+
390
+ @staticmethod
391
+ def get_error_serial(context: dict | None, data: dict | None) -> str | None:
392
+ """Get error serial from context or raw data for skipping purposes."""
393
+ context = context or {}
394
+ data = data or {}
395
+ return (
396
+ context.get("sn") or context.get("serial") or serial(data)
397
+ if "sn" in data
398
+ else None
399
+ )
400
+
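# A simplified, standalone sketch (not the shipped runner) of the skip logic
# built on error_serials: once a device's serial has failed, later records that
# resolve to the same serial (interfaces, IPs, ...) are skipped instead of
# producing a cascade of duplicate ingestion issues.
error_serials: set[str] = set()

def _sketch_should_skip(record_serial: str | None) -> bool:
    return record_serial is not None and record_serial in error_serials

error_serials.add("SN-A")                    # the device sync for SN-A failed
assert _sketch_should_skip("SN-A") is True   # its interfaces/IPs are skipped
assert _sketch_should_skip("SN-B") is False  # unrelated devices still sync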
401
+ def create_or_get_sync_issue(
402
+ self,
403
+ exception: Exception,
404
+ ingestion: "IPFabricIngestion",
405
+ message: str = None,
406
+ model: str = None,
407
+ context: dict = None,
408
+ data: dict = None,
409
+ ) -> "tuple[bool, IPFabricIngestionIssue]":
410
+ """
411
+ Helper function to handle sync errors and create IPFabricIngestionIssue if needed.
412
+ """
413
+ context = context or {}
414
+
415
+ error_serial = self.get_error_serial(context, data)
416
+ if error_serial:
417
+ self.error_serials.add(error_serial)
418
+
419
+ # TODO: This is to prevent circular import issues, clean it up later.
420
+ from ..models import IPFabricIngestionIssue
421
+
422
+ if not hasattr(exception, "issue_id") or not exception.issue_id:
423
+ issue = IPFabricIngestionIssue.objects.create(
424
+ ingestion=ingestion,
425
+ exception=exception.__class__.__name__,
426
+ message=message or getattr(exception, "message", str(exception)),
427
+ model=model,
428
+ coalesce_fields={
429
+ k: v for k, v in context.items() if k not in ["defaults"]
430
+ },
431
+ defaults=context.get("defaults", dict()),
432
+ raw_data=data or dict(),
433
+ )
434
+ if hasattr(exception, "issue_id"):
435
+ exception.issue_id = issue.id
436
+ return True, issue
437
+ else:
438
+ issue = IPFabricIngestionIssue.objects.get(id=exception.issue_id)
439
+ return False, issue
440
+
153
441
  @staticmethod
154
442
  def handle_errors(func: Callable):
155
443
  def wrapper(*args, **kwargs):
@@ -196,99 +484,111 @@ class IPFabricSyncRunner(object):
196
484
  connection_name = self.ingestion.branch.connection_name
197
485
  return connection_name
198
486
 
199
- def get_model_or_update(self, app, model, data):
200
- transform_map = self.transform_maps.get(
201
- target_model__app_label=app, target_model__model=model
202
- )
203
-
204
- if not transform_map:
205
- raise SystemError(f"No transform map available for {app}: {model}")
206
-
207
- model_settings = self.settings.get(model, False)
487
+ def get_transform_context(self, record: DataRecord) -> DataRecord:
488
+ if not record.transform_map:
489
+ raise SystemError(
490
+ f"No transform map available for {record.app}: {record.model}"
491
+ )
208
492
  try:
209
- context = transform_map.get_context(data)
493
+ record.context = record.transform_map.get_context(record.data)
210
494
  except Exception as err:
211
- message = f"Error getting context for `{model}`."
495
+ message = f"Error getting context for `{record.model}`."
212
496
  if isinstance(err, ObjectDoesNotExist):
213
497
  message += (
214
498
  " Could not find related object using template in transform maps."
215
499
  )
216
500
  elif isinstance(err, MultipleObjectsReturned):
217
501
  message += " Multiple objects returned using the template in transform maps; the template is not strict enough."
218
- _, issue = create_or_get_sync_issue(
502
+ _, issue = self.create_or_get_sync_issue(
219
503
  exception=err,
220
504
  ingestion=self.ingestion,
221
505
  message=message,
222
- model=model,
223
- data=data,
506
+ model=record.model,
507
+ data=record.data,
224
508
  )
225
509
  raise SearchError(
226
- message=message, data=data, model=model, issue_id=issue.id
510
+ message=message, data=record.data, model=record.model, issue_id=issue.pk
227
511
  ) from err
228
512
 
229
- queryset = transform_map.target_model.model_class().objects
513
+ return record
230
514
 
231
- object = None
515
+ def get_model_or_update(self, record: DataRecord) -> ModelTypeVar | None:
516
+ # First check if there are any previous errors linked to this object
517
+ error_serial = self.get_error_serial(record.context, record.data)
518
+ if error_serial and error_serial in self.error_serials:
519
+ self.logger.log_info(
520
+ f"Skipping syncing of `{record.model}` with serial `{error_serial}` due to previous errors.",
521
+ obj=self.sync,
522
+ )
523
+ return None
524
+ record = self.get_transform_context(record)
525
+ queryset = record.transform_map.target_model.model_class().objects
526
+ model_settings = self.settings.get(record.model, False)
527
+
528
+ obj = None
232
529
  try:
233
530
  connection_name = self.get_db_connection_name()
234
531
  if model_settings:
235
- logger.info(f"Creating {model}")
236
- object = transform_map.update_or_create_instance(
237
- context=context,
532
+ logger.info(f"Creating {record.model}")
533
+ obj = record.transform_map.update_or_create_instance(
534
+ context=record.context,
238
535
  tags=self.sync.tags.all(),
239
536
  connection_name=connection_name,
240
537
  )
241
538
  else:
242
- logger.info(f"Getting {model}")
243
- context.pop("defaults", None)
244
- object = queryset.using(connection_name).get(**context)
539
+ logger.info(f"Getting {record.model}")
540
+ record.context.pop("defaults", None)
541
+ obj = queryset.using(connection_name).get(**record.context)
245
542
  except queryset.model.DoesNotExist as err:
246
- message = f"Instance of `{model}` not found."
247
- _, issue = create_or_get_sync_issue(
543
+ message = f"Instance of `{record.model}` not found."
544
+ _, issue = self.create_or_get_sync_issue(
248
545
  exception=err,
249
546
  ingestion=self.ingestion,
250
547
  message=message,
251
- model=model,
252
- context=context,
253
- data=data,
548
+ model=record.model,
549
+ context=record.context,
550
+ data=record.data,
254
551
  )
255
552
  raise SearchError(
256
553
  message=message,
257
- model=model,
258
- context=context,
259
- data=data,
260
- issue_id=issue.id,
554
+ model=record.model,
555
+ context=record.context,
556
+ data=record.data,
557
+ issue_id=issue.pk,
261
558
  ) from err
262
559
  except queryset.model.MultipleObjectsReturned as err:
263
- message = f"Multiple instances of `{model}` found."
264
- _, issue = create_or_get_sync_issue(
560
+ message = f"Multiple instances of `{record.model}` found."
561
+ _, issue = self.create_or_get_sync_issue(
265
562
  exception=err,
266
563
  ingestion=self.ingestion,
267
564
  message=message,
268
- model=model,
269
- context=context,
270
- data=data,
565
+ model=record.model,
566
+ context=record.context,
567
+ data=record.data,
271
568
  )
272
569
  raise SearchError(
273
570
  message=message,
274
- model=model,
275
- context=context,
276
- data=data,
277
- issue_id=issue.id,
571
+ model=record.model,
572
+ context=record.context,
573
+ data=record.data,
574
+ issue_id=issue.pk,
278
575
  ) from err
279
576
  except Exception as err:
280
- _, issue = create_or_get_sync_issue(
577
+ _, issue = self.create_or_get_sync_issue(
281
578
  exception=err,
282
579
  ingestion=self.ingestion,
283
- model=model,
284
- context=context,
285
- data=data,
580
+ model=record.model,
581
+ context=record.context,
582
+ data=record.data,
286
583
  )
287
584
  raise SyncDataError(
288
- model=model, context=context, data=data, issue_id=issue.id
585
+ model=record.model,
586
+ context=record.context,
587
+ data=record.data,
588
+ issue_id=issue.pk,
289
589
  ) from err
290
590
 
291
- return object
591
+ return obj
292
592
 
293
593
  def collect_data(self):
294
594
  try:
@@ -298,6 +598,7 @@ class IPFabricSyncRunner(object):
298
598
  )
299
599
  data = {}
300
600
  if self.sync.snapshot_data.source.type == IPFabricSourceTypeChoices.REMOTE:
601
+ # This requires data already pushed to NetBox by user, does not connect to IPF directly
301
602
  self.logger.log_info(
302
603
  "Remote collector checking for snapshot data.", obj=self.sync
303
604
  )
@@ -305,197 +606,144 @@ class IPFabricSyncRunner(object):
305
606
  raise SyncError(
306
607
  "No snapshot data available. This is a remote sync. Push data to NetBox first."
307
608
  )
308
- data["site"] = list(
309
- self.sync.snapshot_data.ipf_data.filter(type="site").values_list(
310
- "data", flat=True
311
- )
312
- )
313
- data["device"] = list(
314
- self.sync.snapshot_data.ipf_data.filter(type="device").values_list(
315
- "data", flat=True
316
- )
317
- )
318
- data["virtualchassis"] = list(
319
- self.sync.snapshot_data.ipf_data.filter(
320
- type="virtualchassis"
321
- ).values_list("data", flat=True)
322
- )
323
- data["interface"] = list(
324
- self.sync.snapshot_data.ipf_data.filter(
325
- type="interface"
326
- ).values_list("data", flat=True)
327
- )
328
- data["inventoryitem"] = list(
329
- self.sync.snapshot_data.ipf_data.filter(
330
- type="inventoryitem"
331
- ).values_list("data", flat=True)
332
- )
333
- data["vlan"] = list(
334
- self.sync.snapshot_data.ipf_data.filter(type="vlan").values_list(
335
- "data", flat=True
336
- )
337
- )
338
- data["vrf"] = list(
339
- self.sync.snapshot_data.ipf_data.filter(type="vrf").values_list(
340
- "data", flat=True
341
- )
342
- )
343
- data["prefix"] = list(
344
- self.sync.snapshot_data.ipf_data.filter(type="prefix").values_list(
345
- "data", flat=True
609
+ for endpoint in Endpoints:
610
+ data[endpoint.name.lower()] = list(
611
+ self.sync.snapshot_data.ipf_data.filter(
612
+ type=endpoint.name.lower()
613
+ ).values_list("data", flat=True)
346
614
  )
347
- )
348
- data["ipaddress"] = list(
349
- self.sync.snapshot_data.ipf_data.filter(
350
- type="ipaddress"
351
- ).values_list("data", flat=True)
352
- )
353
615
  else:
616
+ # This pulls data directly from IP Fabric instance
354
617
  self.logger.log_info(
355
618
  "Local collector being used for snapshot data.", obj=self.sync
356
619
  )
357
- excluded_vendors = ["aws", "azure"]
358
-
359
- query_filter = {
360
- "and": [{"vendor": ["neq", vendor]} for vendor in excluded_vendors]
361
- }
362
-
363
- if ingestion_sites := self.settings.get("sites"):
364
- site_filter = {
365
- "or": [{"siteName": ["eq", site]} for site in ingestion_sites]
366
- }
367
- query_filter["and"].append(site_filter)
368
-
369
- self.logger.log_info(
370
- f"Creating site filter `{json.dumps(site_filter)}`",
371
- obj=self.sync,
372
- )
373
- else:
374
- site_filter = {}
375
-
376
- data["site"] = self.client.inventory.sites.all(
377
- snapshot_id=self.settings["snapshot_id"], filters=site_filter
378
- )
379
-
380
- data["device"] = self.client.inventory.devices.all(
381
- snapshot_id=self.settings["snapshot_id"], filters=query_filter
382
- )
383
-
384
- data[
385
- "virtualchassis"
386
- ] = self.client.technology.platforms.stacks_members.all(
387
- snapshot_id=self.settings["snapshot_id"], filters=site_filter
388
- )
389
-
390
- data["interface"] = self.client.inventory.interfaces.all(
391
- snapshot_id=self.settings["snapshot_id"], filters=site_filter
392
- )
393
-
394
- inventory_item_filter = {
395
- "and": [
396
- {"sn": ["empty", False]},
397
- {"name": ["empty", False]},
398
- ]
399
- }
400
- if site_filter:
401
- inventory_item_filter["and"].append(site_filter)
402
-
403
- data["inventoryitem"] = self.client.inventory.pn.all(
620
+ endpoint_handler = EndpointHandler(
621
+ self.client,
404
622
  snapshot_id=self.settings["snapshot_id"],
405
- filters=inventory_item_filter,
406
- )
407
-
408
- data["vlan"] = self.client.technology.vlans.site_summary.all(
409
- snapshot_id=self.settings["snapshot_id"], filters=site_filter
410
- )
411
-
412
- data["vrf"] = self.client.technology.routing.vrf_detail.all(
413
- snapshot_id=self.settings["snapshot_id"], filters=site_filter
414
- )
415
-
416
- if site_filter:
417
- networks_filter = {
418
- "and": [site_filter, {"and": [{"net": ["empty", False]}]}]
419
- }
420
- else:
421
- networks_filter = {"and": [{"net": ["empty", False]}]}
422
- self.logger.log_info(f"Creating network filter: `{networks_filter}`")
423
- data["prefix"] = self.client.technology.managed_networks.networks.all(
424
- snapshot_id=self.settings["snapshot_id"], filters=networks_filter
425
- )
426
-
427
- data[
428
- "ipaddress"
429
- ] = self.client.technology.addressing.managed_ip_ipv4.all(
430
- snapshot_id=self.settings["snapshot_id"], filters=site_filter
431
623
  )
624
+ ingestion_sites = self.settings.get("sites")
625
+ for endpoint in Endpoints:
626
+ filters = Filters.get_filter(endpoint.name, ingestion_sites)
627
+ logger.debug(
628
+ f"Collecting data from endpoint: `{endpoint.value}` using filter `{json.dumps(filters)}`."
629
+ )
630
+ data[endpoint.name.lower()] = endpoint_handler.resolve_endpoint(
631
+ endpoint.value
632
+ )(
633
+ filters=filters,
634
+ )
635
+ self.logger.log_info(
636
+ f"Collected {len(data[endpoint.name.lower()])} items from endpoint `{endpoint.value}`.",
637
+ obj=self.sync.snapshot_data.source,
638
+ )
432
639
  except Exception as e:
433
640
  self.logger.log_failure(
434
641
  f"Error collecting data from IP Fabric: {e}", obj=self.sync
435
642
  )
436
643
  raise SyncError(f"Error collecting data from IP Fabric: {e}")
644
+ return data
437
645
 
438
- self.logger.log_info(
439
- f"{len(data['site'])} sites collected", obj=self.sync.snapshot_data.source
440
- )
441
- self.logger.log_info(
442
- f"{len(data['device'])} devices collected",
443
- obj=self.sync.snapshot_data.source,
444
- )
445
- self.logger.log_info(
446
- f"{len(data['virtualchassis'])} stack members collected",
447
- obj=self.sync.snapshot_data.source,
646
+ @cache
647
+ def get_transform_map(self, app: str, model: str) -> "IPFabricTransformMap":
648
+ """Get transform map for given app and model. Cached to improve performance."""
649
+ return self.transform_maps.get(
650
+ target_model__app_label=app, target_model__model=model
448
651
  )
449
652
 
450
- self.logger.log_info(
451
- f"{len(data['interface'])} interfaces collected",
452
- obj=self.sync.snapshot_data.source,
653
+ def create_new_data_record(self, app: str, model: str, data: dict) -> DataRecord:
654
+ """Extract only relevant source data according to transform map configuration."""
655
+ transform_map = self.get_transform_map(app=app, model=model)
656
+ try:
657
+ source_data = transform_map.strip_source_data(data)
658
+ except KeyError as err:
659
+ raise SyncError(
660
+ f"Missing key column {err.args[0]} in source data when preparing data for {app}_{model}."
661
+ ) from err
662
+ return DataRecord(
663
+ app=app, model=model, data=source_data, transform_map=transform_map
453
664
  )
454
665
 
455
- self.logger.log_info(
456
- f"{len(data.get('inventoryitem', []))} part numbers collected",
457
- obj=self.sync.snapshot_data.source,
458
- )
666
+ def preprocess_data(self, data: dict) -> dict[str, set[DataRecord]]:
667
+ # TODO: Only process data according to settings to improve performance
668
+ # Set those records that can't be iterated separately
669
+ # Others start as empty sets to define the order shown in the UI progress
670
+ records = {
671
+ "site": set(
672
+ self.create_new_data_record(app="dcim", model="site", data=item)
673
+ for item in data.get("site", [])
674
+ ),
675
+ "manufacturer": set(),
676
+ "devicetype": set(),
677
+ "platform": set(),
678
+ "devicerole": set(),
679
+ "device": set(),
680
+ "virtualchassis": set(),
681
+ "interface": set(),
682
+ "macaddress": set(),
683
+ "inventoryitem": set(
684
+ self.create_new_data_record(
685
+ app="dcim", model="inventoryitem", data=item
686
+ )
687
+ for item in data.get("inventoryitem", [])
688
+ ),
689
+ "vlan": set(
690
+ self.create_new_data_record(app="ipam", model="vlan", data=item)
691
+ for item in data.get("vlan", [])
692
+ ),
693
+ "vrf": set(
694
+ self.create_new_data_record(app="ipam", model="vrf", data=item)
695
+ for item in data.get("vrf", [])
696
+ ),
697
+ "prefix": set(
698
+ self.create_new_data_record(app="ipam", model="prefix", data=item)
699
+ for item in data.get("prefix", [])
700
+ ),
701
+ "ipaddress": set(),
702
+ }
459
703
 
460
- self.logger.log_info(
461
- f"{len(data.get('vlan', []))} VLANs collected",
462
- obj=self.sync.snapshot_data.source,
463
- )
704
+ self.logger.log_info("Preparing devices", obj=self.sync)
705
+ members = order_members(data.get("virtualchassis", []))
706
+ devices, virtualchassis = order_devices(data.get("device", []), members)
464
707
 
465
- self.logger.log_info(
466
- f"{len(data.get('vrf', []))} VRFs collected",
467
- obj=self.sync.snapshot_data.source,
468
- )
708
+ # We need to store primary IPs of Devices to assign them later
709
+ # since they are not stored on Device object directly
710
+ # TODO: This will be later replaced when we are able to sync from multiple API tables to 1 model
711
+ device_primary_ips = {}
469
712
 
470
- self.logger.log_info(
471
- f"{len(data.get('prefix', []))} networks collected",
472
- obj=self.sync.snapshot_data.source,
473
- )
713
+ for device in devices:
714
+ records["manufacturer"].add(
715
+ self.create_new_data_record(
716
+ app="dcim", model="manufacturer", data=device
717
+ )
718
+ )
719
+ records["devicetype"].add(
720
+ self.create_new_data_record(app="dcim", model="devicetype", data=device)
721
+ )
722
+ records["platform"].add(
723
+ self.create_new_data_record(app="dcim", model="platform", data=device)
724
+ )
725
+ records["devicerole"].add(
726
+ self.create_new_data_record(app="dcim", model="devicerole", data=device)
727
+ )
728
+ # This field is required by Device transform maps, but is set only when Device is part of VC.
729
+ if "virtual_chassis" not in device:
730
+ device["virtual_chassis"] = None
731
+ records["device"].add(
732
+ self.create_new_data_record(app="dcim", model="device", data=device)
733
+ )
734
+ device_primary_ips[device.get("sn")] = device.get("loginIp")
474
735
 
475
- self.logger.log_info(
476
- f"{len(data.get('ipaddress', []))} management IP's collected",
477
- obj=self.sync.snapshot_data.source,
736
+ records["virtualchassis"] = set(
737
+ self.create_new_data_record(app="dcim", model="virtualchassis", data=item)
738
+ for item in virtualchassis
478
739
  )
479
- self.logger.log_info("Ordering devices", obj=self.sync)
480
-
481
- members = order_members(data.get("virtualchassis", []))
482
- devices = order_devices(data.get("device", []), members)
483
-
484
- self.logger.log_info("Ordering Part Numbers", obj=self.sync)
485
-
486
- interface_dict = {}
487
- for interface in data["interface"]:
488
- if int_sn := interface.get("sn"):
489
- if interface_dict.get(int_sn):
490
- interface_dict[int_sn].append(interface)
491
- else:
492
- interface_dict[int_sn] = [interface]
493
740
 
741
+ # `nameOriginal` is a hidden column in IP Fabric that holds the human-readable interface name
742
+ # This allows us to use it instead of `intName`
743
+ # But it can be customized using transform maps, so we need to use the currently configured source field
494
744
  interface_key = "nameOriginal"
495
745
  try:
496
- int_transform_map = self.transform_maps.get(
497
- target_model__app_label="dcim", target_model__model="interface"
498
- )
746
+ int_transform_map = self.get_transform_map(app="dcim", model="interface")
499
747
  int_name_field_map = int_transform_map.field_maps.get(target_field="name")
500
748
  interface_key = int_name_field_map.source_field
501
749
  except Exception as e:
@@ -505,71 +753,62 @@ class IPFabricSyncRunner(object):
505
753
  )
506
754
  raise SyncError(f"Error collecting source column name for interface: {e}")
507
755
 
508
- managed_ips = {}
509
- for ip in data["ipaddress"]:
510
- # Find corresponding interface list by serial number (sn)
511
- device_interfaces = interface_dict.get(ip["sn"], [])
756
+ self.logger.log_info("Preparing Interfaces", obj=self.sync)
757
+ # Store human-readable interface names to use them later for IP Addresses
758
+ readable_int_names = {}
759
+ for interface in data["interface"]:
760
+ interface_record = self.create_new_data_record(
761
+ app="dcim", model="interface", data=interface
762
+ )
763
+ interface_record.data["loginIp"] = device_primary_ips.get(
764
+ interface.get("sn")
765
+ )
766
+ records["interface"].add(interface_record)
767
+ readable_int_names[
768
+ f"{interface.get('sn')}_{interface.get('intName')}"
769
+ ] = interface.get(interface_key)
770
+ records["macaddress"].add(
771
+ self.create_new_data_record(
772
+ app="dcim", model="macaddress", data=interface
773
+ )
774
+ )
512
775
 
513
- # Use filter to find the interface with the matching intName
514
- filtered_interface = list(
515
- filter(lambda d: d["intName"] == ip["intName"], device_interfaces)
776
+ self.logger.log_info("Preparing IP Addresses", obj=self.sync)
777
+ for ip in data["ipaddress"]:
778
+ # We get `nameOriginal` from the Interface table to use the human-readable name instead of `intName`
779
+ ip["nameOriginal"] = readable_int_names.get(
780
+ f"{ip.get('sn')}_{ip.get('intName')}"
781
+ )
782
+ # Let's skip IPs we cannot assign to an interface
783
+ if not ip["nameOriginal"]:
784
+ continue
785
+ ipaddress_record = self.create_new_data_record(
786
+ app="ipam", model="ipaddress", data=ip
516
787
  )
788
+ # Store whether this IP is primary for the device
789
+ ipaddress_record.data["is_primary"] = ip.get(
790
+ "sn"
791
+ ) in device_primary_ips and device_primary_ips.get(
792
+ ip.get("sn")
793
+ ) == ipaddress_record.data.get(
794
+ "ip"
795
+ )
796
+ records["ipaddress"].add(ipaddress_record)
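# A tiny synthetic illustration (not shipped code) of the "<sn>_<intName>" join
# key used above to carry the human-readable `nameOriginal` from the interface
# rows over to the managed-IP rows.
interfaces = [{"sn": "SN-A", "intName": "Gi1/0/1", "nameOriginal": "GigabitEthernet1/0/1"}]
ips = [{"sn": "SN-A", "intName": "Gi1/0/1", "ip": "10.0.0.1"}]

readable = {f"{i['sn']}_{i['intName']}": i["nameOriginal"] for i in interfaces}
for ip in ips:
    ip["nameOriginal"] = readable.get(f"{ip['sn']}_{ip['intName']}")

assert ips[0]["nameOriginal"] == "GigabitEthernet1/0/1"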
517
797
 
518
- if filtered_interface:
519
- ip["nameOriginal"] = filtered_interface[0]["nameOriginal"]
520
- if ip[interface_key]:
521
- int_name = ip[interface_key]
522
- else:
523
- int_name = ip["intName"]
524
- if ip["sn"] not in managed_ips:
525
- managed_ips[ip["sn"]] = {int_name: [ip]}
526
- elif int_name not in managed_ips.get(ip["sn"]):
527
- managed_ips[ip["sn"]][int_name] = [ip]
528
- else:
529
- managed_ips[ip["sn"]][int_name].append(ip)
530
-
531
- for vlan in data["vlan"][:]:
532
- # Remove VLANs with ID 0, minimum VLAN ID in NetBox is 1
533
- if vlan.get("vlanId") == 0:
534
- data["vlan"].remove(vlan)
535
-
536
- for item in data["inventoryitem"][:]:
537
- # Remove items with empty serial number
538
- if item.get("sn") in [None, "None"]:
539
- data["inventoryitem"].remove(item)
540
-
541
- for model, item_count in [
542
- ("site", len(data.get("site", []))),
543
- ("device", len(devices)),
544
- ("interface", len(data.get("interface", []))),
545
- ("inventoryitem", len(data.get("inventoryitem", []))),
546
- ("vlan", len(data.get("vlan", []))),
547
- ("vrf", len(data.get("vrf", []))),
548
- ("prefix", len(data.get("prefix", []))),
549
- # TODO: Since we sync only those assigned to interfaces, we are skipping some IPs
550
- # TODO: This is fixable by syncing IPs separately from interface and only assign them on interfaces
551
- ("ipaddress", len(data.get("ipaddress", []))),
552
- ]:
553
- if self.settings.get(model):
554
- self.logger.init_statistics(model, item_count)
798
+ for model, records_set in records.items():
799
+ if self.settings.get(model) and len(records_set):
800
+ self.logger.init_statistics(model, len(records_set))
801
+ self.logger.log_info(
802
+ f"Prepared {len(records_set)} items for `{model}` to be synced.",
803
+ obj=self.sync,
804
+ )
555
805
 
556
- return (
557
- data["site"],
558
- devices,
559
- interface_dict,
560
- data["inventoryitem"],
561
- data["vrf"],
562
- data["vlan"],
563
- data["prefix"],
564
- managed_ips,
565
- )
806
+ return records
566
807
 
567
808
  @handle_errors
568
809
  def sync_model(
569
810
  self,
570
- app_label: str,
571
- model: str,
572
- data: dict | None,
811
+ record: DataRecord,
573
812
  stats: bool = True,
574
813
  sync: bool = False,
575
814
  ) -> ModelTypeVar | None:
@@ -578,31 +817,29 @@ class IPFabricSyncRunner(object):
578
817
  if not sync:
579
818
  return None
580
819
 
581
- if not data:
820
+ if not record.data:
582
821
  return None
583
822
 
584
- instance = self.get_model_or_update(app_label, model, data)
823
+ instance = self.get_model_or_update(record)
585
824
 
586
825
  # Only log when we successfully synced the item and asked for it
587
- if stats:
588
- self.logger.increment_statistics(model=model)
826
+ if stats and instance:
827
+ self.logger.increment_statistics(model=record.model)
589
828
 
590
829
  return instance
591
830
 
592
831
  def sync_item(
593
832
  self,
594
- item,
595
- app_label: str,
596
- model: str,
833
+ record: DataRecord,
597
834
  cf: bool = False,
598
835
  ingestion: "IPFabricIngestion" = None,
836
+ stats: bool = True,
599
837
  ) -> ModelTypeVar | None:
600
838
  """Sync a single item to NetBox."""
601
839
  synced_object = self.sync_model(
602
- app_label=app_label,
603
- model=model,
604
- data=item,
605
- sync=self.settings.get(model),
840
+ record=record,
841
+ sync=self.settings.get(record.model),
842
+ stats=stats,
606
843
  )
607
844
  if synced_object is None:
608
845
  return None
@@ -620,13 +857,16 @@ class IPFabricSyncRunner(object):
620
857
 
621
858
  def sync_items(
622
859
  self,
623
- items,
624
- app_label: str,
625
- model: str,
860
+ items: set[DataRecord],
626
861
  cf: bool = False,
627
862
  ingestion: "IPFabricIngestion" = None,
863
+ stats: bool = True,
628
864
  ) -> None:
629
865
  """Sync list of items to NetBox."""
866
+ if not items:
867
+ return
868
+
869
+ app, model = (lambda x: (x.app, x.model))(next(iter(items)))
630
870
  if not self.settings.get(model):
631
871
  self.logger.log_info(
632
872
  f"Did not ask to sync {model}s, skipping.", obj=self.sync
@@ -634,230 +874,111 @@ class IPFabricSyncRunner(object):
634
874
  return
635
875
 
636
876
  for item in items:
637
- self.sync_item(item, app_label, model, cf, ingestion)
877
+ self.sync_item(item, cf, ingestion, stats)
638
878
 
639
879
  @handle_errors
640
- def sync_devices(
641
- self,
642
- ingestion,
643
- devices,
644
- interface_dict,
645
- managed_ips,
646
- ):
647
- for model, name in [
648
- ("manufacturer", "manufacturers"),
649
- ("devicetype", "device types"),
650
- ("platform", "platforms"),
651
- ("devicerole", "device roles"),
652
- ("virtualchassis", "virtual chassis"),
653
- ("device", "devices"),
654
- ("inventoryitem", "device inventory items"),
655
- ]:
656
- if not self.settings.get(model):
657
- self.logger.log_info(
658
- f"Did not ask to sync {name}, skipping", obj=self.sync
659
- )
660
-
661
- devices_total = len(devices)
662
-
663
- for device in devices:
664
- self.sync_model(
665
- "dcim", "manufacturer", device, sync=self.settings.get("manufacturer")
666
- )
667
- self.sync_model(
668
- "dcim", "devicetype", device, sync=self.settings.get("devicetype")
669
- )
670
- self.sync_model(
671
- "dcim", "platform", device, sync=self.settings.get("platform")
672
- )
673
- self.sync_model(
674
- "dcim", "devicerole", device, sync=self.settings.get("devicerole")
675
- )
676
-
677
- virtual_chassis = device.get("virtual_chassis", {})
678
- self.sync_model(
679
- "dcim",
680
- "virtualchassis",
681
- virtual_chassis,
682
- stats=False,
683
- sync=self.settings.get("virtualchassis"),
684
- )
685
-
686
- # We need to get a Device instance even when not syncing it but syncing Interfaces, IPs or MACs
687
- device_object: Device | None = self.sync_model(
688
- "dcim",
689
- "device",
690
- device,
691
- stats=False,
692
- sync=self.settings.get("device")
693
- or self.settings.get("interface")
694
- or self.settings.get("ipaddress")
695
- or self.settings.get("macaddress"),
880
+ def sync_ip_addresses(self, ip_addresses: set[DataRecord]):
881
+ """
882
+ We cannot assign primary IP in signals since IPAddress does not
883
+ contain information whether it is primary or not. And it must be done
884
+ on Device object, so cannot be done via Transform Maps yet since that
885
+ would require another Transform Map for Device.
886
+ So we need to do it manually here.
887
+ """
888
+ if not self.settings.get("ipaddress"):
889
+ self.logger.log_info(
890
+ "Did not ask to sync ipaddresses, skipping.", obj=self.sync
696
891
  )
892
+ return
697
893
 
698
- if device_object and self.settings.get("device"):
699
- device_object.snapshot()
700
- if self.sync.update_custom_fields:
701
- device_object.custom_field_data[
702
- "ipfabric_source"
703
- ] = self.sync.snapshot_data.source.pk
704
- if ingestion:
705
- device_object.custom_field_data[
706
- "ipfabric_ingestion"
707
- ] = ingestion.pk
708
- device_object.save()
709
-
710
- self.logger.increment_statistics(model="device")
711
- logger.info(
712
- f"Device {self.logger.log_data.get('statistics', {}).get('device', {}).get('current')} out of {devices_total}"
713
- )
894
+ for ip_address in ip_addresses:
895
+ connection_name = self.get_db_connection_name()
714
896
 
715
- # The Device exists now, so we can update the master of the VC.
716
- # The logic is handled in transform maps.
717
- self.sync_model(
718
- "dcim",
719
- "virtualchassis",
720
- virtual_chassis,
721
- stats=False,
722
- sync=self.settings.get("virtualchassis"),
897
+ # First remove primary IP from the target object.
898
+ # It cannot be done using hooks since there is no pre_clean at it fails on full_clean()
899
+ try:
900
+ ipv4_address = render_jinja2(
901
+ ip_address.transform_map.field_maps.get(
902
+ target_field="address"
903
+ ).template,
904
+ {"object": ip_address.data},
723
905
  )
724
-
725
- device_interfaces = interface_dict.get(device.get("sn"), [])
726
- for device_interface in device_interfaces:
727
- self.sync_interface(
728
- device_interface, managed_ips, device_object, device
906
+ other_device = (
907
+ Device.objects.using(connection_name)
908
+ .exclude(serial=serial(ip_address.data))
909
+ .get(primary_ip4__address=ipv4_address)
729
910
  )
730
-
731
- @handle_errors
732
- def sync_ipaddress(
733
- self,
734
- managed_ip: dict | None,
735
- device_object: Device | None,
736
- primary_ip: str | None,
737
- login_ip: str | None,
738
- ):
739
- ip_address_obj: "IPAddress | None" = self.sync_model(
740
- "ipam",
741
- "ipaddress",
742
- managed_ip,
743
- sync=self.settings.get("ipaddress"),
744
- )
745
- if ip_address_obj is None:
746
- return None
747
-
748
- connection_name = self.get_db_connection_name()
749
-
750
- try:
751
- # Removing another IP is done in .signals.clear_other_primary_ip
752
- # But do it here too, so the change is shown in StagedChange diff
753
- other_device = Device.objects.using(connection_name).get(
754
- primary_ip4=ip_address_obj
755
- )
756
- if other_device and device_object != other_device:
757
911
  other_device.snapshot()
758
912
  other_device.primary_ip4 = None
759
913
  other_device.save(using=connection_name)
760
- except ObjectDoesNotExist:
761
- pass
762
-
763
- if login_ip == primary_ip:
764
- try:
765
- device_object.snapshot()
766
- device_object.primary_ip4 = ip_address_obj
767
- device_object.save(using=connection_name)
768
- except (ValueError, AttributeError) as err:
769
- self.logger.log_failure(
770
- f"Error assigning primary IP to device: {err}", obj=self.sync
914
+ except Device.DoesNotExist:
915
+ # There is no other device with this IP as primary, all good
916
+ pass
917
+ except Exception as err:
918
+ # The transform maps might have changed and we may fail to resolve the template
919
+ # Make sure this does not crash the sync and is handled gracefully
920
+ _, issue = self.create_or_get_sync_issue(
921
+ exception=err,
922
+ ingestion=self.ingestion,
923
+ message="Error removing primary IP from other device.",
924
+ model=ip_address.model,
925
+ data=ip_address.data,
771
926
  )
772
- return None
773
- return ip_address_obj
774
-
775
- @handle_errors
776
- def sync_macaddress(
777
- self, data: dict | None, interface_object: Interface
778
- ) -> MACAddress | None:
779
- # Need to create MAC Address object before we can assign it to Interface
780
- # TODO: Figure out how to do this using transform maps
781
- macaddress_data = {
782
- "mac": data,
783
- "id": getattr(interface_object, "pk", None),
784
- }
785
- macaddress_object: MACAddress | None = self.sync_model(
786
- "dcim", "macaddress", macaddress_data, sync=self.settings.get("macaddress")
787
- )
788
- if macaddress_object is None:
789
- return None
790
- try:
791
- interface_object.snapshot()
792
- interface_object.primary_mac_address = macaddress_object
793
- interface_object.save(using=self.get_db_connection_name())
794
- except ValueError as err:
795
- self.logger.log_failure(
796
- f"Error assigning MAC Address to interface: {err}", obj=self.sync
797
- )
798
- return None
799
- return macaddress_object
800
-
801
- @handle_errors
802
- def sync_interface(
803
- self,
804
- device_interface: dict,
805
- managed_ips: dict,
806
- device_object: Device | None,
807
- device: dict,
808
- ):
809
- device_interface["loginIp"] = device.get("loginIp")
810
- # We need to get an Interface instance even when not syncing it but syncing IPs or MACs
811
- interface_object: Interface | None = self.sync_model(
812
- "dcim",
813
- "interface",
814
- device_interface,
815
- sync=self.settings.get("interface")
816
- or self.settings.get("ipaddress")
817
- or self.settings.get("macaddress"),
818
- )
819
-
820
- for ipaddress in managed_ips.get(
821
- getattr(device_object, "serial", None), {}
822
- ).get(getattr(interface_object, "name", None), []):
823
- self.sync_ipaddress(
824
- ipaddress,
825
- device_object,
826
- device_interface.get("primaryIp"),
827
- device.get("loginIp"),
828
- )
829
-
830
- self.sync_macaddress(device_interface.get("mac"), interface_object)
831
-
832
- return interface_object
927
+ raise IPAddressPrimaryRemovalError(
928
+ data=ip_address.data,
929
+ model=ip_address.model,
930
+ issue_id=issue.pk,
931
+ ) from err
932
+
933
+ ip_address_obj: "IPAddress | None" = self.sync_item(record=ip_address)
934
+ if ip_address_obj is None or ip_address_obj.assigned_object is None:
935
+ continue
936
+
937
+ parent_device = ip_address_obj.assigned_object.parent_object
938
+
939
+ # Now assign this IP as primary to the parent device, if not assigned yet or assigned to a different IP
940
+ if ip_address.data.get("is_primary") and (
941
+ not parent_device.primary_ip4
942
+ or parent_device.primary_ip4.pk != ip_address_obj.pk
943
+ ):
944
+ try:
945
+ parent_device.snapshot()
946
+ parent_device.primary_ip4 = ip_address_obj
947
+ parent_device.save(using=connection_name)
948
+ except (ValueError, AttributeError) as err:
949
+ self.logger.log_failure(
950
+ f"Error assigning primary IP to device: {err}", obj=self.sync
951
+ )
833
952
 
834
953
  def collect_and_sync(self, ingestion=None) -> None:
835
- self.logger.log_info("Starting data sync.", obj=self.sync)
836
- (
837
- sites,
838
- devices,
839
- interface_dict,
840
- inventory_items,
841
- vrfs,
842
- vlans,
843
- networks,
844
- managed_ips,
845
- ) = self.collect_data()
954
+ self.logger.log_info("Starting data collection.", obj=self.sync)
955
+ data = self.collect_data()
956
+ self.logger.log_info("Starting to prepare items.", obj=self.sync)
957
+ records = self.preprocess_data(data=data)
846
958
 
959
+ self.logger.log_info("Starting data sync.", obj=self.sync)
847
960
  self.sync_items(
848
- app_label="dcim",
849
- model="site",
850
- items=sites,
961
+ items=records["site"],
851
962
  cf=self.sync.update_custom_fields,
852
963
  ingestion=ingestion,
853
964
  )
854
- self.sync_devices(
855
- ingestion,
856
- devices,
857
- interface_dict,
858
- managed_ips,
965
+ self.sync_items(items=records["manufacturer"])
966
+ self.sync_items(items=records["devicetype"])
967
+ self.sync_items(items=records["platform"])
968
+ self.sync_items(items=records["devicerole"])
969
+ self.sync_items(items=records["virtualchassis"])
970
+ self.sync_items(
971
+ items=records["device"],
972
+ cf=self.sync.update_custom_fields,
973
+ ingestion=ingestion,
859
974
  )
860
- self.sync_items(app_label="dcim", model="inventoryitem", items=inventory_items)
861
- self.sync_items(app_label="ipam", model="vlan", items=vlans)
862
- self.sync_items(app_label="ipam", model="vrf", items=vrfs)
863
- self.sync_items(app_label="ipam", model="prefix", items=networks)
975
+ # The Device exists now, so we can update the master of the VC.
976
+ # The logic is handled in transform maps.
977
+ self.sync_items(items=records["virtualchassis"], stats=False)
978
+ self.sync_items(items=records["interface"])
979
+ self.sync_items(items=records["macaddress"])
980
+ self.sync_items(items=records["inventoryitem"])
981
+ self.sync_items(items=records["vlan"])
982
+ self.sync_items(items=records["vrf"])
983
+ self.sync_items(items=records["prefix"])
984
+ self.sync_ip_addresses(ip_addresses=records["ipaddress"])