ipfabric_netbox 3.1.3__py3-none-any.whl → 3.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ipfabric_netbox might be problematic.

@@ -1,13 +1,18 @@
 import json
 import logging
-import uuid
 from importlib import metadata
+from typing import Callable
+from typing import TYPE_CHECKING
 
+from core.choices import DataSourceStatusChoices
 from core.exceptions import SyncError
 from dcim.models import Device
+from dcim.models import Interface
+from dcim.models import MACAddress
 from django.conf import settings
+from django.core.exceptions import MultipleObjectsReturned
 from django.core.exceptions import ObjectDoesNotExist
-from django.shortcuts import get_object_or_404
+from django.db.models import Model
 from django.utils.text import slugify
 from django_tables2 import Column
 from ipfabric import IPFClient
@@ -16,11 +21,14 @@ from netbox.config import get_config
 from netutils.utils import jinja2_convenience_function
 
 from ..choices import IPFabricSourceTypeChoices
+from ..exceptions import SearchError
+from ..exceptions import SyncDataError
 from .nbutils import device_serial_max_length
 from .nbutils import order_devices
 from .nbutils import order_members
-from .nbutils import order_pn
-from .nbutils import order_vrf
+
+if TYPE_CHECKING:
+    from ..models import IPFabricBranch
 
 logger = logging.getLogger("ipfabric_netbox.utilities.ipf_utils")
 
@@ -126,19 +134,45 @@ class IPFabricSyncRunner(object):
         self.settings = settings
         self.transform_map = transform_map
         self.sync = sync
-        self.relationship_store = {}
-        self.siteUUID = {}
         if hasattr(self.sync, "logger"):
             self.logger = self.sync.logger
-        self.interface_count_total = 0
-        self.interface_count = 1
-        self.inventoryitem_count = 1
-        self.inventoryitem_count_total = 0
 
         if self.sync.snapshot_data.status != "loaded":
             raise SyncError("Snapshot not loaded in IP Fabric.")
 
-    def get_model_or_update(self, app, model, data, uuid=None):
+    @staticmethod
+    def handle_errors(func: Callable):
+        def wrapper(*args, **kwargs):
+            try:
+                return func(*args, **kwargs)
+            except Exception as err:
+                # Log the error to logger outside of job - console/file
+                logger.error(err, exc_info=True)
+                # Logging section for logs inside job - facing user
+                self = args[0]
+                if isinstance(err, SearchError):
+                    if self.settings.get(err.model):
+                        self.logger.log_failure(
+                            f"Aborting syncing {err.model} instance due to above error, please check your transform maps and/or existing data.",
+                            obj=self.sync,
+                        )
+                    else:
+                        self.logger.log_failure(
+                            f"Syncing {err.model} is disabled in settings, but hit above error trying to find the correct item. Please check your transform maps and/or existing data.",
+                            obj=self.sync,
+                        )
+                else:
+                    self.logger.log_failure(
+                        f"Syncing failed with: {err}. See above error for more details.",
+                        obj=self.sync,
+                    )
+                # Make sure the whole sync is failed when we encounter error
+                self.sync.status = DataSourceStatusChoices.FAILED
+                return None
+
+        return wrapper
+
+    def get_model_or_update(self, app, model, data):
         transform_map = self.transform_map.objects.filter(
             target_model__app_label=app, target_model__model=model
         ).first()
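The `handle_errors` decorator introduced above wraps individual sync steps so that an exception is logged to both the console logger and the user-facing job log, the whole sync is marked as failed, and `None` is returned instead of the exception propagating. A stripped-down sketch of the same pattern outside NetBox (the `Runner` class, `sync_one` method, and plain string status are illustrative stand-ins, and the `SearchError`-specific messaging is omitted):

import functools
import logging

logger = logging.getLogger(__name__)


def handle_errors(func):
    """Wrap a sync step: log the error, flag the run as failed, return None."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as err:
            logger.error(err, exc_info=True)   # operator-facing log
            self.status = "failed"             # stand-in for DataSourceStatusChoices.FAILED
            return None
    return wrapper


class Runner:
    status = "running"

    @handle_errors
    def sync_one(self, item):
        raise ValueError(f"bad item: {item}")


runner = Runner()
assert runner.sync_one({"sn": "X"}) is None   # error swallowed, step skipped
assert runner.status == "failed"              # but the run is marked failed

Callers then treat a `None` return as "skip this item", which is how `sync_model` and `sync_devices` use the decorator further down in this diff.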
@@ -147,72 +181,57 @@ class IPFabricSyncRunner(object):
             raise SystemError(f"No transform map available for {app}: {model}")
 
         model_settings = self.settings.get(model, False)
-        object = None
-
-        if model_settings:
-            logger.info(f"Creating {model}")
-            object = transform_map.update_or_create_instance(
-                data=data,
-                uuid=uuid,
-                relationship_store=self.relationship_store,
-                tags=self.sync.tags.all(),
-                logger=self.logger,
-            )
-        else:
-            logger.info(f"Getting {model}")
-            coalesce_fields = transform_map.get_coalesce_fields(data)
-            object = get_object_or_404(
-                transform_map.target_model.model_class().objects.all(),
-                **coalesce_fields,
+        try:
+            context = transform_map.get_context(data)
+        except Exception as err:
+            message = f"Error getting context for `{model}`."
+            if isinstance(err, ObjectDoesNotExist):
+                message += " Could not find related object using on template in transform maps."
+            elif isinstance(err, MultipleObjectsReturned):
+                message += " Multiple objects returned using on template in transform maps, the template is not strict enough."
+            self.logger.log_failure(
                f"<b>{message}</b><br/>data: `{data}`<br/>error: `{err}`", obj=self.sync
             )
+            raise SyncError(err) from err
 
-        store = self.relationship_store.get(uuid)
-
-        if store:
-            store[object._meta.model] = object
-        else:
-            self.relationship_store[uuid] = {object._meta.model: object}
-
-        return object
-
-    def create_interface(
-        self, device_interface, device_uuid, managed_ips, device_object, device
-    ):
-        device_interface["loginIp"] = device.get("loginIp")
-        interface_object = self.get_model_or_update(
-            "dcim", "interface", device_interface, uuid=device_uuid
-        )
-
-        self.logger.increment_statistics(
-            model="interface",
-            current=self.interface_count,
-            total=self.interface_count_total,
-        )
-        self.interface_count += 1
+        queryset = transform_map.target_model.model_class().objects
 
-        if self.settings.get("ipaddress"):
-            managed_ip = managed_ips.get(device_object.serial, {}).get(
-                interface_object.name
-            )
-            if managed_ip:
-                ip_address_obj = self.get_model_or_update(
-                    "ipam",
-                    "ipaddress",
-                    managed_ip,
+        object = None
+        try:
+            if model_settings:
+                logger.info(f"Creating {model}")
+                object = transform_map.update_or_create_instance(
+                    context=context,
+                    tags=self.sync.tags.all(),
                 )
-                try:
-                    other_device = Device.objects.get(primary_ip4=ip_address_obj)
-                    if other_device and device_object != other_device:
-                        other_device.primary_ip4 = None
-                        other_device.save()
-                except ObjectDoesNotExist:
-                    pass
-
-                if device.get("loginIp") == device_interface.get("primaryIp"):
-                    device_object.primary_ip4 = ip_address_obj
-                    device_object.save()
+            else:
+                logger.info(f"Getting {model}")
+                context.pop("defaults", None)
+                object = queryset.get(**context)
+        except queryset.model.DoesNotExist as err:
+            self.logger.log_failure(
+                f"<b>`{model}` with these keys not found: `{context}`.</b><br/>Original data: `{data}`.",
+                obj=self.sync,
+            )
+            raise SearchError(model=model, context=context, data=data) from err
+        except queryset.model.MultipleObjectsReturned as err:
+            self.logger.log_failure(
+                f"<b>Multiple `{model}` with these keys found: `{context}`.</b><br/>Original data: `{data}`.",
+                obj=self.sync,
+            )
+            raise SearchError(model=model, context=context, data=data) from err
+        except Exception as err:
+            self.logger.log_failure(
+                f"""Failed to create instance of `{model}`:<br/>
+                message: `{err}`<br/>
+                raw data: `{data}`<br/>
+                context: `{context}`<br/>
+                """,
+                obj=self.sync,
+            )
+            raise SyncDataError(model=model, context=context, data=data) from err
 
-        return True
+        return object
 
     def collect_data(self):
         try:
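The reworked `get_model_or_update` above first builds a `context` from the transform map, then either creates/updates the target object (when the model is enabled in the sync settings) or fetches the existing one, translating `DoesNotExist` and `MultipleObjectsReturned` into the plugin's `SearchError`. The `context` appears to follow the usual Django `update_or_create` shape of identifying fields plus an optional `defaults` dict, which is why the lookup branch pops `defaults` before calling `.get()`. A rough sketch of that branching, where `queryset` stands in for `transform_map.target_model.model_class().objects` and the plugin's own `update_or_create_instance` is approximated by the stock queryset call:

def get_or_update(queryset, context: dict, model_enabled: bool):
    """Create/update the object when syncing it is enabled, otherwise look it up."""
    if model_enabled:
        # `defaults` fills non-identifying fields; the remaining keys identify the row.
        obj, _created = queryset.update_or_create(**context)
        return obj
    # Lookup-only branch: `defaults` has no meaning for .get(), so drop it first.
    lookup = {k: v for k, v in context.items() if k != "defaults"}
    # May raise queryset.model.DoesNotExist or MultipleObjectsReturned, which the
    # plugin converts into SearchError(model=..., context=..., data=...).
    return queryset.get(**lookup)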
@@ -401,18 +420,7 @@ class IPFabricSyncRunner(object):
 
         self.logger.log_info("Ordering Part Numbers", obj=self.sync)
 
-        part_numbers = order_pn(data.get("inventoryitem", []))
-
-        self.logger.log_info("Ordering VRF's", obj=self.sync)
-
-        vrfs = order_vrf(data["vrf"])
-
-        managed_ips = {}
-        site_dict = {}
         interface_dict = {}
-        for site in data["site"]:
-            site_dict[site["siteName"]] = site
-
         for interface in data["interface"]:
             if int_sn := interface.get("sn"):
                 if interface_dict.get(int_sn):
@@ -436,6 +444,7 @@
             )
             raise SyncError(f"Error collecting source column name for interface: {e}")
 
+        managed_ips = {}
         for ip in data["ipaddress"]:
             # Find corresponding interface list by serial number (sn)
             device_interfaces = interface_dict.get(ip["sn"], [])
@@ -451,141 +460,259 @@ class IPFabricSyncRunner(object):
                 int_name = ip[interface_key]
             else:
                 int_name = ip["intName"]
-
-            if managed_ips.get(ip["sn"]):
-                managed_ips[ip["sn"]][int_name] = ip
+            if ip["sn"] not in managed_ips:
+                managed_ips[ip["sn"]] = {int_name: [ip]}
+            elif int_name not in managed_ips.get(ip["sn"]):
+                managed_ips[ip["sn"]][int_name] = [ip]
             else:
-                managed_ips[ip["sn"]] = {int_name: ip}
+                managed_ips[ip["sn"]][int_name].append(ip)
+
+        for model, item_count in [
+            ("site", len(data.get("site", []))),
+            ("device", len(devices)),
+            ("interface", len(data.get("interface", []))),
+            ("inventoryitem", len(data.get("inventoryitem", []))),
+            ("vlan", len(data.get("vlan", []))),
+            ("vrf", len(data.get("vrf", []))),
+            ("prefix", len(data.get("prefix", []))),
+            # TODO: Since we sync only those assigned to interfaces, we are skipping some IPs
+            # TODO: This is fixable by syncing IPs separately from interface and only assign them on interfaces
+            ("ipaddress", len(data.get("ipaddress", []))),
+        ]:
+            if self.settings.get(model):
+                self.logger.init_statistics(model, item_count)
 
         return (
-            site_dict,
+            data["site"],
             devices,
             interface_dict,
-            part_numbers,
-            vrfs,
+            data["inventoryitem"],
+            data["vrf"],
             data["vlan"],
             data["prefix"],
             managed_ips,
         )
 
-    def sync_devices(self, branch=None):
-        self.logger.log_info("Starting device sync", obj=self.sync)
-
-        (
-            site_dict,
-            devices,
-            interface_dict,
-            part_numbers,
-            vrfs,
-            vlans,
-            networks,
-            managed_ips,
-        ) = self.collect_data()
-        vlan_count = 1
-        vrf_count = 1
-        network_count = 1
-        device_vrfs_total = 0
-
-        for device_count, device in enumerate(devices, start=1):
-            logger.info(f"Device {device_count} out of {len(devices)}")
-            self.logger.increment_statistics(
-                model="device", current=device_count, total=len(devices)
+    @handle_errors
+    def sync_model(self, app_label: str, model: str, data: dict | None) -> Model | None:
+        """Sync a single item to NetBox."""
+        if not data:
+            return None
+        return self.get_model_or_update(app_label, model, data)
+
+    def sync_items(
+        self,
+        items,
+        app_label: str,
+        model: str,
+        cf: bool = False,
+        branch: "IPFabricBranch" = None,
+    ) -> None:
+        """Sync list of items to NetBox."""
+        if not self.settings.get(model):
+            self.logger.log_info(
+                f"Did not ask to sync {model}s, skipping.", obj=self.sync
             )
+            return
+
+        for item in items:
+            synced_object = self.sync_model(app_label=app_label, model=model, data=item)
+            if synced_object is None:
+                continue
+            # Only log when we successfully synced the item
+            self.logger.increment_statistics(model=model)
+
+            if cf:
+                synced_object.custom_field_data[
+                    "ipfabric_source"
+                ] = self.sync.snapshot_data.source.pk
+                if branch:
+                    synced_object.custom_field_data["ipfabric_branch"] = branch.pk
+                synced_object.save()
+
+    @handle_errors
+    def sync_devices(
+        self,
+        branch,
+        devices,
+        interface_dict,
+        managed_ips,
+    ):
+        for model, name in [
+            ("manufacturer", "manufacturers"),
+            ("devicetype", "device types"),
+            ("platform", "platforms"),
+            ("devicerole", "device roles"),
+            ("virtualchassis", "virtual chassis"),
+            ("device", "devices"),
+            ("inventoryitem", "device inventory items"),
+        ]:
+            if not self.settings.get(model):
+                self.logger.log_info(
+                    f"Did not ask to sync {name}, skipping", obj=self.sync
+                )
 
-            device_uuid = str(uuid.uuid4())
-
-            site_object = self.get_model_or_update(
-                "dcim", "site", site_dict[device["siteName"]], uuid=device_uuid
+        devices_total = len(devices)
+
+        for device in devices:
+            if self.sync_model("dcim", "manufacturer", device) is None:
+                continue
+            if self.sync_model("dcim", "devicetype", device) is None:
+                continue
+            if self.sync_model("dcim", "platform", device) is None:
+                continue
+            if self.sync_model("dcim", "devicerole", device) is None:
+                continue
+
+            virtual_chassis = device.get("virtual_chassis", {})
+            self.sync_model("dcim", "virtualchassis", virtual_chassis)
+
+            if (device_object := self.sync_model("dcim", "device", device)) is None:
+                continue
+
+            if self.settings.get("device"):
+                device_object.custom_field_data[
+                    "ipfabric_source"
+                ] = self.sync.snapshot_data.source.pk
+                if branch:
+                    device_object.custom_field_data["ipfabric_branch"] = branch.pk
+                device_object.save()
+
+            self.logger.increment_statistics(model="device")
+            logger.info(
+                f"Device {self.logger.log_data.get('statistics', {}).get('device', {}).get('current')} out of {devices_total}"
             )
 
-            self.get_model_or_update("dcim", "manufacturer", device, uuid=device_uuid)
-            self.get_model_or_update("dcim", "devicetype", device, uuid=device_uuid)
+            # The Device exists now, so we can update master of the VC.
+            # The logic is handled in transform maps.
+            self.sync_model("dcim", "virtualchassis", virtual_chassis)
 
-            self.get_model_or_update("dcim", "platform", device, uuid=device_uuid)
-
-            self.get_model_or_update("dcim", "devicerole", device, uuid=device_uuid)
+            device_interfaces = interface_dict.get(device.get("sn"), [])
+            for device_interface in device_interfaces:
+                interface_object = self.sync_interface(
+                    device_interface, managed_ips, device_object, device
+                )
+                if interface_object is None:
+                    continue
+                self.logger.increment_statistics(model="interface")
+
+    @handle_errors
+    def sync_ipaddress(
+        self,
+        managed_ip: dict | None,
+        device_object: Device | None,
+        primary_ip: str | None,
+        login_ip: str | None,
+    ):
+        if not self.settings.get("ipaddress") or not managed_ip:
+            return
+        ip_address_obj = self.get_model_or_update(
+            "ipam",
+            "ipaddress",
+            managed_ip,
+        )
+        if ip_address_obj is None:
+            return
+        self.logger.increment_statistics(model="ipaddress")
 
-            device_object = self.get_model_or_update(
-                "dcim", "device", device, uuid=device_uuid
+        try:
+            # Removing other IP is done in .signals.clear_other_primary_ip
+            # But do it here too so the change is shown in StagedChange diff
+            other_device = Device.objects.get(primary_ip4=ip_address_obj)
+            if other_device and device_object != other_device:
+                other_device.primary_ip4 = None
+                other_device.save()
+        except ObjectDoesNotExist:
+            pass
+
+        if login_ip == primary_ip:
+            try:
+                device_object.primary_ip4 = ip_address_obj
+                device_object.save()
+            except ValueError as err:
+                self.logger.log_failure(
+                    f"Error assigning primary IP to device: {err}", obj=self.sync
+                )
+                return None
+        return ip_address_obj
+
+    @handle_errors
+    def sync_macaddress(
+        self, data: dict | None, interface_object: Interface
+    ) -> MACAddress | None:
+        if not self.settings.get("macaddress") or not data:
+            return None
+        # Need to create MAC Address object before we can assign it to Interface
+        # TODO: Figure out how to do this using transform maps
+        macaddress_data = {
+            "mac": data,
+            "id": interface_object.pk,
+        }
+        macaddress_object = self.get_model_or_update(
+            "dcim", "macaddress", macaddress_data
+        )
+        try:
+            interface_object.primary_mac_address = macaddress_object
+            interface_object.save()
+        except ValueError as err:
+            self.logger.log_failure(
+                f"Error assigning MAC Address to interface: {err}", obj=self.sync
+            )
+            return None
+        return macaddress_object
+
+    @handle_errors
+    def sync_interface(
+        self,
+        device_interface: dict,
+        managed_ips: dict,
+        device_object: Device | None,
+        device: dict,
+    ):
+        device_interface["loginIp"] = device.get("loginIp")
+        interface_object = self.get_model_or_update(
+            "dcim", "interface", device_interface
+        )
+        if interface_object is None:
+            return None
+
+        for ipaddress in managed_ips.get(device_object.serial, {}).get(
+            interface_object.name, []
+        ):
+            self.sync_ipaddress(
+                ipaddress,
+                device_object,
+                device_interface.get("primaryIp"),
+                device.get("loginIp"),
             )
 
-            site_object.custom_field_data[
-                "ipfabric_source"
-            ] = self.sync.snapshot_data.source.pk
-
-            device_object.custom_field_data[
-                "ipfabric_source"
-            ] = self.sync.snapshot_data.source.pk
-            if branch:
-                site_object.custom_field_data["ipfabric_branch"] = branch.pk
-                device_object.custom_field_data["ipfabric_branch"] = branch.pk
-
-            site_object.save()
-            device_object.save()
-
-            if self.settings.get("virtualchassis"):
-                if member := device.get("virtual_chassis"):
-                    self.get_model_or_update("dcim", "virtualchassis", member)
-                    device_object = self.get_model_or_update(
-                        "dcim", "device", device, uuid=device_uuid
-                    )
-
-            if device_object and self.settings.get("interface"):
-                device_interfaces = interface_dict.get(device.get("sn"), [])
-                self.interface_count_total += len(device_interfaces)
-                for device_interface in device_interfaces:
-                    self.create_interface(
-                        device_interface,
-                        device_uuid,
-                        managed_ips,
-                        device_object,
-                        device,
-                    )
-                    # x = threading.Thread(target=self.create_interface, args=((device_interface, device_uuid, managed_ips, device_object, device)))
-                    # threads.append(x)
-                    # x.start()
-
-            if self.settings.get("vrf"):
-                device_vrfs = vrfs.get(device_object.serial, [])
-                device_vrfs_total += len(device_vrfs)
-                for vrf in device_vrfs:
-                    self.get_model_or_update("ipam", "vrf", vrf, uuid=device_uuid)
-                    self.logger.increment_statistics(
-                        model="vrf", current=vrf_count, total=device_vrfs_total
-                    )
-                    vrf_count += 1
-
-            device_count += 1
-
-        if self.settings.get("inventoryitem"):
-            devices = Device.objects.all()
-            for device in devices:
-                device_parts = part_numbers.get(device.serial, [])
-                self.inventoryitem_count_total += len(device_parts)
-                for part in device_parts:
-                    self.get_model_or_update("dcim", "inventoryitem", part)
-                    self.logger.increment_statistics(
-                        model="inventory_item",
-                        current=self.inventoryitem_count,
-                        total=self.inventoryitem_count_total,
-                    )
-                    self.inventoryitem_count += 1
+            self.sync_macaddress(device_interface.get("mac"), interface_object)
 
-        if self.settings.get("vlan"):
-            for vlan in vlans:
-                self.get_model_or_update("ipam", "vlan", vlan)
-                self.logger.increment_statistics(
-                    model="vlan", current=vlan_count, total=len(vlans)
-                )
-                vlan_count += 1
+        return interface_object
 
-        if self.settings.get("prefix"):
-            for network in networks:
-                self.get_model_or_update("ipam", "prefix", network)
-                self.logger.increment_statistics(
-                    model="prefix", current=network_count, total=len(networks)
-                )
-                network_count += 1
+    def collect_and_sync(self, branch=None) -> None:
+        self.logger.log_info("Starting data sync.", obj=self.sync)
+        (
+            sites,
+            devices,
+            interface_dict,
+            inventory_items,
+            vrfs,
+            vlans,
+            networks,
+            managed_ips,
+        ) = self.collect_data()
 
-    def sync(self):
-        self.sync_devices()
+        self.sync_items(
+            app_label="dcim", model="site", items=sites, cf=True, branch=branch
+        )
+        self.sync_devices(
+            branch,
+            devices,
+            interface_dict,
+            managed_ips,
+        )
+        self.sync_items(app_label="dcim", model="inventoryitem", items=inventory_items)
+        self.sync_items(app_label="ipam", model="vlan", items=vlans)
+        self.sync_items(app_label="ipam", model="vrf", items=vrfs)
+        self.sync_items(app_label="ipam", model="prefix", items=networks)
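A data-structure change worth noting in the hunk above: `managed_ips` now maps a device serial number to interface names, each holding a list of IP records rather than a single record, which is what lets `sync_interface` iterate over every address assigned to an interface. A self-contained sketch of that grouping with hypothetical rows (the real code also falls back to an alternative interface-name column, which is skipped here):

from collections import defaultdict


def group_managed_ips(ip_rows):
    """Group IP table rows by device serial number, then by interface name."""
    managed_ips = defaultdict(lambda: defaultdict(list))
    for ip in ip_rows:
        managed_ips[ip["sn"]][ip["intName"]].append(ip)
    return managed_ips


# Hypothetical sample rows, for illustration only.
rows = [
    {"sn": "SN001", "intName": "Gi0/0", "ip": "10.0.0.1"},
    {"sn": "SN001", "intName": "Gi0/0", "ip": "10.0.0.2"},  # secondary address
    {"sn": "SN001", "intName": "Gi0/1", "ip": "10.0.1.1"},
]
grouped = group_managed_ips(rows)
assert len(grouped["SN001"]["Gi0/0"]) == 2  # both addresses survive the grouping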
@@ -14,7 +14,7 @@ class SyncLogging:
         self.log_data = {"logs": [], "statistics": {}}
         self.logger = logging.getLogger("ipfabric.sync")
 
-    def _log(self, obj, message, level=LogLevelChoices.LOG_DEFAULT):
+    def _log(self, obj, message, level=LogLevelChoices.LOG_INFO):
         """
         Log a message from a test method. Do not call this method directly; use one of the log_* wrappers below.
         """
@@ -35,7 +35,7 @@ class SyncLogging:
         """
         Log a message which is not associated with a particular object.
         """
-        self._log(None, message, level=LogLevelChoices.LOG_DEFAULT)
+        self._log(None, message, level=LogLevelChoices.LOG_INFO)
         self.logger.info(message)
 
     def log_success(self, message, obj=None):
@@ -66,18 +66,23 @@ class SyncLogging:
         self._log(obj, message, level=LogLevelChoices.LOG_FAILURE)
         self.logger.info(f"Failure | {obj}: {message}")
 
-    def increment_statistics(self, model, current=None, total=None):
+    def init_statistics(self, model: str, total: int) -> dict[str, int]:
         statistics = self.log_data.get("statistics")
-
         if not statistics.get(model):
-            stats = statistics[model] = {"current": current, "total": total}
+            stats = statistics[model] = {"current": 0, "total": total}
         else:
            stats = statistics.get(model)
+        return stats
+
+    def increment_statistics(self, model: str, total: int = None) -> None:
+        stats = self.init_statistics(model, total)
         if total:
             stats["total"] = total
-        if current:
-            stats["current"] = current
+        stats["current"] += 1
         cache.set(self.cache_key, self.log_data, self.cache_timeout)
+        self.logger.info(
+            f"{model} - {stats['current']} out of {stats['total']} processed"
+        )
 
     def clear_log(self):
         self.log_data["logs"] = []
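The `SyncLogging` changes above split the old `increment_statistics(model, current, total)` into `init_statistics`, which creates a per-model counter once (seeded with the item counts from `collect_data`), and `increment_statistics`, which simply bumps the counter for each successfully processed item. A cache-free sketch of the same bookkeeping (the real class also persists `log_data` to Django's cache and emits an info log on every increment):

class SyncStatistics:
    """Minimal stand-in for the statistics part of SyncLogging."""

    def __init__(self):
        self.statistics: dict[str, dict[str, int]] = {}

    def init_statistics(self, model: str, total: int) -> dict[str, int]:
        # Create the per-model counter once; later calls return the existing entry.
        return self.statistics.setdefault(model, {"current": 0, "total": total})

    def increment_statistics(self, model: str, total: int | None = None) -> None:
        stats = self.init_statistics(model, total)
        if total:
            stats["total"] = total
        stats["current"] += 1  # one increment per successfully synced item


stats = SyncStatistics()
stats.init_statistics("device", 3)
stats.increment_statistics("device")
assert stats.statistics["device"] == {"current": 1, "total": 3}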