nornir-collection 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nornir_collection/__init__.py +0 -0
- nornir_collection/batfish/__init__.py +0 -0
- nornir_collection/batfish/assert_config.py +358 -0
- nornir_collection/batfish/utils.py +129 -0
- nornir_collection/cisco/__init__.py +0 -0
- nornir_collection/cisco/configuration_management/__init__.py +0 -0
- nornir_collection/cisco/configuration_management/cli/__init__.py +0 -0
- nornir_collection/cisco/configuration_management/cli/config_tasks.py +569 -0
- nornir_collection/cisco/configuration_management/cli/config_workflow.py +107 -0
- nornir_collection/cisco/configuration_management/cli/show_tasks.py +677 -0
- nornir_collection/cisco/configuration_management/netconf/__init__.py +0 -0
- nornir_collection/cisco/configuration_management/netconf/config_tasks.py +564 -0
- nornir_collection/cisco/configuration_management/netconf/config_workflow.py +298 -0
- nornir_collection/cisco/configuration_management/netconf/nr_cfg_iosxe_netconf.py +186 -0
- nornir_collection/cisco/configuration_management/netconf/ops_tasks.py +307 -0
- nornir_collection/cisco/configuration_management/processor.py +151 -0
- nornir_collection/cisco/configuration_management/pyats.py +236 -0
- nornir_collection/cisco/configuration_management/restconf/__init__.py +0 -0
- nornir_collection/cisco/configuration_management/restconf/cisco_rpc.py +514 -0
- nornir_collection/cisco/configuration_management/restconf/config_workflow.py +95 -0
- nornir_collection/cisco/configuration_management/restconf/tasks.py +325 -0
- nornir_collection/cisco/configuration_management/utils.py +511 -0
- nornir_collection/cisco/software_upgrade/__init__.py +0 -0
- nornir_collection/cisco/software_upgrade/cisco_software_upgrade.py +283 -0
- nornir_collection/cisco/software_upgrade/utils.py +794 -0
- nornir_collection/cisco/support_api/__init__.py +0 -0
- nornir_collection/cisco/support_api/api_calls.py +1173 -0
- nornir_collection/cisco/support_api/cisco_maintenance_report.py +221 -0
- nornir_collection/cisco/support_api/cisco_support.py +727 -0
- nornir_collection/cisco/support_api/reports.py +747 -0
- nornir_collection/cisco/support_api/utils.py +316 -0
- nornir_collection/fortinet/__init__.py +0 -0
- nornir_collection/fortinet/utils.py +36 -0
- nornir_collection/git.py +224 -0
- nornir_collection/netbox/__init__.py +0 -0
- nornir_collection/netbox/custom_script.py +107 -0
- nornir_collection/netbox/inventory.py +360 -0
- nornir_collection/netbox/scan_prefixes_and_update_ip_addresses.py +989 -0
- nornir_collection/netbox/set_device_status.py +67 -0
- nornir_collection/netbox/sync_datasource.py +111 -0
- nornir_collection/netbox/update_cisco_inventory_data.py +158 -0
- nornir_collection/netbox/update_cisco_support_plugin_data.py +339 -0
- nornir_collection/netbox/update_fortinet_inventory_data.py +161 -0
- nornir_collection/netbox/update_purestorage_inventory_data.py +144 -0
- nornir_collection/netbox/utils.py +261 -0
- nornir_collection/netbox/verify_device_primary_ip.py +202 -0
- nornir_collection/nornir_plugins/__init__.py +0 -0
- nornir_collection/nornir_plugins/inventory/__init__.py +0 -0
- nornir_collection/nornir_plugins/inventory/netbox.py +250 -0
- nornir_collection/nornir_plugins/inventory/staggered_yaml.py +143 -0
- nornir_collection/nornir_plugins/inventory/utils.py +277 -0
- nornir_collection/purestorage/__init__.py +0 -0
- nornir_collection/purestorage/utils.py +53 -0
- nornir_collection/utils.py +741 -0
- nornir_collection-0.0.1.dist-info/LICENSE +21 -0
- nornir_collection-0.0.1.dist-info/METADATA +136 -0
- nornir_collection-0.0.1.dist-info/RECORD +59 -0
- nornir_collection-0.0.1.dist-info/WHEEL +5 -0
- nornir_collection-0.0.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,747 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""
|
3
|
+
This module contains functions to prepare Cisco Support API data to generate reports.
|
4
|
+
|
5
|
+
The functions are ordered as followed:
|
6
|
+
- Prepare Cisco Support API data for Pandas Dataframe
|
7
|
+
- Prepare IBM TSS data for Pandas Dataframe
|
8
|
+
- Create Pandas Dataframe with report data
|
9
|
+
- Excel Report Generation
|
10
|
+
"""
|
11
|
+
|
12
|
+
import argparse
|
13
|
+
import os
|
14
|
+
import json
|
15
|
+
from datetime import datetime, timedelta
|
16
|
+
import __main__
|
17
|
+
import pandas as pd
|
18
|
+
import numpy as np
|
19
|
+
from xlsxwriter.utility import xl_col_to_name
|
20
|
+
from nornir_collection.utils import (
|
21
|
+
print_task_name,
|
22
|
+
task_info,
|
23
|
+
get_pandas_column_width,
|
24
|
+
)
|
25
|
+
|
26
|
+
|
27
|
+
#### Prepare Cisco Support API data for Pandas Dataframe #####################################################
|
28
|
+
|
29
|
+
|
30
|
+
def prepare_report_data_host(serials_dict: dict, nornir: bool = False) -> dict:
    """
    Build the "host" report column (and, when nornir is True, the Nornir data
    columns) from the serials_dict. Each key is a pandas dataframe column name
    and each value is the list of cell contents for that column. Returns the
    prepared dict.
    """
    # Column "host" collects every hostname from the serials dict
    host_data = {"host": [record["host"] for record in serials_dict.values()]}

    # Without Nornir only the hostname column is needed
    if not nornir:
        return host_data

    # Nornir-specific report columns
    nr_data = {key: [] for key in ("switch_num", "desired_version", "current_version")}

    # Collect the matching "nr_data" values of every record for each column
    for column in nr_data:
        found = False
        for record in serials_dict.values():
            for field, content in record["nr_data"].items():
                if field == column and field in nr_data:
                    nr_data[field].append(content)
                    found = True
        # Fall back to a single empty string if no record provided this column
        if not found:
            nr_data[column].append("")

    # Combine the hostname and the Nornir columns into one dict
    return {**host_data, **nr_data}
|
71
|
+
|
72
|
+
|
73
|
+
def prepare_report_data_sni_owner_by_sn(serials_dict: dict) -> dict:
    """
    Build the report columns from the SNIgetOwnerCoverageStatusBySerialNumbers
    data in the serials_dict. Each key is a pandas dataframe column name and
    each value is the list of cell contents for that column. Returns the
    prepared dict.
    """
    # Columns taken from the SNIgetOwnerCoverageStatusBySerialNumbers response
    sni_owner_by_sn = {key: [] for key in ("sr_no_owner", "coverage_end_date")}

    # Collect the matching API values of every record for each column
    for column in sni_owner_by_sn:
        found = False
        for record in serials_dict.values():
            for field, content in record["SNIgetOwnerCoverageStatusBySerialNumbers"].items():
                if field == column and field in sni_owner_by_sn:
                    sni_owner_by_sn[field].append(content)
                    found = True
        # Fall back to a single empty string if no record provided this column
        if not found:
            sni_owner_by_sn[column].append("")

    return sni_owner_by_sn
|
98
|
+
|
99
|
+
|
100
|
+
def prepare_report_data_sni_summary_by_sn(serials_dict: dict) -> dict:
    """
    Build the report columns from the SNIgetCoverageSummaryBySerialNumbers
    data in the serials_dict. Each key is a pandas dataframe column name and
    each value is the list of cell contents for that column. Returns the
    prepared dict.
    """
    # Columns taken from the SNIgetCoverageSummaryBySerialNumbers response
    # fmt: off
    columns = [
        "sr_no", "is_covered", "contract_site_customer_name", "contract_site_address1", "contract_site_city",
        "contract_site_state_province", "contract_site_country", "covered_product_line_end_date",
        "service_contract_number", "service_line_descr", "warranty_end_date", "warranty_type",
        "warranty_type_description", "item_description", "item_type", "orderable_pid",
    ]
    # fmt: on
    sni_summary_by_sn = {column: [] for column in columns}

    # Collect the matching API values of every record for each column
    for column in sni_summary_by_sn:
        for record in serials_dict.values():
            found = False
            # General coverage details of the serial number
            for field, content in record["SNIgetCoverageSummaryBySerialNumbers"].items():
                if field == column and field in sni_summary_by_sn:
                    sni_summary_by_sn[field].append(content)
                    found = True
            # Details of the first orderable PID
            for field, content in record["SNIgetCoverageSummaryBySerialNumbers"]["orderable_pid_list"][0].items():
                if field == column and field in sni_summary_by_sn:
                    sni_summary_by_sn[field].append(content)
                    found = True
            # Fall back to an empty string if the record provided no value
            if not found:
                sni_summary_by_sn[column].append("")

    return sni_summary_by_sn
|
139
|
+
|
140
|
+
|
141
|
+
def prepare_report_data_eox_by_sn(serials_dict: dict) -> dict:
    """
    Build the report columns from the EOXgetBySerialNumbers data in the
    serials_dict. Each key is a pandas dataframe column name and each value is
    the list of cell contents for that column. Returns the prepared dict.
    """
    # pylint: disable=too-many-nested-blocks

    # Columns taken from the EOXgetBySerialNumbers response
    # fmt: off
    columns = [
        "EOXExternalAnnouncementDate", "EndOfSaleDate", "EndOfSWMaintenanceReleases",
        "EndOfSecurityVulSupportDate", "EndOfRoutineFailureAnalysisDate", "EndOfServiceContractRenewal",
        "LastDateOfSupport", "EndOfSvcAttachDate", "UpdatedTimeStamp", "MigrationInformation",
        "MigrationProductId", "MigrationProductName", "MigrationStrategy", "MigrationProductInfoURL",
        "ErrorDescription", "ErrorDataType", "ErrorDataValue",
    ]
    # fmt: on
    eox_by_sn = {column: [] for column in columns}

    # Collect the matching API values of every record for each column
    for column in eox_by_sn:
        for record in serials_dict.values():
            eox_record = record["EOXgetBySerialNumbers"]
            found = False
            # End-of-life dates are nested dicts that carry the date in "value"
            for field, content in eox_record.items():
                if field == column and isinstance(content, dict) and "value" in content:
                    eox_by_sn[field].append(content["value"])
                    found = True
            # Migration details
            for field, content in eox_record["EOXMigrationDetails"].items():
                if field == column and field in eox_by_sn:
                    eox_by_sn[field].append(content)
                    found = True
            # Optional error details (only present if the API reported an error)
            if "EOXError" in eox_record:
                for field, content in eox_record["EOXError"].items():
                    if field == column and field in eox_by_sn:
                        eox_by_sn[field].append(content)
                        found = True
            # Fall back to an empty string if the record provided no value
            if not found:
                eox_by_sn[column].append("")

    return eox_by_sn
|
191
|
+
|
192
|
+
|
193
|
+
def prepare_report_data_ss_by_pid(serials_dict: dict) -> dict:
    """
    Build the "recommended_version" report column from the
    SSgetSuggestedReleasesByProductIDs data in the serials_dict. Each key is a
    pandas dataframe column name and each value is the list of cell contents
    for that column. Returns the prepared dict.
    """
    # Create the dictionary to fill with all suggested release information
    ss_by_pid = {"recommended_version": []}

    for records in serials_dict.values():
        # Empty string to fill with the recommended releases
        recommended_release = ""
        # As there can be multiple suggestions with the same ID and release, but only with different
        # mdfId, the no_duplicates list eliminates duplicate release/error information.
        no_duplicates = []
        for item in records["SSgetSuggestedReleasesByProductIDs"]:
            if isinstance(item, str):
                # Plain string items (e.g. a pre-formatted message) are taken as-is
                if item not in no_duplicates:
                    no_duplicates.append(item)
                    recommended_release += item
            elif isinstance(item, dict):
                # enumerate(start=1) numbers the suggestion IDs from 1
                for idx, suggestion in enumerate(item["suggestions"], start=1):
                    if suggestion["releaseFormat1"] and suggestion["releaseFormat1"] not in no_duplicates:
                        no_duplicates.append(suggestion["releaseFormat1"])
                        recommended_release += f"ID: {idx}, Release: {suggestion['releaseFormat1']} / "
                    elif (
                        suggestion["errorDetailsResponse"]
                        and suggestion["errorDetailsResponse"]["errorDescription"] not in no_duplicates
                    ):
                        error_description = suggestion["errorDetailsResponse"]["errorDescription"]
                        # Bugfix: track the error description as well, otherwise identical error
                        # texts were appended multiple times despite the membership check above
                        no_duplicates.append(error_description)
                        recommended_release += f"{error_description} / "

        # Remove the trailing " / " separator completely
        # Bugfix: the previous [:-2] slice removed only two characters and left a trailing space
        if recommended_release.endswith(" / "):
            recommended_release = recommended_release[:-3]

        ss_by_pid["recommended_version"].append(recommended_release)

    return ss_by_pid
|
234
|
+
|
235
|
+
|
236
|
+
#### Prepare action needed data for Pandas Dataframe #########################################################
|
237
|
+
|
238
|
+
|
239
|
+
def prepare_report_data_act_needed(serials_dict: dict) -> dict:
    """
    Build the "coverage_action_needed" and "api_action_needed" report columns
    from the serials_dict. Each key is a pandas dataframe column name and each
    value is the list of cell contents for that column. Returns the prepared
    dict.
    """
    act_needed = {"coverage_action_needed": [], "api_action_needed": []}

    for records in serials_dict.values():
        # API access: the user must be associated with the contract and the device
        api_ok = "YES" in records["SNIgetOwnerCoverageStatusBySerialNumbers"]["sr_no_owner"]
        act_needed["api_action_needed"].append(
            "No action needed (API user is associated with contract and device)"
            if api_ok
            else "Action needed (No association between api user, contract and device)"
        )

        # Coverage: the serial must be covered by a Cisco maintenance contract
        covered = "YES" in records["SNIgetCoverageSummaryBySerialNumbers"]["is_covered"]
        act_needed["coverage_action_needed"].append(
            "No action needed (Device is covered by a maintenance contract)"
            if covered
            else "Action needed (Device is not covered by a maintenance contract)"
        )

    return act_needed
|
272
|
+
|
273
|
+
|
274
|
+
#### Prepare IBM TSS data for Pandas Dataframe ###############################################################
|
275
|
+
|
276
|
+
|
277
|
+
def prepare_report_data_tss(serials_dict: dict, file: str) -> dict:
    """
    This function takes the serials_dict and a source file which is the IBM TSS report as arguments. The only
    mandatory column is the "Serials" which will be normalized to tss_serial. All other columns can be
    specified with their order and the prefix "tss_" in the EXCEL_COLUMN_ORDER_WITH_TSS constant. A dictionary
    named tss_info will be returned which contains the key value pairs "coverage_action_needed",
    "api_action_needed" and all TSS data to create a Pandas dataframe later.
    """
    # pylint: disable=invalid-name,too-many-branches

    # Define the tss_info dict and its key value pairs to return as the end of the function
    columns = ["coverage_action_needed", "api_action_needed"]

    # Read the excel file into a pandas dataframe -> Row 0 is the title row
    df = pd.read_excel(rf"{file}", dtype=str, engine="openpyxl")

    # Make some data normalization of the TSS report file column headers
    # Make column written in lowercase letters
    df.columns = df.columns.str.lower()
    # Replace column name whitespace with underscore
    df.columns = df.columns.str.replace(" ", "_")
    # Add a prefix to the column name to identify the TSS report columns
    # NOTE(review): assumes the report provides "Serial" and "OEM" columns which become
    # "tss_serial" / "tss_oem" after this normalization — verify against the report template
    df = df.add_prefix("tss_")

    # Create the dictionary with the static defined columns and all IBM TSS columns from the dataframe
    tss_info = {column: [] for column in (columns + list(df.columns))}

    # Make all serial numbers written in uppercase letters
    df.tss_serial = df.tss_serial.str.upper()

    # The first fillna will replace all of (None, NAT, np.nan, etc) with Numpy's NaN, then replace
    # Numpy's NaN with python's None
    df = df.fillna(np.nan).replace([np.nan], [None])

    # Delete all rows which have not the value "Cisco" in the OEM column
    df = df[df.tss_oem == "Cisco"]

    # Create a list with all IBM TSS serial numbers
    tss_serial_list = df["tss_serial"].tolist()

    # Look for inventory serials which are covered by IBM TSS and add them to the serial_dict
    # It's important to match IBM TSS serials to inventory serials first for the correct order
    # (every per-column list in tss_info must grow by exactly one entry per serial)
    for sr_no, records in serials_dict.items():
        records["tss_info"] = {}

        # Covered by IBM TSS if inventory serial number is in all IBM TSS serial numbers
        if sr_no in tss_serial_list:
            # Verify if the user has the correct access rights to access the serial API data
            if "YES" in records["SNIgetOwnerCoverageStatusBySerialNumbers"]["sr_no_owner"]:
                tss_info["api_action_needed"].append(
                    "No action needed (API user is associated with contract and device)"
                )
            else:
                tss_info["api_action_needed"].append(
                    "Action needed (No association between api user, contract and device)"
                )

            # Verify if the inventory serial is covered by IBM TSS and is also covered by Cisco
            # Verify if the serial is covered by Cisco add the coverage_action_needed variable to tss_info
            if "YES" in records["SNIgetCoverageSummaryBySerialNumbers"]["is_covered"]:
                tss_info["coverage_action_needed"].append("No action needed (Covered by IBM TSS and Cisco)")
            else:
                tss_info["coverage_action_needed"].append(
                    "Action needed (Covered by IBM TSS, but Cisco coverage missing)"
                )

            # Get the index of the list item and assign the element from the TSS dataframe by its index
            index = tss_serial_list.index(sr_no)
            # Add the data from the TSS dataframe to tss_info
            for column, value in tss_info.items():
                if column.startswith("tss_"):
                    value.append(df[column].values[index])

        # Inventory serial number is not in all IBM TSS serial numbers
        else:
            # Verify if the user has the correct access rights to access the serial API data
            if "YES" in records["SNIgetOwnerCoverageStatusBySerialNumbers"]["sr_no_owner"]:
                tss_info["api_action_needed"].append(
                    "No action needed (API user is associated with contract and device)"
                )
            else:
                tss_info["api_action_needed"].append(
                    "Action needed (No association between api user, contract and device)"
                )

            # Verify if the inventory serial is covered by Cisco
            # Add the coverage_action_needed variable to tss_info
            if "YES" in records["SNIgetCoverageSummaryBySerialNumbers"]["is_covered"]:
                tss_info["coverage_action_needed"].append("No action needed (Covered by Cisco SmartNet)")
            else:
                tss_info["coverage_action_needed"].append(
                    "Action needed (Cisco SmartNet or IBM TSS coverage missing)"
                )

            # Add the empty strings for all additional IBM TSS serials to tss_info
            # (keeps the tss_* column lists aligned with the non-tss columns)
            for column, value in tss_info.items():
                if column.startswith("tss_"):
                    value.append("")

    # After the inventory serials have been processed
    # Add IBM TSS serials to tss_info which are not part of the inventory serials
    for tss_serial in tss_serial_list:
        if tss_serial not in serials_dict.keys():
            # Add the coverage_action_needed variable to tss_info
            tss_info["coverage_action_needed"].append(
                "Action needed (Remove serial from IBM TSS inventory as device is decommissioned)"
            )
            tss_info["api_action_needed"].append("No Cisco API data as serial is not part of column sr_no")

            # Get the index of the list item and assign the element from the TSS dataframe by its index
            index = tss_serial_list.index(tss_serial)
            # Add the data from the TSS dataframe to tss_info
            for column, value in tss_info.items():
                if column.startswith("tss_"):
                    value.append(df[column].values[index])

    return tss_info
|
394
|
+
|
395
|
+
|
396
|
+
#### Create Pandas Dataframe with report data ################################################################
|
397
|
+
|
398
|
+
|
399
|
+
def create_pandas_dataframe_for_report(
    serials_dict: dict, report_cfg: dict, args: argparse.Namespace
) -> pd.DataFrame:
    """
    Prepare the report data and create a pandas dataframe. The pandas dataframe will be returned
    """
    # pylint: disable=invalid-name

    print_task_name(text="PYTHON prepare report data")

    # Build each column group from the serials dict (all data returned by the Cisco
    # support API) and merge them into a single report data dict
    host = prepare_report_data_host(serials_dict=serials_dict, nornir=args.nornir)
    report_data = {
        **host,
        **prepare_report_data_sni_owner_by_sn(serials_dict=serials_dict),
        **prepare_report_data_sni_summary_by_sn(serials_dict=serials_dict),
        **prepare_report_data_eox_by_sn(serials_dict=serials_dict),
        **prepare_report_data_ss_by_pid(serials_dict=serials_dict),
    }

    if "tss_file" in report_cfg:
        # Analyze the IBM TSS report file and create the tss_info dict
        tss_info = prepare_report_data_tss(serials_dict=serials_dict, file=report_cfg["tss_file"])
        # The tss_info lists can be longer than the others because the TSS report may
        # contain serials unknown to the inventory -> pad every existing column with
        # empty strings so that all lists stay the same length
        for _ in range(len(tss_info["tss_serial"]) - len(host["host"])):
            for cells in report_data.values():
                cells.append("")
        report_data.update(**tss_info)
    else:
        # Without a TSS file only the action-needed columns are added
        report_data.update(**prepare_report_data_act_needed(serials_dict=serials_dict))

    print(task_info(text="PYTHON prepare report data dict", changed=False))
    print("'PYTHON prepare report data dict' -> PythonResult <Success: True>")
    if args.verbose:
        print("\n" + json.dumps(report_data, indent=4))

    # Reorder the data dict according to the key_order list -> This needs Python >= 3.6
    if "df_order" in report_cfg:
        report_data = {key: report_data[key] for key in report_cfg["df_order"]}

        print(task_info(text="PYTHON order report data dict", changed=False))
        print("'PYTHON order report data dict' -> PythonResult <Success: True>")
        if args.verbose:
            print("\n" + json.dumps(report_data, indent=4))

    # Create a Pandas dataframe for the data dict
    df = pd.DataFrame(report_data)

    # Convert the configured columns to pandas datetimes for later conditional formatting
    if "df_date_columns" in report_cfg:
        for column in report_cfg["df_date_columns"]:
            df[column] = pd.to_datetime(df[column], format="%Y-%m-%d")

    print(task_info(text="PYTHON create pandas dataframe from dict", changed=False))
    print("'PANDAS create dataframe' -> PandasResult <Success: True>")
    if args.verbose:
        print(df)

    return df
|
467
|
+
|
468
|
+
|
469
|
+
#### Excel Report Generation #################################################################################
|
470
|
+
|
471
|
+
|
472
|
+
def _worksheet_add_title_row(workbook, worksheet, config):
    """
    Add the merged title row (optional logo plus title text) to the worksheet
    and apply the zoom and freeze-pane settings from config. Returns the
    worksheet.
    """
    # Worksheet-wide view settings
    worksheet.set_zoom(config.get("zoom", 110))
    # Freeze the configured number of rows and columns
    freeze_col = config.get("freeze_columns", 0)
    worksheet.freeze_panes(config.get("freeze_row", 2), freeze_col)

    # Height of the title row
    worksheet.set_row(0, config.get("title_row_height", 60))
    # Format used for the merged title row cells
    title_format = workbook.add_format(
        {
            "font_name": config.get("title_font_name", "Calibri"),
            "font_size": config.get("title_font_size", 20),
            "font_color": config.get("title_font_color", "#FFFFFF"),
            "bg_color": config.get("title_bg_color", "#FF452C"),
            "align": "left",
            "valign": "vcenter",
            "bold": 1,
            "bottom": 1,
        }
    )
    # Text wrap allows custom newlines inside the title text
    title_format.set_text_wrap()

    # Optionally insert a logo into the top row
    if "title_logo" in config:
        # Merge as many top-row cells as columns are frozen and place the logo there
        last_logo_col = freeze_col - 1 if freeze_col != 0 else freeze_col
        worksheet.merge_range(0, 0, 0, last_logo_col, None, title_format)
        worksheet.insert_image(
            "A1",
            config["title_logo"],
            {
                "x_scale": config["title_logo_x_scale"],
                "y_scale": config["title_logo_y_scale"],
                "x_offset": config["title_logo_x_offset"],
                "y_offset": config["title_logo_y_offset"],
            },
        )

    # Choose the title text depending on whether this is a TSS report
    if "tss_report" in config:
        title_text = config.get("title_text_tss", "Cisco Maintenance Report incl. IBM TSS Analysis")
    else:
        title_text = config.get("title_text", "Cisco Maintenance Report")
    # Merge the remaining cells up to max_col and write the title
    title_text = f"{title_text}\n(generated by {os.path.basename(__main__.__file__)})"
    worksheet.merge_range(0, freeze_col, 0, config["max_col"], title_text, title_format)

    return worksheet
|
530
|
+
|
531
|
+
|
532
|
+
def _worksheet_add_table(df, workbook, worksheet, config):
    """
    Add an Excel table structure over the dataframe columns and auto-adjust
    every column width. Returns the worksheet.
    """
    # pylint: disable=invalid-name

    # Create a list of column headers, to use in add_table().
    columns = [{"header": column} for column in df.columns]

    # Add the Excel table structure. Pandas will add the data.
    # fmt: off
    worksheet.add_table(1, 0, config["max_row"] - 1, config["max_col"],
        {
            "columns": columns,
            "style": config["table_style"] if "table_style" in config else "Table Style Medium 8",
        },
    )
    # fmt: on

    # Cell format applied to every table column
    table_format = workbook.add_format(
        {
            "font_name": config["table_font_name"] if "table_font_name" in config else "Calibri",
            "font_size": config["table_font_size"] if "table_font_size" in config else 11,
            "align": "left",
            "valign": "vcenter",
        }
    )
    # Auto-adjust each column width -> +5 on the width makes space for the filter icon
    for index, width in enumerate(get_pandas_column_width(df)):
        # Enforce a minimum column width of 15
        width = 15 if width < 15 else width
        # Bugfix: set_column(first_col, last_col, ...) must target exactly the column
        # "index" -> the previous call passed last_col = index - 1, an inverted range
        # that also covered the preceding column and was invalid for the first column
        worksheet.set_column(index, index, width + 5, table_format)

    return worksheet
|
566
|
+
|
567
|
+
|
568
|
+
def _worksheet_add_conditional_formatting(df, workbook, worksheet, config):
    """
    Add conditional formatting to the report worksheet.

    - Status columns (sr_no_owner, is_covered, coverage_action_needed, api_action_needed) get a
      green background for "YES" / "No action needed" and a red one for "NO" / "Action needed".
    - Date columns listed in config["grace_period_cols"] get green (date beyond the grace period),
      orange (within the grace period) or red (in the past). The grace period defaults to 90 days
      and can be overridden with config["grace_period_days"].
    - current_version cells are written green when contained in desired_version, else red.
    - desired_version cells are written green when contained in recommended_version, else orange.

    Args:
        df: Pandas dataframe that was written to the worksheet.
        workbook: XlsxWriter workbook object used to create the background formats.
        worksheet: XlsxWriter worksheet object to format.
        config: Report config dict. Reads "max_row" (mandatory) and the optional keys
            "grace_period_cols" and "grace_period_days".

    Returns:
        The modified XlsxWriter worksheet object.
    """
    # pylint: disable=invalid-name

    # Specify the table start row where conditional formatting should start
    startrow = 3
    # Create a red background format for the conditional formatting
    format_red = workbook.add_format({"bg_color": "#C0504D", "align": "left", "valign": "vcenter"})
    # Create a orange background format for the conditional formatting
    format_orange = workbook.add_format({"bg_color": "#F79646", "align": "left", "valign": "vcenter"})
    # Create a green background format for the conditional formatting
    format_green = workbook.add_format({"bg_color": "#9BBB59", "align": "left", "valign": "vcenter"})

    # Create a conditional formatting for each column in the list.
    # Bugfix: this assignment and its comment were duplicated verbatim; keep it only once.
    column_list = ["sr_no_owner", "is_covered", "coverage_action_needed", "api_action_needed"]
    # Keep only the columns that actually exist in the dataframe
    column_list = [column for column in column_list if column in df.columns]
    for column in column_list:
        # Get the column letter by the column name
        target_col = xl_col_to_name(df.columns.get_loc(column))
        # -> Excel requires the value for type cell to be double quoted
        worksheet.conditional_format(
            f"{target_col}{startrow}:{target_col}{config['max_row']}",
            {"type": "cell", "criteria": "equal to", "value": '"NO"', "format": format_red},
        )
        worksheet.conditional_format(
            f"{target_col}{startrow}:{target_col}{config['max_row']}",
            {"type": "cell", "criteria": "equal to", "value": '"YES"', "format": format_green},
        )
        worksheet.conditional_format(
            f"{target_col}{startrow}:{target_col}{config['max_row']}",
            {"type": "text", "criteria": "containing", "value": "No action needed", "format": format_green},
        )
        worksheet.conditional_format(
            f"{target_col}{startrow}:{target_col}{config['max_row']}",
            {"type": "text", "criteria": "containing", "value": "Action needed", "format": format_red},
        )

    # Create a conditional formatting for each column with a date. Get the column letter by the column name
    if "grace_period_cols" in config:
        grace_period = config["grace_period_days"] if "grace_period_days" in config else 90
        for column in config["grace_period_cols"]:
            target_col = xl_col_to_name(df.columns.get_loc(column))
            # Green: dates further away than the grace period
            worksheet.conditional_format(
                f"{target_col}{startrow}:{target_col}{config['max_row']}",
                {
                    "type": "date",
                    "criteria": "between",
                    "minimum": datetime.today().date() + timedelta(days=grace_period),
                    "maximum": datetime.strptime("2999-01-01", "%Y-%m-%d"),
                    "format": format_green,
                },
            )
            # Orange: dates from today up to the end of the grace period
            worksheet.conditional_format(
                f"{target_col}{startrow}:{target_col}{config['max_row']}",
                {
                    "type": "date",
                    "criteria": "between",
                    "minimum": datetime.today().date(),
                    "maximum": datetime.today().date() + timedelta(days=grace_period),
                    "format": format_orange,
                },
            )
            # Red: dates in the past (before today)
            worksheet.conditional_format(
                f"{target_col}{startrow}:{target_col}{config['max_row']}",
                {
                    "type": "date",
                    "criteria": "between",
                    "minimum": datetime.strptime("1999-01-01", "%Y-%m-%d"),
                    "maximum": datetime.today().date() - timedelta(days=1),
                    "format": format_red,
                },
            )

    # Create a conditional formatting for the current_version compared with the desired_version
    if "current_version" in df.columns and "desired_version" in df.columns:
        # Get the column letter by the column name
        version_col = xl_col_to_name(df.columns.get_loc("current_version"))
        # Iterate over all cells in current_version and compare the string against the desired_version
        for idx, version in enumerate(df["current_version"].values):
            # If the current_version is in with the desired_version
            if version and version in df["desired_version"][idx]:
                # enumerate start with 0, but the cell startrow is different -> +startrow to match start cell
                worksheet.write(f"{version_col}{idx + startrow}", version, format_green)
            elif version:
                # enumerate start with 0, but the cell startrow is different -> +startrow to match start cell
                worksheet.write(f"{version_col}{idx + startrow}", version, format_red)

    # Create a conditional formatting for the desired_version compared with the recommended_version
    if "desired_version" in df.columns and "recommended_version" in df.columns:
        # Get the column letter by the column name
        version_col = xl_col_to_name(df.columns.get_loc("desired_version"))
        # Iterate over all cells in desired_version and compare the string against the recommended_version
        for idx, version in enumerate(df["desired_version"].values):
            # If the desired_version is in with the recommended_version
            if version and version in df["recommended_version"][idx]:
                # enumerate start with 0, but the cell startrow is different -> +startrow to match start cell
                worksheet.write(f"{version_col}{idx + startrow}", version, format_green)
            elif version:
                # enumerate start with 0, but the cell startrow is different -> +startrow to match start cell
                worksheet.write(f"{version_col}{idx + startrow}", version, format_orange)

    return worksheet
|
674
|
+
|
675
|
+
|
676
|
+
def generate_cisco_maintenance_report(df: pd.DataFrame, report_cfg: dict) -> None:
    """
    Generate the Cisco Maintenance report Excel file with the pandas dataframe.

    Args:
        df: Pandas dataframe with one row per serial number; its shape drives the table size.
        report_cfg: Report config dict. Reads "excel_file" (mandatory output path) and the
            optional key "sheet_name"; "max_row" and "max_col" are written into it here and are
            consumed by the private _worksheet_* helpers.

    Returns:
        None. The Excel report file is saved to the local disk at report_cfg["excel_file"].
    """
    # pylint: disable=invalid-name

    # Disable Pandas SettingWithCopyWarning for "chained" assignments
    # -> Error-Message: A value is trying to be set on a copy of a slice from a DataFrame
    # -> https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
    pd.options.mode.chained_assignment = None  # default='warn'

    print_task_name(text="PYTHON create Pandas writer object using XlsxWriter engine")

    #### Create the xlsx writer, workbook and worksheet objects #############################################

    # Get the dimensions of the dataframe.
    (max_row, max_col) = df.shape
    # max_row + 2 because the first two rows are used for title and header
    report_cfg["max_row"] = max_row + 2
    # max_col - 1 because df.shape counts columns while XlsxWriter uses 0-based column indices
    report_cfg["max_col"] = max_col - 1

    # Create a Pandas excel writer using XlsxWriter as the engine.
    writer = pd.ExcelWriter(  # pylint: disable=abstract-class-instantiated
        path=report_cfg["excel_file"],
        engine="xlsxwriter",
        date_format="yyyy-mm-dd",
        datetime_format="yyyy-mm-dd",
    )

    # Get the xlsxwriter workbook object
    workbook = writer.book
    # Write the dataframe data to XlsxWriter. Turn off the default header and index and skip one row to allow
    # us to insert a user defined header.
    sheet_name = report_cfg["sheet_name"] if "sheet_name" in report_cfg else "Cisco_Maintenance_Report"
    df.to_excel(writer, sheet_name=sheet_name, startrow=2, header=False, index=False)
    # Get the xlsxwriter worksheet object
    worksheet = writer.sheets[sheet_name]

    print(task_info(text="PYTHON create XlsxWriter workbook and worksheet", changed=False))
    print("'PYTHON create pandas writer object using XlsxWriter engine' -> PythonResult <Success: True>")

    #### Add content and conditional formatting to the xlsx writer worksheet ################################

    # Add the top title row
    worksheet = _worksheet_add_title_row(workbook=workbook, worksheet=worksheet, config=report_cfg)
    print(task_info(text="PYTHON create XlsxWriter title row", changed=False))
    print("'PYTHON create XlsxWriter title row' -> PythonResult <Success: True>")

    # Add a Excel table structure and add the Pandas dataframe
    worksheet = _worksheet_add_table(df=df, workbook=workbook, worksheet=worksheet, config=report_cfg)
    print(task_info(text="PYTHON create XlsxWriter table and add pandas dataframe", changed=False))
    print("'PYTHON create XlsxWriter table and add pandas dataframe' -> PythonResult <Success: True>")

    # Create conditional formatting (status colors, grace-period dates, version comparisons)
    worksheet = _worksheet_add_conditional_formatting(
        df=df, workbook=workbook, worksheet=worksheet, config=report_cfg
    )
    print(task_info(text="PYTHON create XlsxWriter conditional formating", changed=False))
    print("'PYTHON create XlsxWriter conditional formating' -> PythonResult <Success: True>")

    #### Save the Excel report file to disk ##################################################################

    print_task_name(text="PYTHON generate report Excel file")

    # Close the Pandas Excel writer and output the Excel file.
    writer.close()

    print(task_info(text="PYTHON generate report Excel file", changed=False))
    print("'PYTHON generate report Excel file' -> PythonResult <Success: True>")
    print(f"-> Saved information about {df.shape[0]} serials to {report_cfg['excel_file']}")
|