wiliot-certificate 1.3.0a1__py3-none-any.whl → 1.4.0a2__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- brg_certificate/__init__.py +0 -0
- brg_certificate/ag/energous_v0_defines.py +925 -0
- brg_certificate/ag/energous_v1_defines.py +931 -0
- brg_certificate/ag/energous_v2_defines.py +925 -0
- brg_certificate/ag/energous_v3_defines.py +925 -0
- brg_certificate/ag/energous_v4_defines.py +925 -0
- brg_certificate/ag/fanstel_lan_v0_defines.py +925 -0
- brg_certificate/ag/fanstel_lte_v0_defines.py +925 -0
- brg_certificate/ag/fanstel_wifi_v0_defines.py +925 -0
- brg_certificate/ag/minew_lte_v0_defines.py +925 -0
- brg_certificate/ag/wlt_cmd_if.html +102 -0
- brg_certificate/ag/wlt_types.html +6114 -0
- brg_certificate/ag/wlt_types_ag.py +7840 -0
- brg_certificate/ag/wlt_types_ag_jsons/brg2brg_ota.json +142 -0
- brg_certificate/ag/wlt_types_ag_jsons/brg2gw_hb.json +785 -0
- brg_certificate/ag/wlt_types_ag_jsons/brg2gw_hb_sleep.json +139 -0
- brg_certificate/ag/wlt_types_ag_jsons/calibration.json +394 -0
- brg_certificate/ag/wlt_types_ag_jsons/custom.json +515 -0
- brg_certificate/ag/wlt_types_ag_jsons/datapath.json +672 -0
- brg_certificate/ag/wlt_types_ag_jsons/energy2400.json +550 -0
- brg_certificate/ag/wlt_types_ag_jsons/energySub1g.json +595 -0
- brg_certificate/ag/wlt_types_ag_jsons/externalSensor.json +598 -0
- brg_certificate/ag/wlt_types_ag_jsons/interface.json +938 -0
- brg_certificate/ag/wlt_types_ag_jsons/powerManagement.json +1234 -0
- brg_certificate/ag/wlt_types_ag_jsons/side_info_sensor.json +105 -0
- brg_certificate/ag/wlt_types_ag_jsons/signal_indicator_data.json +77 -0
- brg_certificate/ag/wlt_types_ag_jsons/unified_echo_ext_pkt.json +61 -0
- brg_certificate/ag/wlt_types_ag_jsons/unified_echo_pkt.json +110 -0
- brg_certificate/brg_certificate.py +191 -0
- brg_certificate/brg_certificate_cli.py +47 -0
- brg_certificate/cert_common.py +828 -0
- brg_certificate/cert_config.py +395 -0
- brg_certificate/cert_data_sim.py +188 -0
- brg_certificate/cert_defines.py +337 -0
- brg_certificate/cert_gw_sim.py +285 -0
- brg_certificate/cert_mqtt.py +373 -0
- brg_certificate/cert_prints.py +181 -0
- brg_certificate/cert_protobuf.py +88 -0
- brg_certificate/cert_results.py +300 -0
- brg_certificate/cert_utils.py +358 -0
- brg_certificate/certificate_sanity_test_list.txt +36 -0
- brg_certificate/certificate_test_list.txt +43 -0
- brg_certificate/config/eclipse.json +10 -0
- brg_certificate/config/hivemq.json +10 -0
- brg_certificate/config/mosquitto.json +10 -0
- brg_certificate/config/mosquitto.md +95 -0
- brg_certificate/config/wiliot-dev.json +10 -0
- brg_certificate/restore_brg.py +59 -0
- brg_certificate/tests/calibration/interval_test/interval_test.json +13 -0
- brg_certificate/tests/calibration/interval_test/interval_test.py +28 -0
- brg_certificate/tests/calibration/output_power_test/output_power_test.json +13 -0
- brg_certificate/tests/calibration/output_power_test/output_power_test.py +28 -0
- brg_certificate/tests/calibration/pattern_test/pattern_test.json +13 -0
- brg_certificate/tests/calibration/pattern_test/pattern_test.py +70 -0
- brg_certificate/tests/datapath/adaptive_pacer_algo_test/adaptive_pacer_algo_test.json +13 -0
- brg_certificate/tests/datapath/adaptive_pacer_algo_test/adaptive_pacer_algo_test.py +76 -0
- brg_certificate/tests/datapath/num_of_tags_test/num_of_tags_test.json +13 -0
- brg_certificate/tests/datapath/num_of_tags_test/num_of_tags_test.py +83 -0
- brg_certificate/tests/datapath/output_power_test/output_power_test.json +13 -0
- brg_certificate/tests/datapath/output_power_test/output_power_test.py +27 -0
- brg_certificate/tests/datapath/pacer_interval_ble5_test/pacer_interval_ble5_test.json +13 -0
- brg_certificate/tests/datapath/pacer_interval_ble5_test/pacer_interval_ble5_test.py +43 -0
- brg_certificate/tests/datapath/pacer_interval_tags_count_test/pacer_interval_tags_count_test.json +13 -0
- brg_certificate/tests/datapath/pacer_interval_tags_count_test/pacer_interval_tags_count_test.py +63 -0
- brg_certificate/tests/datapath/pacer_interval_test/pacer_interval_test.json +13 -0
- brg_certificate/tests/datapath/pacer_interval_test/pacer_interval_test.py +50 -0
- brg_certificate/tests/datapath/pattern_test/pattern_test.json +13 -0
- brg_certificate/tests/datapath/pattern_test/pattern_test.py +28 -0
- brg_certificate/tests/datapath/pkt_filter_ble5_test/pkt_filter_ble5_test.json +13 -0
- brg_certificate/tests/datapath/pkt_filter_ble5_test/pkt_filter_ble5_test.py +51 -0
- brg_certificate/tests/datapath/pkt_filter_gen3_test/pkt_filter_gen3_test.json +13 -0
- brg_certificate/tests/datapath/pkt_filter_gen3_test/pkt_filter_gen3_test.py +54 -0
- brg_certificate/tests/datapath/pkt_filter_test/pkt_filter_test.json +13 -0
- brg_certificate/tests/datapath/pkt_filter_test/pkt_filter_test.py +55 -0
- brg_certificate/tests/datapath/rssi_threshold_test/rssi_threshold_test.json +13 -0
- brg_certificate/tests/datapath/rssi_threshold_test/rssi_threshold_test.py +73 -0
- brg_certificate/tests/datapath/rx_channel_test/rx_channel_test.json +13 -0
- brg_certificate/tests/datapath/rx_channel_test/rx_channel_test.py +41 -0
- brg_certificate/tests/datapath/rx_rate_gen2_test/rx_rate_gen2_test.json +21 -0
- brg_certificate/tests/datapath/rx_rate_gen2_test/rx_rate_gen2_test.py +184 -0
- brg_certificate/tests/datapath/rx_rate_gen3_test/rx_rate_gen3_test.json +21 -0
- brg_certificate/tests/datapath/rx_rate_gen3_test/rx_rate_gen3_test.py +210 -0
- brg_certificate/tests/datapath/stress_gen3_test/stress_gen3_test.json +30 -0
- brg_certificate/tests/datapath/stress_gen3_test/stress_gen3_test.py +203 -0
- brg_certificate/tests/datapath/stress_test/stress_test.json +30 -0
- brg_certificate/tests/datapath/stress_test/stress_test.py +210 -0
- brg_certificate/tests/datapath/tx_repetition_algo_test/tx_repetition_algo_test.json +13 -0
- brg_certificate/tests/datapath/tx_repetition_algo_test/tx_repetition_algo_test.py +113 -0
- brg_certificate/tests/datapath/tx_repetition_test/tx_repetition_test.json +13 -0
- brg_certificate/tests/datapath/tx_repetition_test/tx_repetition_test.py +79 -0
- brg_certificate/tests/edge_mgmt/actions_test/actions_test.json +13 -0
- brg_certificate/tests/edge_mgmt/actions_test/actions_test.py +432 -0
- brg_certificate/tests/edge_mgmt/brg2brg_ota_ble5_test/brg2brg_ota_ble5_test.json +13 -0
- brg_certificate/tests/edge_mgmt/brg2brg_ota_ble5_test/brg2brg_ota_ble5_test.py +94 -0
- brg_certificate/tests/edge_mgmt/brg2brg_ota_test/brg2brg_ota_test.json +13 -0
- brg_certificate/tests/edge_mgmt/brg2brg_ota_test/brg2brg_ota_test.py +87 -0
- brg_certificate/tests/edge_mgmt/leds_test/leds_test.json +13 -0
- brg_certificate/tests/edge_mgmt/leds_test/leds_test.py +210 -0
- brg_certificate/tests/edge_mgmt/ota_test/ota_test.json +13 -0
- brg_certificate/tests/edge_mgmt/ota_test/ota_test.py +83 -0
- brg_certificate/tests/edge_mgmt/stat_test/stat_test.json +13 -0
- brg_certificate/tests/edge_mgmt/stat_test/stat_test.py +48 -0
- brg_certificate/tests/energy2400/duty_cycle_test/duty_cycle_test.json +13 -0
- brg_certificate/tests/energy2400/duty_cycle_test/duty_cycle_test.py +26 -0
- brg_certificate/tests/energy2400/output_power_test/output_power_test.json +13 -0
- brg_certificate/tests/energy2400/output_power_test/output_power_test.py +27 -0
- brg_certificate/tests/energy2400/pattern_test/pattern_test.json +13 -0
- brg_certificate/tests/energy2400/pattern_test/pattern_test.py +28 -0
- brg_certificate/tests/energy2400/signal_indicator_ble5_test/signal_indicator_ble5_test.json +13 -0
- brg_certificate/tests/energy2400/signal_indicator_ble5_test/signal_indicator_ble5_test.py +398 -0
- brg_certificate/tests/energy2400/signal_indicator_sub1g_2_4_test/signal_indicator_sub1g_2_4_test.json +13 -0
- brg_certificate/tests/energy2400/signal_indicator_sub1g_2_4_test/signal_indicator_sub1g_2_4_test.py +153 -0
- brg_certificate/tests/energy2400/signal_indicator_test/signal_indicator_test.json +13 -0
- brg_certificate/tests/energy2400/signal_indicator_test/signal_indicator_test.py +264 -0
- brg_certificate/tests/energy_sub1g/duty_cycle_test/duty_cycle_test.json +13 -0
- brg_certificate/tests/energy_sub1g/duty_cycle_test/duty_cycle_test.py +27 -0
- brg_certificate/tests/energy_sub1g/pattern_test/pattern_test.json +13 -0
- brg_certificate/tests/energy_sub1g/pattern_test/pattern_test.py +26 -0
- brg_certificate/tests/energy_sub1g/signal_indicator_functionality_test/signal_indicator_functionality_test.json +13 -0
- brg_certificate/tests/energy_sub1g/signal_indicator_functionality_test/signal_indicator_functionality_test.py +397 -0
- brg_certificate/tests/energy_sub1g/signal_indicator_test/signal_indicator_test.json +13 -0
- brg_certificate/tests/energy_sub1g/signal_indicator_test/signal_indicator_test.py +27 -0
- brg_certificate/wltPb_pb2.py +72 -0
- brg_certificate/wltPb_pb2.pyi +227 -0
- brg_certificate/wlt_types.py +114 -0
- gw_certificate/api/extended_api.py +7 -1531
- gw_certificate/api_if/200/data.json +106 -0
- gw_certificate/api_if/200/logs.json +12 -0
- gw_certificate/api_if/200/status.json +47 -0
- gw_certificate/api_if/201/data.json +98 -0
- gw_certificate/api_if/201/logs.json +12 -0
- gw_certificate/api_if/201/status.json +53 -0
- gw_certificate/api_if/202/data.json +83 -0
- gw_certificate/api_if/202/logs.json +12 -0
- gw_certificate/api_if/202/status.json +60 -0
- gw_certificate/api_if/203/data.json +85 -0
- gw_certificate/api_if/203/logs.json +12 -0
- gw_certificate/api_if/203/status.json +63 -0
- gw_certificate/api_if/204/data.json +85 -0
- gw_certificate/api_if/204/logs.json +12 -0
- gw_certificate/api_if/204/status.json +63 -0
- gw_certificate/api_if/205/data.json +85 -0
- gw_certificate/api_if/205/logs.json +12 -0
- gw_certificate/api_if/205/status.json +63 -0
- gw_certificate/api_if/api_validation.py +0 -2
- gw_certificate/common/analysis_data_bricks.py +18 -1413
- gw_certificate/common/debug.py +0 -21
- gw_certificate/common/utils.py +1 -212
- gw_certificate/common/utils_defines.py +0 -87
- gw_certificate/gw_certificate.py +9 -7
- gw_certificate/gw_certificate_cli.py +39 -23
- gw_certificate/interface/4.4.52_app.zip +0 -0
- gw_certificate/interface/4.4.52_sd_bl_app.zip +0 -0
- gw_certificate/interface/ble_simulator.py +0 -32
- gw_certificate/interface/if_defines.py +1 -0
- gw_certificate/interface/mqtt.py +96 -19
- gw_certificate/interface/nrfutil-linux +0 -0
- gw_certificate/interface/nrfutil-mac +0 -0
- gw_certificate/interface/nrfutil.exe +0 -0
- gw_certificate/interface/pkt_generator.py +0 -82
- gw_certificate/interface/uart_if.py +73 -43
- gw_certificate/templates/results.html +1 -1
- gw_certificate/tests/__init__.py +1 -2
- gw_certificate/tests/actions.py +134 -9
- gw_certificate/tests/connection.py +10 -5
- gw_certificate/tests/downlink.py +2 -4
- gw_certificate/tests/generic.py +62 -12
- gw_certificate/tests/registration.py +78 -27
- gw_certificate/tests/static/generated_packet_table.py +12 -48
- gw_certificate/tests/static/packet_table.csv +10048 -10048
- gw_certificate/tests/static/references.py +2 -1
- gw_certificate/tests/static/uplink_defines.py +0 -7
- gw_certificate/tests/throughput.py +7 -12
- gw_certificate/tests/uplink.py +83 -43
- {wiliot_certificate-1.3.0a1.dist-info → wiliot_certificate-1.4.0a2.dist-info}/METADATA +59 -8
- wiliot_certificate-1.4.0a2.dist-info/RECORD +198 -0
- {wiliot_certificate-1.3.0a1.dist-info → wiliot_certificate-1.4.0a2.dist-info}/WHEEL +1 -1
- wiliot_certificate-1.4.0a2.dist-info/entry_points.txt +3 -0
- wiliot_certificate-1.4.0a2.dist-info/top_level.txt +2 -0
- gw_certificate/interface/packet_error.py +0 -22
- wiliot_certificate-1.3.0a1.dist-info/RECORD +0 -51
- wiliot_certificate-1.3.0a1.dist-info/entry_points.txt +0 -2
- wiliot_certificate-1.3.0a1.dist-info/top_level.txt +0 -1
- {wiliot_certificate-1.3.0a1.dist-info → wiliot_certificate-1.4.0a2.dist-info}/LICENSE +0 -0
brg_certificate/cert_results.py (new file)
@@ -0,0 +1,300 @@
```python
import os
import tabulate
import subprocess
from reportlab.lib import colors
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_CENTER, TA_LEFT

# Local imports
import brg_certificate.cert_utils as cert_utils
import brg_certificate.cert_prints as cert_prints
from brg_certificate.cert_defines import BASE_DIR
from brg_certificate.cert_defines import TEST_FAILED, TEST_SKIPPED, TEST_PASSED, TEST_INIT, UT_RESULT_FILE_HTML, UT_RESULT_FILE_PDF


##################################
# GENERIC
##################################
result_map = {TEST_FAILED: cert_prints.color("RED", "FAIL"), TEST_SKIPPED: cert_prints.color("WARNING", "SKIPPED"),
              TEST_PASSED: cert_prints.color("GREEN", "PASS"), TEST_INIT: cert_prints.color("CYAN", "INIT")}
pass_or_fail = lambda obj : result_map[obj.rc]

class TestResult:
    def __init__(self, name="", devices_to_print="", test_table=None, result=None, duration=0):
        self.name = name
        self.devices = devices_to_print
        self.result = result
        self.test_table = test_table
        self.duration = duration

    def __repr__(self):
        return self.name

def generate_tests_table(tests=[], html=False):
    headers = ["Module", "Test Name", "Device", "Result & Reason Breakdown", "Result", "Run Time"]
    inner_format = "unsafehtml" if html else "simple"
    _pass_or_fail = pass_or_fail_html if html else pass_or_fail
    tests_results = []
    for test in tests:
        brgs_to_print = (test.gw if not test.brg0 or test.gw_only else
                         (f"{test.brg0.id_str}\n{test.brg1.id_str}" if test.brg1 and test.multi_brg else test.brg0.id_str))
        inner_table = [[phase.name, _pass_or_fail(phase), phase.reason] for phase in test.phases]
        result_breakdown_table = tabulate.tabulate(inner_table, headers=["Phase", "Result", "Reason"], tablefmt=inner_format)
        tests_results.append([cert_utils.module2name(test.test_module),
                              test.module_name if (not test.internal_brg or "gw" in test.module_name) else f"{test.module_name} (internal brg)",
                              brgs_to_print,
                              result_breakdown_table,
                              _pass_or_fail(test),
                              test.duration])
    return tabulate.tabulate(tests_results, headers=headers, tablefmt="unsafehtml" if html else "fancy_grid")

def get_update_status_from_log_file(log_file="update_log.txt"):
    update_status = "No version update logs were found"
    if os.path.isfile("update_log.txt"):
        with open(os.path.join(BASE_DIR, log_file), "r") as update_log:
            for l in update_log.readlines():
                if "ERROR: Didn't get response from BRG" in l:
                    update_status = "Didn't get response from BRG in order to start the update!"
                    break
                elif "ERROR: Didn't get response from" in l:
                    update_status = "Didn't get response from GW in order to start the update!"
                    break
                elif "version_update_test failed!" in l:
                    update_status = "GW version update failed!"
                    break
                elif "ota_test failed!" in l:
                    update_status = "BRG OTA failed!"
                    break
                elif "Wiliot UT PASSED!" in l:
                    update_status = "GW and BRG versions were updated to latest successfully!"
                    break
                elif "Update skipped!" in l:
                    update_status = "GW and BRG versions update skipped!"
                    break
    return update_status

def generate_results_files(html=True, pdf=True, failures=0, skipped=0, duration=0, brg_version='', tests=[], error=None, pipeline=False):
    # Generate HTML file
    if html:
        f = open(os.path.join(BASE_DIR, UT_RESULT_FILE_HTML), "w", encoding="utf-8")
        f.write(HTML_START)
        update_status = get_update_status_from_log_file()
        if pipeline:
            p = subprocess.Popen('git log --format=%B -n 1 {}'.format(os.environ['BITBUCKET_COMMIT']),
                                 stdout=subprocess.PIPE, shell=True, cwd=os.environ['BITBUCKET_CLONE_DIR'])
            output, err = p.communicate()
        if error:
            f.write("<br><h1 style='color:#ab0000'>Wiliot UT Failed!</h1><br>")
            if pipeline:
                f.write("<hr>" + output.decode("utf-8") + "<br>")
                f.write("<p><a href='https://bitbucket.org/wiliot/wiliot-nordic-firmware/commits/{}'>Commit page on bitbucket</a><hr>".format(os.environ['BITBUCKET_COMMIT']))
            f.write(update_status + "<br><br>")
            f.write(error + "<br><br>")
            f.write("Run duration: {} <br><br>".format(str(duration).split(".")[0]))
            if brg_version:
                f.write("Bridge version: {} <br><br>".format(brg_version))
        elif tests:
            if not failures and ("successfully!" in update_status or "skipped!" in update_status or not pipeline):
                f.write("<br><h1 style='color:#00AB83'>Wiliot UT Passed!</h1>")
            else:
                f.write("<br><h1 style='color:#ab0000'>Wiliot UT Failed!</h1>")
            if pipeline:
                f.write("<hr>" + output.decode("utf-8") + "<br>")
                f.write("<p><a href='https://bitbucket.org/wiliot/wiliot-nordic-firmware/commits/{}'>Commit page on bitbucket</a><hr>".format(os.environ['BITBUCKET_COMMIT']))
            f.write(update_status + "<br><br>")
            f.write("Tests duration: {} <br><br>".format(str(duration).split(".")[0]))
            if brg_version:
                f.write("Bridge version: {} <br><br>".format(brg_version))
            f.write(tabulate.tabulate([[len(tests)-(failures+skipped), skipped, failures, len(tests)]], headers=["PASSED", "SKIPPED", "FAILED", "TOTAL"], tablefmt="html"))
            f.write(generate_tests_table(tests, html=True))
            f.write("<br><br>")
        if pipeline:
            f.write("<p><a href='https://bitbucket.org/wiliot/wiliot-nordic-firmware/pipelines/results/{}'>Build's page and artifacts on bitbucket</a></p><br><br>".format(os.environ['BITBUCKET_BUILD_NUMBER']))
        f.write("<img src='https://www.wiliot.com/src/img/svg/logo.svg' width='100' height='40' alt='Wiliot logo'>")
        f.write(HTML_END)
        f.close()

    # Generate PDF file
    if pdf:
        doc = SimpleDocTemplate(os.path.join(BASE_DIR, UT_RESULT_FILE_PDF), pagesize=letter)
        styles = getSampleStyleSheet()
        elements = []
        update_status = get_update_status_from_log_file()

        # Title and Summary
        red_header = STYLES_PDF.get("RED_HEADER", ParagraphStyle("Default"))
        green_header = STYLES_PDF.get("GREEN_HEADER", ParagraphStyle("Default"))
        module_header = STYLES_PDF.get("MODULE_HEADER", ParagraphStyle("Default"))
        test_header = STYLES_PDF.get("TEST_HEADER", ParagraphStyle("Default"))
        if error:
            title = Paragraph("<b>Wiliot UT Failed!</b>", red_header)
            elements.append(title)
            elements.append(Paragraph(f"Error: {error}", styles['BodyText']))
        else:
            title = Paragraph("<b>Wiliot UT Passed!</b>", green_header) if not failures else Paragraph("<b>Wiliot UT Failed!</b>", red_header)
            elements.append(title)
        elements.append(Spacer(1, 50))

        elements.append(Paragraph(f"Tests duration: {str(duration).split('.')[0]}", styles['BodyText']))
        if brg_version:
            elements.append(Paragraph(f"Bridge version: {brg_version}", styles['BodyText']))
        elements.append(Paragraph(f"Update status: {update_status}", styles['BodyText']))
        elements.append(Spacer(1, 20))

        # Summary Table
        summary_data = [
            ["PASSED", "SKIPPED", "FAILED", "TOTAL"],
            [len(tests)-(failures+skipped), skipped, failures, len(tests)]
        ]
        summary_table = Table(summary_data)
        summary_table.setStyle(INNER_TABLE_STYLE)
        elements.append(summary_table)
        elements.append(Spacer(1, 20))

        # Test Results
        results_per_module = generate_results_per_module_for_pdf(tests=tests)
        for module, test_results in results_per_module.items():
            elements.append(Paragraph(f"<b>{module} Module</b>", module_header))
            elements.append(Spacer(1, 20))
            for test_result in test_results:
                elements.append(Paragraph(f"<b>{test_result.name}</b>", test_header))
                elements.append(Spacer(1, 10))

                elements.append(test_result.result)
                elements.append(Spacer(1, 10))

                elements.append(Paragraph(f"Tested Devices: {test_result.devices}", styles['BodyText']))
                elements.append(Paragraph(f"Duration: {test_result.duration}", styles['BodyText']))
                elements.append(Spacer(1, 10))

                elements.append(test_result.test_table)
                elements.append(Spacer(1, 20))
            elements.append(Spacer(1, 50))

        doc.build(elements)


##################################
# HTML
##################################
COLORS_HTML = {
    "HEADER": "color: #ff00ff;",  # Purple
    "BLUE": "color: #0000ff;",  # Blue
    "CYAN": "color: #00ffff;",  # Cyan
    "GREEN": "color: #00ff00;",  # Green
    "WARNING": "color: #ffff00;",  # Yellow
    "RED": "color: #ff0000;",  # Red
    "BOLD": "font-weight: bold;",
    "UNDERLINE": "text-decoration: underline;",
}
color_html = lambda c, t: f'<span style="{COLORS_HTML.get(c, "")}{COLORS_HTML["BOLD"]}">{t}</span>'
html_result_map = {TEST_FAILED: color_html("RED", "FAIL"), TEST_SKIPPED: color_html("WARNING", "SKIPPED"),
                   TEST_PASSED: color_html("GREEN", "PASS"), TEST_INIT: color_html("CYAN", "INIT")}
pass_or_fail_html = lambda obj : html_result_map[obj.rc]

HTML_START = """
<!DOCTYPE html>
<html>
<head>
<meta charset='utf-8'>
<meta http-equiv='X-UA-Compatible' content='IE=edge'>
<title>UT RESULTS</title>
<meta name='viewport' content='width=device-width, initial-scale=1'>
<style>
html, body {
    height: 100%;
}

html {
    display: table;
    margin: auto;
}

body {
    display: table-cell;
    vertical-align: middle;
}
table {
    border-collapse: collapse;
    font-family: Tahoma, Geneva, sans-serif;
}
table td {
    padding: 15px;
}
table thead td {
    background-color: #54585d;
    color: #ffffff;
    font-weight: bold;
    font-size: 13px;
    border: 1px solid #54585d;
}
table tbody td {
    color: #636363;
    border: 1px solid #dddfe1;
}
table tbody tr {
    background-color: #f9fafb;
}
table tbody tr:nth-child(odd) {
    background-color: #ffffff;
}
</style>
</head>
<body>
"""
HTML_END = """
</body>
</html>
"""

##################################
# PDF
##################################
STYLES_PDF = {
    "GREEN_HEADER": ParagraphStyle("Green Header", fontName="Helvetica-Bold", fontSize=20, textColor=colors.green, alignment=TA_CENTER),
    "RED_HEADER": ParagraphStyle("Red Header", fontName="Helvetica-Bold", fontSize=20, textColor=colors.red, alignment=TA_CENTER),
    "MODULE_HEADER": ParagraphStyle("Module Header", fontName="Helvetica-Bold", fontSize=16, textColor=colors.blue, alignment=TA_CENTER),
    "TEST_HEADER": ParagraphStyle("Test Header", fontName="Helvetica-Bold", fontSize=12, textColor=colors.black, alignment=TA_CENTER),
    "BLUE": ParagraphStyle("Blue", fontName="Helvetica-Bold", fontSize=9, textColor=colors.blue, splitLongWords=False, alignment=TA_CENTER),
    "CYAN": ParagraphStyle("Cyan", fontName="Helvetica-Bold", fontSize=9, textColor=colors.cyan, splitLongWords=False, alignment=TA_CENTER),
    "GREEN": ParagraphStyle("Green", fontName="Helvetica-Bold", fontSize=9, textColor=colors.green, splitLongWords=False, alignment=TA_CENTER),
    "WARNING": ParagraphStyle("Warning", fontName="Helvetica-Bold", fontSize=9, textColor=colors.gold, splitLongWords=False, alignment=TA_CENTER),
    "RED": ParagraphStyle("Red", fontName="Helvetica-Bold", fontSize=9, textColor=colors.red, splitLongWords=False, alignment=TA_CENTER),
}
def color_pdf(c, t):
    style = STYLES_PDF.get(c, ParagraphStyle("Default"))
    return Paragraph(t, style)
pdf_result_map = {TEST_FAILED: color_pdf("RED", "FAILED"), TEST_SKIPPED: color_pdf("WARNING", "SKIPPED"),
                  TEST_PASSED: color_pdf("GREEN", "PASSED"), TEST_INIT: color_pdf("CYAN", "INIT")}
pass_or_fail_pdf = lambda obj : pdf_result_map[obj.rc]

INNER_TABLE_STYLE = TableStyle([
    ('BACKGROUND', (0, 0), (-1, 0), colors.grey),
    ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
    ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
    ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
    ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
    ('FONTSIZE', (0, 0), (-1, 0), 9),
    ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
    ('BACKGROUND', (0, 1), (-1, -1), colors.beige),
    ('WORDWRAP', (0, 0), (-1, -1), False),
])

def generate_results_per_module_for_pdf(tests=[]):
    results_per_module = {}
    for test in tests:
        name = test.module_name if (not test.internal_brg or "gw" in test.module_name) else f"{test.module_name} (internal brg)"
        devices_to_print = (test.gw if not test.brg0 or test.gw_only else
                            (f"{test.brg0.id_str}\n{test.brg1.id_str}" if test.brg1 and test.multi_brg else test.brg0.id_str))
        inner_table = [[phase.name, pass_or_fail_pdf(phase), phase.reason] for phase in test.phases]
        test_table = Table([["Phase", "Result", "Reason"]] + inner_table)
        test_table.setStyle(INNER_TABLE_STYLE)
        test_result = TestResult(name=name, devices_to_print=devices_to_print,
                                 test_table=test_table, result=pass_or_fail_pdf(test), duration=test.duration)
        module_name = cert_utils.module2name(test.test_module)
        if module_name not in results_per_module:
            results_per_module[module_name] = [test_result]
        else:
            results_per_module[module_name] += [test_result]
    return results_per_module
```
brg_certificate/cert_utils.py (new file)
@@ -0,0 +1,358 @@
```python

import os
import random
import tabulate
import importlib # needed for importing all of the tests
from requests import codes as r_codes

# Local imports
import brg_certificate.cert_config as cert_config
import brg_certificate.cert_common as cert_common
import brg_certificate.cert_results as cert_results
from brg_certificate.wlt_types import *
from brg_certificate.cert_defines import *
from brg_certificate.cert_prints import *

MULTI_BRG_STR = "multi_brg" # used for multi brg tests
GW_ONLY_STR = "gw_only" # used for gw only tests
INTERNAL_BRG_STR = "internal_brg"
ORIGINAL_AG_FILE = "wlt_types_ag.py"

##################################
# Utils
##################################

TEST_MODULES_MAP = {"calibration": ag.MODULE_CALIBRATION, "datapath": ag.MODULE_DATAPATH, "energy2400": ag.MODULE_ENERGY_2400, "energy_sub1g": ag.MODULE_ENERGY_SUB1G,
                    "pwr_mgmt": ag.MODULE_PWR_MGMT, "sensors": ag.MODULE_EXT_SENSORS, "custom": ag.MODULE_CUSTOM}

STATIC_RANDOM_ADDR_MASK = 0x0000000000C0
hex2alias_id_get = lambda id_str: cert_common.int2mac_get(int(id_str, 16) | STATIC_RANDOM_ADDR_MASK)

def module2name(module_id):
    for k, v in TEST_MODULES_MAP.items():
        if module_id == v:
            return k
    return ''

def load_module(module_name, module_path, rel_path="."):
    spec = importlib.util.spec_from_file_location(module_name, os.path.join(BASE_DIR, rel_path, module_path))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

def handle_error(error, start_time):
    utPrint(error, "red")
    duration = (datetime.datetime.now()-start_time)
    cert_results.generate_results_files(html=True, pdf=True, duration=duration, error=error, pipeline=cert_common.pipeline_running())
    sys.exit(-1)

##################################
# Test
##################################
class WltTest:
    def __init__(self, line, gw, mqttc, brg0=None, brg1=None, exit_on_param_failure=False, gw_lan=False,
                 gw_orig_versions={}, server=PROD, latest=False, release_candidate=False, private_setup=False,
                 internal_brg_obj=None, gw_sim='', data='', port='', protobuf=False):
        if line:
            test_list_line = line.strip().split()
            self.name = test_list_line[0]
            self.test_module = TEST_MODULES_MAP[self.name.split('/')[0]] if self.name.split('/')[0] in TEST_MODULES_MAP else ag.MODULE_EMPTY
            line_params = test_list_line[1:]
            self.dir = os.path.join("tests", self.name)
            self.module_name = os.path.join(os.path.basename(self.name))
            self.file = os.path.join(self.dir, os.path.basename(self.name)+".py")
            # Load test json
            test_json_file = open(os.path.join(BASE_DIR, self.dir, os.path.basename(self.name)+".json"))
            self.test_json = json.load(test_json_file)
            self.gw_only = self.test_json[GW_ONLY_TEST]
            self.multi_brg = self.test_json[MULTI_BRG_TEST]
            self.internal_brg = INTERNAL_BRG_STR in line_params
            if INTERNAL_BRG_STR in line_params: line_params.remove(INTERNAL_BRG_STR)
            self.create_test_phases_and_params(line_params)
        else:
            self.test_json = {}
            self.internal_brg = False
            self.multi_brg = False
            self.phases = [Phase(PROLOG), Phase(TEST_BODY), Phase(EPILOG)]
            self.params = []

        self.gw = gw
        self.internal_brg_obj = internal_brg_obj
        # Actual brg to cfg - can be brg0 or brg1
        self.active_brg = brg0
        self.brg0 = brg0
        self.brg1 = brg1
        self.rc = TEST_PASSED
        self.reason = TEST_SUCCESS
        self.start_time = None
        self.end_time = None
        self.duration = None
        self.mqttc = mqttc
        self.rtsa = ""
        self.exit_on_param_failure = exit_on_param_failure
        self.rand = random.randrange(255)
        self.gw_lan = gw_lan
        self.gw_orig_versions = gw_orig_versions
        self.server = server
        self.latest = latest
        self.release_candidate = release_candidate
        self.private_setup = private_setup
        self.gw_sim = gw_sim
        self.data = data
        self.port = port
        self.protobuf = protobuf

    def create_test_phases_and_params(self, line_params):
        self.params = []
        if len(self.test_json[ALL_SUPPORTED_VALUES]) > 0:
            self.phases = [Phase(PROLOG)] + [Phase(param) for param in self.test_json[ALL_SUPPORTED_VALUES]] + [Phase(EPILOG)]
            for param_phase in self.phases:
                param = Param(param_phase.name)
                if (param.name in line_params or param.value in [eval_param(p) for p in line_params]):
                    self.params += [param]
                else:
                    param_phase.tested = False
                    param_phase.rc = TEST_SKIPPED
            if all([param_phase.rc == TEST_SKIPPED for param_phase in self.phases]):
                error = f"ERROR: All params skipped for test {self.name}! Check test list file and update the supported values!\n{[f.__dict__ for f in self.phases]}"
                handle_error(error, datetime.datetime.now())
        else:
            if line_params:
                error = f"ERROR: For {self.name} params exist in test_list but not in test_json!\nline_params:{line_params}"
                handle_error(error, datetime.datetime.now())
            self.phases = [Phase(PROLOG), Phase(TEST_BODY), Phase(EPILOG)]

    # Phase rc
    def set_phase_rc(self, phase_name, rc):
        phase = self.get_phase_by_name(phase_name)
        phase.rc = rc

    def get_phase_rc(self, phase_name):
        phase = self.get_phase_by_name(phase_name)
        return phase.rc

    # Phase reason
    def add_phase_reason(self, phase_name, reason):
        phase = self.get_phase_by_name(phase_name)
        if TEST_SUCCESS in phase.reason:
            phase.reason = phase.reason.replace(TEST_SUCCESS, "")
        else:
            phase.reason += "\n"
        if reason not in phase.reason:
            phase.reason += reason

    def get_phase_reason(self, phase_name):
        phase = self.get_phase_by_name(phase_name)
        return phase.reason

    # Test funcs
    def get_phase_by_name(self, phase_name):
        for phase in self.phases:
            if phase.name == phase_name:
                return phase
        return None

    def update_overall_rc(self):
        if any([phase.rc == TEST_FAILED for phase in self.phases]):
            self.rc = TEST_FAILED

    def reset_result(self):
        self.rc = TEST_PASSED
        self.reason = TEST_SUCCESS

    def get_seq_id(self):
        self.rand = (self.rand + 1) % 256
        return self.rand

    # TODO - remove when test reason is re-designed
    def add_reason(self, reason):
        if TEST_SUCCESS in self.reason:
            self.reason = self.reason.replace(TEST_SUCCESS, "")
        else:
            self.reason += "\n"
        if reason not in self.reason:
            self.reason += reason

    def internal_id_alias(self):
        return self.internal_brg_obj.id_alias

##################################
# Phases
##################################
class Phase:
    def __init__(self, input=None, tested=True, rc=TEST_INIT, reason=TEST_SUCCESS):
        self.name = str(input)
        self.tested = tested
        self.rc = rc
        self.reason = reason

    def __repr__(self):
        return self.name

##################################
# Param
##################################
class Param:
    def __init__(self, input=None):
        self.name = str(input)
        self.value = eval_param(input)

    def __repr__(self):
        return self.name

##################################
# Bridge
##################################
class Bridge:
    def __init__(self, id_str="", board_type=0, cfg_hash=0, api_version=ag.API_VERSION_LATEST, interface_pkt=None, import_defs=True, rel_path="."):
        self.id_str = id_str
        self.id_int = hex_str2int(id_str)
        self.id_alias = hex2alias_id_get(id_str)
        self.board_type = interface_pkt.board_type if interface_pkt else board_type
        self.version = f"{interface_pkt.major_ver}.{interface_pkt.minor_ver}.{interface_pkt.patch_ver}" if interface_pkt else ""
        self.bl_version = interface_pkt.bl_version if interface_pkt else ""
        self.cfg_hash = interface_pkt.cfg_hash if interface_pkt else cfg_hash
        self.api_version = interface_pkt.api_version if interface_pkt else api_version
        if import_defs:
            self.defines_file_name = f'{ag.BOARD_TYPES_LIST[self.board_type]}_defines.py'
            # Override auto-generated defines and classes for the specific bridge
            if os.path.exists(f"./ag/{self.defines_file_name}"):
                new_defines = load_module(self.defines_file_name, f"./ag/{self.defines_file_name}", rel_path)
            else:
                new_defines = load_module(ORIGINAL_AG_FILE, f"./ag/{ORIGINAL_AG_FILE}", rel_path)
            ag.__dict__.update(new_defines.__dict__)
        self.sup_caps = []
        self.modules = []
        if interface_pkt:
            for key, value in interface_pkt.__dict__.items():
                if 'sup_cap_' in key and value:
                    module = key.replace('sup_cap_','')
                    if module in TEST_MODULES_MAP:
                        self.sup_caps += [TEST_MODULES_MAP[module]]
                        self.modules += [eval_pkt(ag.MODULES_DICT[TEST_MODULES_MAP[module]] + str(self.api_version))]

    def update_modules(self):
        self.modules = []
        for sup_cap in self.sup_caps:
            self.modules += [eval_pkt(ag.MODULES_DICT[sup_cap] + str(self.api_version))]

    def is_sup_cap(self, test):
        return test.test_module in self.sup_caps if test.test_module and self.sup_caps else True


def cfg_brg_defaults_ret_after_fail(test):
    utPrint(f"Configuring bridge {test.active_brg.id_str} to defaults", "BLUE")
    modules = test.active_brg.modules
    for module in modules:
        utPrint(f"Configuring {module.__name__} to defaults", "BLUE")
        cfg_pkt = cert_config.get_default_brg_pkt(test, module)
        res = cert_config.brg_configure(test=test, cfg_pkt=cfg_pkt)[1]
        if res == NO_RESPONSE:
            utPrint(f"FAILURE: {module.__name__} configuration to defaults", "RED")
            return NO_RESPONSE
        else:
            utPrint(f"SUCCESS: {module.__name__} configured to defaults", "GREEN")
    return DONE

def handle_prep_brg_for_latest(test, interface, start_time):
    if test.rc == TEST_FAILED:
        utPrint(f"No ModuleIf pkts found, try again", "BLUE")
        test.rc = TEST_SUCCESS
        test, interface = cert_common.get_module_if_pkt(test)
        if test.rc == TEST_FAILED:
            error = f"ERROR: No ModuleIf pkts found for 2 tries, couldn't perform OTA for bridge"
            handle_error(error, start_time)
    version = f"{interface.major_ver}.{interface.minor_ver}.{interface.patch_ver}"
    board_type = interface.board_type
    utPrint(f"BRG version [{version}], board type [{board_type}]", "BLUE")
    utPrint(f"Skipping configurations for BRG {test.brg0.id_str} to defaults because of latest/rc flag", "BLUE")
    return Bridge(test.brg0.id_str, interface_pkt=interface)

# Check BRGs are online and configure to defaults
def ut_prep_brg(args, mqttc, start_time, gw, brg, gw_server, protobuf):
    brg = Bridge(brg)
    utPrint(SEP)
    if not args.port:
        versions_mgmt = load_module('versions_mgmt.py', f'{UTILS_BASE_REL_PATH}/versions_mgmt.py')
        brg_owner = versions_mgmt.gw_brg_owner(env="aws", server="prod", brg=brg.id_str)
        if brg_owner and not brg_owner in r_codes:
            print_warn(f"BRG {brg.id_str} owned by account {brg_owner}")
    test = WltTest("", gw, mqttc, brg0=brg, gw_lan=args.lan, server=gw_server, exit_on_param_failure=args.exit_on_param_failure,
                   protobuf=protobuf)
    utPrint(f"Getting BRG {brg.id_str} version and board type", "BLUE")
    test, interface = cert_common.get_module_if_pkt(test)
    if args.latest or args.rc:
        return handle_prep_brg_for_latest(test, interface, start_time)
    elif test.rc == TEST_FAILED:
        error = f"ERROR: Didn't get ModuleIfV{test.active_brg.api_version} from BRG:{brg.id_str}!"
        handle_error(error, start_time)
    version = f"{interface.major_ver}.{interface.minor_ver}.{interface.patch_ver}"
    board_type = interface.board_type
    utPrint(f"BRG version [{version}], board type [{board_type}]", "BLUE")
    test.active_brg = Bridge(brg.id_str, interface_pkt=interface)
    modules_support = []
    for module in TEST_MODULES_MAP:
        modules_support.append([module, color("GREEN", "SUPPORTED") if TEST_MODULES_MAP[module] in test.active_brg.sup_caps else color("RED", "UNSUPPORTED")])
    utPrint(f"BRG {brg.id_str} modules support coverage:", "BLUE")
    print(tabulate.tabulate(modules_support, headers=['Module', 'Support'], tablefmt="fancy_grid"))
    test.active_brg.board_type = board_type
    cfg_output = cfg_brg_defaults_ret_after_fail(test=test)[1]
    if cfg_output == NO_RESPONSE:
        error = f"ERROR: Didn't get response from BRG:{brg.id_str}!"
        handle_error(error, start_time)
    test, interface = cert_common.get_module_if_pkt(test)
    if test.rc == TEST_FAILED:
        error = f"ERROR: Didn't get ModuleIfV{test.active_brg.api_version} from BRG:{brg.id_str}!"
        handle_error(error, start_time)
    utPrint(f"Received cfg hash {hex(interface.cfg_hash)}", "BLUE")
    if not interface.cfg_hash or len(str(interface.cfg_hash)) < BRG_CFG_HAS_LEN:
        error = f"ERROR: invalid cfg_hash for BRG:{brg.id_str}!"
        handle_error(error, start_time)
    utPrint(f"BRG {brg.id_str} cfg_hash_default={hex(interface.cfg_hash)}", "BLUE")
    return Bridge(brg.id_str, interface_pkt=interface)

##################################
# Gateway
##################################
# Used when gw is not really important for the test (e.g: gw_sim)
def get_random_gw():
    return ''.join([random.choice('0123456789ABCDEF') for i in range(12)])

def get_gw_id(gw):
    if gw.startswith(GW_SIM_PREFIX) and len(gw) == len(GW_SIM_PREFIX):
        return f"GW_SIM_{get_random_gw()}"
    else:
        return gw

def ut_prep_gw(args, mqttc, start_time):
    # Check GW is online and configure to defaults
    utPrint(SEP)
    gw = args.gw
    test = WltTest("", gw, mqttc, gw_lan=args.lan)
    utPrint(f"Getting GW {gw} Information", "BLUE")
    response = cert_common.get_gw_info(test)
    if response == NO_RESPONSE:
        error = f"ERROR: Didn't get response from {gw} !"
        handle_error(error, start_time)
    if ENTRIES in response[GW_INFO]:
        # Protobuf
        test.protobuf = True
        gw_version = {BLE_VERSION : response[GW_INFO][ENTRIES][BLE_VERSION][STR_VAL], WIFI_VERSION : response[GW_INFO][ENTRIES][WIFI_VERSION][STR_VAL]}
        internal_brg_mac_addr = response[GW_INFO][ENTRIES][BLE_MAC_ADDR][STR_VAL]
        gw_server = response[GW_INFO][ENTRIES][WLT_SERVER][STR_VAL] if WLT_SERVER in response[GW_INFO][ENTRIES] else PROD
    else:
        test.protobuf = False
        gw_version = {BLE_VERSION : response[GW_INFO][BLE_VERSION], WIFI_VERSION : response[GW_INFO][WIFI_VERSION]}
        internal_brg_mac_addr = response[GW_INFO][BLE_MAC_ADDR]
        gw_server = response[GW_INFO][WLT_SERVER] if WLT_SERVER in response[GW_INFO] else PROD
    if gw_server != args.server:
        handle_error(f"ERROR: Test server [{args.server}] does not match GW server [{gw_server}]!", start_time)
    print(f"Starting UT with GW ID {test.gw} and internal BRG ID {internal_brg_mac_addr}")
    if not args.latest and not args.rc:
        res = cert_config.config_gw_defaults(test, version=gw_version)[1]
        if res == NO_RESPONSE:
            handle_error("ERROR: Config GW to defaults failed!", start_time)
    else:
        utPrint(f"Skipping configurations for GW {gw} to defaults because of latest/rc flag", "BLUE")
    internal_brg = ut_prep_brg(args, mqttc, start_time, gw, internal_brg_mac_addr, gw_server, test.protobuf)
    return gw, internal_brg, gw_server, gw_version, test.protobuf
```
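A similar illustrative sketch of the `WltTest` phase bookkeeping added in `cert_utils.py`. Constructing the test with an empty line is the same shortcut `ut_prep_gw`/`ut_prep_brg` use; the gateway ID and the failure reason below are made-up values.

```python
# Hypothetical usage sketch - not part of the package diff.
import brg_certificate.cert_utils as cert_utils

# An empty test-list line yields the default PROLOG / TEST_BODY / EPILOG phases.
test = cert_utils.WltTest("", gw="0123456789AB", mqttc=None)

body = str(cert_utils.TEST_BODY)  # Phase names are stored as strings
test.set_phase_rc(body, cert_utils.TEST_FAILED)
test.add_phase_reason(body, "no packets received")  # made-up reason
test.update_overall_rc()

assert test.rc == cert_utils.TEST_FAILED  # any failed phase fails the whole test
print(test.get_phase_reason(body))
```

The per-phase `rc`/`reason` pairs recorded this way are what `cert_results.generate_tests_table` later renders as the "Result & Reason Breakdown" column of the HTML and PDF reports.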
|