wiliot-certificate 1.3.0a1__py3-none-any.whl → 1.4.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brg_certificate/__init__.py +0 -0
- brg_certificate/ag/energous_v0_defines.py +925 -0
- brg_certificate/ag/energous_v1_defines.py +931 -0
- brg_certificate/ag/energous_v2_defines.py +925 -0
- brg_certificate/ag/energous_v3_defines.py +925 -0
- brg_certificate/ag/energous_v4_defines.py +925 -0
- brg_certificate/ag/fanstel_lan_v0_defines.py +925 -0
- brg_certificate/ag/fanstel_lte_v0_defines.py +925 -0
- brg_certificate/ag/fanstel_wifi_v0_defines.py +925 -0
- brg_certificate/ag/minew_lte_v0_defines.py +925 -0
- brg_certificate/ag/wlt_cmd_if.html +102 -0
- brg_certificate/ag/wlt_types.html +6114 -0
- brg_certificate/ag/wlt_types_ag.py +7840 -0
- brg_certificate/ag/wlt_types_ag_jsons/brg2brg_ota.json +142 -0
- brg_certificate/ag/wlt_types_ag_jsons/brg2gw_hb.json +785 -0
- brg_certificate/ag/wlt_types_ag_jsons/brg2gw_hb_sleep.json +139 -0
- brg_certificate/ag/wlt_types_ag_jsons/calibration.json +394 -0
- brg_certificate/ag/wlt_types_ag_jsons/custom.json +515 -0
- brg_certificate/ag/wlt_types_ag_jsons/datapath.json +672 -0
- brg_certificate/ag/wlt_types_ag_jsons/energy2400.json +550 -0
- brg_certificate/ag/wlt_types_ag_jsons/energySub1g.json +595 -0
- brg_certificate/ag/wlt_types_ag_jsons/externalSensor.json +598 -0
- brg_certificate/ag/wlt_types_ag_jsons/interface.json +938 -0
- brg_certificate/ag/wlt_types_ag_jsons/powerManagement.json +1234 -0
- brg_certificate/ag/wlt_types_ag_jsons/side_info_sensor.json +105 -0
- brg_certificate/ag/wlt_types_ag_jsons/signal_indicator_data.json +77 -0
- brg_certificate/ag/wlt_types_ag_jsons/unified_echo_ext_pkt.json +61 -0
- brg_certificate/ag/wlt_types_ag_jsons/unified_echo_pkt.json +110 -0
- brg_certificate/brg_certificate.py +191 -0
- brg_certificate/brg_certificate_cli.py +47 -0
- brg_certificate/cert_common.py +828 -0
- brg_certificate/cert_config.py +395 -0
- brg_certificate/cert_data_sim.py +188 -0
- brg_certificate/cert_defines.py +337 -0
- brg_certificate/cert_gw_sim.py +285 -0
- brg_certificate/cert_mqtt.py +373 -0
- brg_certificate/cert_prints.py +181 -0
- brg_certificate/cert_protobuf.py +88 -0
- brg_certificate/cert_results.py +300 -0
- brg_certificate/cert_utils.py +358 -0
- brg_certificate/certificate_sanity_test_list.txt +36 -0
- brg_certificate/certificate_test_list.txt +43 -0
- brg_certificate/config/eclipse.json +10 -0
- brg_certificate/config/hivemq.json +10 -0
- brg_certificate/config/mosquitto.json +10 -0
- brg_certificate/config/mosquitto.md +95 -0
- brg_certificate/config/wiliot-dev.json +10 -0
- brg_certificate/restore_brg.py +59 -0
- brg_certificate/tests/calibration/interval_test/interval_test.json +13 -0
- brg_certificate/tests/calibration/interval_test/interval_test.py +28 -0
- brg_certificate/tests/calibration/output_power_test/output_power_test.json +13 -0
- brg_certificate/tests/calibration/output_power_test/output_power_test.py +28 -0
- brg_certificate/tests/calibration/pattern_test/pattern_test.json +13 -0
- brg_certificate/tests/calibration/pattern_test/pattern_test.py +70 -0
- brg_certificate/tests/datapath/adaptive_pacer_algo_test/adaptive_pacer_algo_test.json +13 -0
- brg_certificate/tests/datapath/adaptive_pacer_algo_test/adaptive_pacer_algo_test.py +76 -0
- brg_certificate/tests/datapath/num_of_tags_test/num_of_tags_test.json +13 -0
- brg_certificate/tests/datapath/num_of_tags_test/num_of_tags_test.py +83 -0
- brg_certificate/tests/datapath/output_power_test/output_power_test.json +13 -0
- brg_certificate/tests/datapath/output_power_test/output_power_test.py +27 -0
- brg_certificate/tests/datapath/pacer_interval_ble5_test/pacer_interval_ble5_test.json +13 -0
- brg_certificate/tests/datapath/pacer_interval_ble5_test/pacer_interval_ble5_test.py +43 -0
- brg_certificate/tests/datapath/pacer_interval_tags_count_test/pacer_interval_tags_count_test.json +13 -0
- brg_certificate/tests/datapath/pacer_interval_tags_count_test/pacer_interval_tags_count_test.py +63 -0
- brg_certificate/tests/datapath/pacer_interval_test/pacer_interval_test.json +13 -0
- brg_certificate/tests/datapath/pacer_interval_test/pacer_interval_test.py +50 -0
- brg_certificate/tests/datapath/pattern_test/pattern_test.json +13 -0
- brg_certificate/tests/datapath/pattern_test/pattern_test.py +28 -0
- brg_certificate/tests/datapath/pkt_filter_ble5_test/pkt_filter_ble5_test.json +13 -0
- brg_certificate/tests/datapath/pkt_filter_ble5_test/pkt_filter_ble5_test.py +51 -0
- brg_certificate/tests/datapath/pkt_filter_gen3_test/pkt_filter_gen3_test.json +13 -0
- brg_certificate/tests/datapath/pkt_filter_gen3_test/pkt_filter_gen3_test.py +54 -0
- brg_certificate/tests/datapath/pkt_filter_test/pkt_filter_test.json +13 -0
- brg_certificate/tests/datapath/pkt_filter_test/pkt_filter_test.py +55 -0
- brg_certificate/tests/datapath/rssi_threshold_test/rssi_threshold_test.json +13 -0
- brg_certificate/tests/datapath/rssi_threshold_test/rssi_threshold_test.py +73 -0
- brg_certificate/tests/datapath/rx_channel_test/rx_channel_test.json +13 -0
- brg_certificate/tests/datapath/rx_channel_test/rx_channel_test.py +41 -0
- brg_certificate/tests/datapath/rx_rate_gen2_test/rx_rate_gen2_test.json +21 -0
- brg_certificate/tests/datapath/rx_rate_gen2_test/rx_rate_gen2_test.py +184 -0
- brg_certificate/tests/datapath/rx_rate_gen3_test/rx_rate_gen3_test.json +21 -0
- brg_certificate/tests/datapath/rx_rate_gen3_test/rx_rate_gen3_test.py +210 -0
- brg_certificate/tests/datapath/stress_gen3_test/stress_gen3_test.json +30 -0
- brg_certificate/tests/datapath/stress_gen3_test/stress_gen3_test.py +203 -0
- brg_certificate/tests/datapath/stress_test/stress_test.json +30 -0
- brg_certificate/tests/datapath/stress_test/stress_test.py +210 -0
- brg_certificate/tests/datapath/tx_repetition_algo_test/tx_repetition_algo_test.json +13 -0
- brg_certificate/tests/datapath/tx_repetition_algo_test/tx_repetition_algo_test.py +113 -0
- brg_certificate/tests/datapath/tx_repetition_test/tx_repetition_test.json +13 -0
- brg_certificate/tests/datapath/tx_repetition_test/tx_repetition_test.py +79 -0
- brg_certificate/tests/edge_mgmt/actions_test/actions_test.json +13 -0
- brg_certificate/tests/edge_mgmt/actions_test/actions_test.py +432 -0
- brg_certificate/tests/edge_mgmt/brg2brg_ota_ble5_test/brg2brg_ota_ble5_test.json +13 -0
- brg_certificate/tests/edge_mgmt/brg2brg_ota_ble5_test/brg2brg_ota_ble5_test.py +94 -0
- brg_certificate/tests/edge_mgmt/brg2brg_ota_test/brg2brg_ota_test.json +13 -0
- brg_certificate/tests/edge_mgmt/brg2brg_ota_test/brg2brg_ota_test.py +87 -0
- brg_certificate/tests/edge_mgmt/leds_test/leds_test.json +13 -0
- brg_certificate/tests/edge_mgmt/leds_test/leds_test.py +210 -0
- brg_certificate/tests/edge_mgmt/ota_test/ota_test.json +13 -0
- brg_certificate/tests/edge_mgmt/ota_test/ota_test.py +83 -0
- brg_certificate/tests/edge_mgmt/stat_test/stat_test.json +13 -0
- brg_certificate/tests/edge_mgmt/stat_test/stat_test.py +48 -0
- brg_certificate/tests/energy2400/duty_cycle_test/duty_cycle_test.json +13 -0
- brg_certificate/tests/energy2400/duty_cycle_test/duty_cycle_test.py +26 -0
- brg_certificate/tests/energy2400/output_power_test/output_power_test.json +13 -0
- brg_certificate/tests/energy2400/output_power_test/output_power_test.py +27 -0
- brg_certificate/tests/energy2400/pattern_test/pattern_test.json +13 -0
- brg_certificate/tests/energy2400/pattern_test/pattern_test.py +28 -0
- brg_certificate/tests/energy2400/signal_indicator_ble5_test/signal_indicator_ble5_test.json +13 -0
- brg_certificate/tests/energy2400/signal_indicator_ble5_test/signal_indicator_ble5_test.py +398 -0
- brg_certificate/tests/energy2400/signal_indicator_sub1g_2_4_test/signal_indicator_sub1g_2_4_test.json +13 -0
- brg_certificate/tests/energy2400/signal_indicator_sub1g_2_4_test/signal_indicator_sub1g_2_4_test.py +153 -0
- brg_certificate/tests/energy2400/signal_indicator_test/signal_indicator_test.json +13 -0
- brg_certificate/tests/energy2400/signal_indicator_test/signal_indicator_test.py +264 -0
- brg_certificate/tests/energy_sub1g/duty_cycle_test/duty_cycle_test.json +13 -0
- brg_certificate/tests/energy_sub1g/duty_cycle_test/duty_cycle_test.py +27 -0
- brg_certificate/tests/energy_sub1g/pattern_test/pattern_test.json +13 -0
- brg_certificate/tests/energy_sub1g/pattern_test/pattern_test.py +26 -0
- brg_certificate/tests/energy_sub1g/signal_indicator_functionality_test/signal_indicator_functionality_test.json +13 -0
- brg_certificate/tests/energy_sub1g/signal_indicator_functionality_test/signal_indicator_functionality_test.py +397 -0
- brg_certificate/tests/energy_sub1g/signal_indicator_test/signal_indicator_test.json +13 -0
- brg_certificate/tests/energy_sub1g/signal_indicator_test/signal_indicator_test.py +27 -0
- brg_certificate/wltPb_pb2.py +72 -0
- brg_certificate/wltPb_pb2.pyi +227 -0
- brg_certificate/wlt_types.py +114 -0
- gw_certificate/api/extended_api.py +7 -1531
- gw_certificate/api_if/200/data.json +106 -0
- gw_certificate/api_if/200/logs.json +12 -0
- gw_certificate/api_if/200/status.json +47 -0
- gw_certificate/api_if/201/data.json +98 -0
- gw_certificate/api_if/201/logs.json +12 -0
- gw_certificate/api_if/201/status.json +53 -0
- gw_certificate/api_if/202/data.json +83 -0
- gw_certificate/api_if/202/logs.json +12 -0
- gw_certificate/api_if/202/status.json +60 -0
- gw_certificate/api_if/203/data.json +85 -0
- gw_certificate/api_if/203/logs.json +12 -0
- gw_certificate/api_if/203/status.json +63 -0
- gw_certificate/api_if/204/data.json +85 -0
- gw_certificate/api_if/204/logs.json +12 -0
- gw_certificate/api_if/204/status.json +63 -0
- gw_certificate/api_if/205/data.json +85 -0
- gw_certificate/api_if/205/logs.json +12 -0
- gw_certificate/api_if/205/status.json +63 -0
- gw_certificate/api_if/api_validation.py +0 -2
- gw_certificate/common/analysis_data_bricks.py +18 -1413
- gw_certificate/common/debug.py +0 -21
- gw_certificate/common/utils.py +1 -212
- gw_certificate/common/utils_defines.py +0 -87
- gw_certificate/gw_certificate.py +9 -7
- gw_certificate/gw_certificate_cli.py +39 -23
- gw_certificate/interface/4.4.52_app.zip +0 -0
- gw_certificate/interface/4.4.52_sd_bl_app.zip +0 -0
- gw_certificate/interface/ble_simulator.py +0 -32
- gw_certificate/interface/if_defines.py +1 -0
- gw_certificate/interface/mqtt.py +96 -19
- gw_certificate/interface/nrfutil-linux +0 -0
- gw_certificate/interface/nrfutil-mac +0 -0
- gw_certificate/interface/nrfutil.exe +0 -0
- gw_certificate/interface/pkt_generator.py +0 -82
- gw_certificate/interface/uart_if.py +73 -43
- gw_certificate/templates/results.html +1 -1
- gw_certificate/tests/__init__.py +1 -2
- gw_certificate/tests/actions.py +134 -9
- gw_certificate/tests/connection.py +10 -5
- gw_certificate/tests/downlink.py +2 -4
- gw_certificate/tests/generic.py +62 -12
- gw_certificate/tests/registration.py +78 -27
- gw_certificate/tests/static/generated_packet_table.py +12 -48
- gw_certificate/tests/static/packet_table.csv +10048 -10048
- gw_certificate/tests/static/references.py +2 -1
- gw_certificate/tests/static/uplink_defines.py +0 -7
- gw_certificate/tests/throughput.py +7 -12
- gw_certificate/tests/uplink.py +83 -43
- {wiliot_certificate-1.3.0a1.dist-info → wiliot_certificate-1.4.0a2.dist-info}/METADATA +59 -8
- wiliot_certificate-1.4.0a2.dist-info/RECORD +198 -0
- {wiliot_certificate-1.3.0a1.dist-info → wiliot_certificate-1.4.0a2.dist-info}/WHEEL +1 -1
- wiliot_certificate-1.4.0a2.dist-info/entry_points.txt +3 -0
- wiliot_certificate-1.4.0a2.dist-info/top_level.txt +2 -0
- gw_certificate/interface/packet_error.py +0 -22
- wiliot_certificate-1.3.0a1.dist-info/RECORD +0 -51
- wiliot_certificate-1.3.0a1.dist-info/entry_points.txt +0 -2
- wiliot_certificate-1.3.0a1.dist-info/top_level.txt +0 -1
- {wiliot_certificate-1.3.0a1.dist-info → wiliot_certificate-1.4.0a2.dist-info}/LICENSE +0 -0
@@ -0,0 +1,203 @@
+from brg_certificate.cert_prints import *
+from brg_certificate.cert_defines import *
+from brg_certificate.wlt_types import *
+import brg_certificate.cert_common as cert_common
+import brg_certificate.cert_config as cert_config
+import brg_certificate.cert_data_sim as cert_data_sim
+from brg_certificate.cert_gw_sim import DEDUPLICATION_PKTS
+import statistics
+import datetime
+import sys
+import time
+
+def configure_pacer_n_times(test, num_of_times, pacer_interval, datapath_module):
+    test = cert_config.brg_configure(test, fields=[BRG_PACER_INTERVAL], values=[pacer_interval], module=datapath_module)[0]
+    if test.rc == TEST_FAILED and test.reason != TEST_SUCCESS:
+        for i in range(num_of_times):
+            if test.rc == TEST_PASSED:
+                return test
+            print(f"sleeping for 10 seconds before trying to configure pacer again\n")
+            time.sleep(10)
+            test = cert_config.brg_configure(test, fields=[BRG_PACER_INTERVAL], values=[pacer_interval], module=datapath_module)[0]
+        test.add_reason("Didn't succeed to configure after two attempts - No pkt was found!")
+    return test
+
+def metric_checking_HB(test, mgmt_type_list, tx_queue_expected, pacer_increment_expected):
+    if not mgmt_type_list:
+        test.add_reason("\nDidn't find HB pkt, therefore will not check tx_queue and pacer increment\n")
+        print(f"Didn't find HB pkt, therefore will not check tx_queue and pacer increment")
+    else:
+        # check tx queue
+        watermarks = [pkt.tx_queue_watermark for pkt in mgmt_type_list]
+        half_index = len(watermarks) // 2
+        tx_queue_HB = statistics.mean(watermarks[half_index:])
+        if not (tx_queue_expected[0] <= tx_queue_HB <= tx_queue_expected[1]):
+            test.add_reason(f"tx_queue: {tx_queue_HB}")
+            print(f"\ntx_queue value is wrong!\nexpected: {tx_queue_expected}, got: {tx_queue_HB}")
+        else:
+            test.add_reason(f"tx_queue: {tx_queue_HB}")
+            print(f"\ntx_queue from HB : {tx_queue_HB}\n")
+
+        # check pacer increment
+        pacer_increment_HB = [pkt.effective_pacer_increment for pkt in mgmt_type_list]
+        average_pacer_increment_HB = statistics.mean(pacer_increment_HB)
+        if not (pacer_increment_expected[0] <= average_pacer_increment_HB <= pacer_increment_expected[1]):
+            test.add_reason(f"pacer_increment:{average_pacer_increment_HB}")
+            print(f"\npacer_increment value is wrong\nexpected: {pacer_increment_expected}\ngot: {average_pacer_increment_HB}")
+        else:
+            test.add_reason(f"pacer_increment: {average_pacer_increment_HB}")
+            print(f"\naverage pacer_increment from HB: {average_pacer_increment_HB}\n")
+    return test
+
+def metric_checking_df(test, check, pacer_interval, df, repetition_value_expected, brg_latency_expected, num_of_pixels_expected):
+    if df.empty:
+        print(f" df is empty, therefore will not check repetitions, brg latency and num of tags")
+        test.rc = TEST_FAILED
+        test.add_reason(f"df is empty, therefore will not check repetitions, brg latency and num of tags")
+    else:
+        print(f"result of pacer interval: {pacer_interval}\n")
+        # check repetition value
+        payload_counts_per_tag = df.groupby(TAG_ID)[PAYLOAD].value_counts()
+        average_payload_count = round(payload_counts_per_tag.mean(), 2)
+        if not repetition_value_expected[0] <= average_payload_count <= repetition_value_expected[1]:
+            if check:
+                test.rc = TEST_FAILED
+                test.add_reason(f"Repetition:{average_payload_count}, expected: {repetition_value_expected},")
+                print(f"Repetition value is wrong! \nexpected:{repetition_value_expected}\ngot: {average_payload_count}")
+            else:
+                test.add_reason(f"Repetition:{average_payload_count}, expected: {repetition_value_expected},")
+                print(f"Repetition value is wrong! \nexpected:{repetition_value_expected}\ngot: {average_payload_count}")
+        else:
+            test.add_reason(f"Repetition value: {average_payload_count}")
+            print(f"Repetition value is correct! got: {average_payload_count}")
+
+        # check num of tags, with tolerance of 5%
+        num_of_tags = len(df[TAG_ID].unique())
+        if not num_of_pixels_expected*0.95 <= num_of_tags <= num_of_pixels_expected*1.05:
+            test.add_reason(f"num of tags: {num_of_tags}")
+            print(f"\n num of tags is not as expected\nexpected: {num_of_pixels_expected}, got: {num_of_tags}")
+        else:
+            test.add_reason(f"num of tags: {num_of_tags}")
+            print(f"\nnum of tags from df: {num_of_tags}\n")
+
+        # check brg_latency
+        if check:
+            brg_latency_avg = round(df[BRG_LATENCY].mean(),2)
+            if not (brg_latency_expected[0] <= brg_latency_avg <= brg_latency_expected[1]):
+                test.add_reason(f"brg_latency:{brg_latency_avg}")
+                print(f"Average brg_latency: {brg_latency_avg}")
+            else:
+                test.add_reason(f"brg_latency: {brg_latency_avg}")
+                print(f"Average brg_latency: {brg_latency_avg}")
+        else:
+            brg_latency_avg = round(df[BRG_LATENCY].mean(),2)
+            print(f"Average brg_latency: {brg_latency_avg}")
+    return test
+
+def combination_func(test, datapath_module, pacer_interval, num_of_sim_tags, repetition_value_expected, tx_queue_expected, pacer_increment_expected, brg_latency_expected):
+    test = configure_pacer_n_times(test, 2, pacer_interval, datapath_module)
+    time.sleep(30)
+    # first df
+    df = cert_common.data_scan(test, scan_time=30, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+    cert_common.display_data(df, nfpkt=True, tbc=True, name_prefix=f"stress_{pacer_interval}_", dir=test.dir)
+    test, hbs = cert_common.scan_for_mgmt_pkts(test, [eval_pkt(f'Brg2GwHbV{test.active_brg.api_version}')])
+    hbs = [p[MGMT_PKT].pkt for p in hbs]
+    print(f"result of first df\n")
+    check = False
+    test = metric_checking_df(test, check, pacer_interval, df, repetition_value_expected, brg_latency_expected, num_of_sim_tags)
+    time.sleep(30)
+    # second df
+    df = cert_common.data_scan(test, scan_time=60, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+    cert_common.display_data(df, nfpkt=True, tbc=True, name_prefix=f"stress_{pacer_interval}_", dir=test.dir)
+    test, hbs = cert_common.scan_for_mgmt_pkts(test, [eval_pkt(f'Brg2GwHbV{test.active_brg.api_version}')])
+    hbs = [p[MGMT_PKT].pkt for p in hbs]
+    print(f"result of second df\n")
+    check = True
+    test = metric_checking_df(test, check, pacer_interval, df, repetition_value_expected, brg_latency_expected, num_of_sim_tags)
+    test = metric_checking_HB(test, hbs, tx_queue_expected, pacer_increment_expected)
+    return test
+
+def rep3(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    pacer_interval = 20
+    test = combination_func(test, datapath_module, pacer_interval=pacer_interval, num_of_sim_tags=num_of_sim_tags, repetition_value_expected=[2,3], tx_queue_expected=[20,40], pacer_increment_expected=[0,2], brg_latency_expected=[0,10])
+    time.sleep(5)
+    return test
+
+def rep2(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    pacer_interval = 15
+    test = combination_func(test, datapath_module, pacer_interval=pacer_interval, num_of_sim_tags=num_of_sim_tags, repetition_value_expected=[1.5,2.5], tx_queue_expected=[20,40], pacer_increment_expected=[0,2], brg_latency_expected=[10,200])
+    time.sleep(5)
+    return test
+
+def rep1(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    pacer_interval = 9
+    test = combination_func(test, datapath_module, pacer_interval=pacer_interval, num_of_sim_tags=num_of_sim_tags, repetition_value_expected=[1,2], tx_queue_expected=[20,40], pacer_increment_expected=[0,2], brg_latency_expected=[200,300])
+    time.sleep(5)
+    return test
+
+def rep1_adaptive_pacer(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    pacer_interval = 1
+    test = combination_func(test, datapath_module, pacer_interval=pacer_interval, num_of_sim_tags=num_of_sim_tags, repetition_value_expected=[1,2], tx_queue_expected=[20,40], pacer_increment_expected=[2,20], brg_latency_expected=[300,1000])
+    time.sleep(5)
+    return test
+
+def pixels_burst(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    # NOTE: the pattern had to be changed because there is no option to increase the number of pixels in the same thread; it causes an "index out of bound" error
+    pixel_sim_thread.stop()
+    pacer_interval = 15
+    test = configure_pacer_n_times(test, 2, pacer_interval, datapath_module)
+    pixel_sim_thread = cert_data_sim.DataSimThread(test=test, num_of_pixels=num_of_sim_tags, duplicates=1, delay=0, pkt_types=[0], pixels_type=GEN3)
+    pixel_sim_thread.start()
+    df = cert_common.data_scan(test, scan_time=180, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+    check = True
+    test = metric_checking_df(test, check, pacer_interval, df, [2,3], [0,10], 200)
+    # we must have df, so we will try twice again to get it
+    if test.rc == TEST_FAILED:
+        for i in range(2):
+            if test.rc == TEST_PASSED:
+                break
+            df = cert_common.data_scan(test, scan_time=30, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+            test = metric_checking_df(test, check, pacer_interval, df, [2,3], [0,10], 200)
+    pixel_sim_thread.stop()
+    # change the number of pixels to 400, and check that the repetition value is 1 in a short time
+    pixel_sim_thread = cert_data_sim.DataSimThread(test=test, num_of_pixels=400, duplicates=1, delay=0, pkt_types=[0], pixels_type=GEN3)
+    pixel_sim_thread.start()
+    df = cert_common.data_scan(test, scan_time=30, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+    test = metric_checking_df(test, check, pacer_interval, df, [1,2], [0,10], 400)
+    pixel_sim_thread.stop()
+    return test
+
+def run(test):
+    # Test prolog
+    datapath_module = eval_pkt(f'ModuleDatapathV{test.active_brg.api_version}')
+    test = cert_common.test_prolog(test)
+    if test.rc == TEST_FAILED or test.reason != TEST_SUCCESS:
+        return cert_common.test_epilog(test)
+    # config GW deduplication pkts = 0
+    print("Configuring GW with !deduplication_pkts 0")
+    cert_config.gw_action(test, f"{DEDUPLICATION_PKTS} 0")
+    if test.rc == TEST_FAILED and test.exit_on_param_failure:
+        return cert_common.test_epilog(test, revert_gws=True)
+
+    STRESS_TEST_MAP = {"rep3": rep3, "rep2": rep2, "rep1": rep1, "rep1_adaptive_pacer": rep1_adaptive_pacer, "pixels_burst": pixels_burst}
+    num_of_pixels = 300
+    pixel_sim_thread = cert_data_sim.DataSimThread(test=test, num_of_pixels=num_of_pixels, duplicates=1, delay=0, pkt_types=[0], pixels_type=GEN3)
+    pixel_sim_thread.start()
+    time.sleep(30)
+    for param in test.params:
+        functionality_run_print(param.name)
+        test = STRESS_TEST_MAP[param.value](test, datapath_module, num_of_pixels, pixel_sim_thread)
+        generate_log_file(test, param.name)
+        field_functionality_pass_fail_print(test, param.name)
+        test.set_phase_rc(param.name, test.rc)
+        test.add_phase_reason(param.name, test.reason)
+        if test.rc == TEST_FAILED and test.exit_on_param_failure:
+            break
+        else:
+            test.reset_result()
+        time.sleep(5)
+    pixel_sim_thread.stop()
+    # Re-enable unified packets deduplication
+    cert_config.gw_action(test, f"{DEDUPLICATION_PKTS} 1")
+
+    return cert_common.test_epilog(test, revert_brgs=True, revert_gws=True, modules=[datapath_module])
@@ -0,0 +1,30 @@
+{
+    "name": "stress test",
+    "module": "DataPath",
+    "purpose": [
+        "verify the functionality of the bridge under stress conditions",
+        "verify that the algorithms 'adaptive repetition' and 'adaptive pacer' are working correctly",
+        "verify an edge case where the repetition is stable and a large number of packets arrives in a short time"
+    ],
+    "documentation": [
+        "https://community.wiliot.com/customers/s/article/Wiliot-Network-Data-Path-Module-Relaying-data",
+        "add more links here - about adaptive repetition, adaptive pacer"
+    ],
+    "initialCondition": "Bridge configured to defaults",
+    "procedure": [
+        "Test prolog",
+        "define simulation with 200 packets with 0 delay (0.02 sec)",
+        "rep 3 - config pacer interval 15, then check repetition value = 3, tx_queue = 0-20, pacer increment = 0, brg latency = 0, num of tags = all tags",
+        "rep 2 - config pacer interval 9, then check repetition value = 2, tx_queue = 20-40, pacer increment = 0, brg latency = 0-200, num of tags = all tags",
+        "rep 1 - config pacer interval 6, then check repetition value = 1, tx_queue = 40-60, pacer increment = 0, brg latency = 200-300, num of tags = all tags",
+        "rep 1 adaptive pacer - config pacer interval 1, then check repetition value = 1, tx_queue > 60, pacer increment = 3, brg latency > 300, num of tags = all tags",
+        "pixels burst - config pacer interval 15, then add 200 more packets with 0 delay (0.02 sec), then check repetition value = 1 and not 2",
+        "Test epilog"
+    ],
+    "expectedOutcome": "all metric values are as expected, indicating that the bridge and the algorithms are working correctly",
+    "mandatory": 1,
+    "multiBridgeTest": 0,
+    "gwOnlyTest": 0,
+    "allSupportedValues": ["rep3", "rep2", "rep1", "rep1_adaptive_pacer", "pixels_burst"]
+
+}
@@ -0,0 +1,210 @@
+from brg_certificate.cert_prints import *
+from brg_certificate.cert_defines import *
+from brg_certificate.wlt_types import *
+import brg_certificate.cert_common as cert_common
+import brg_certificate.cert_config as cert_config
+import brg_certificate.cert_data_sim as cert_data_sim
+from brg_certificate.cert_gw_sim import DEDUPLICATION_PKTS
+import statistics
+import time
+
+def configure_pacer_n_times(test, num_of_times, pacer_interval, datapath_module):
+    test = cert_config.brg_configure(test, fields=[BRG_PACER_INTERVAL], values=[pacer_interval], module=datapath_module)[0]
+    if test.rc == TEST_FAILED and test.reason != TEST_SUCCESS:
+        for i in range(num_of_times):
+            if test.rc == TEST_PASSED:
+                return test
+            print(f"sleeping for 10 seconds before trying to configure pacer again\n")
+            time.sleep(10)
+            test = cert_config.brg_configure(test, fields=[BRG_PACER_INTERVAL], values=[pacer_interval], module=datapath_module)[0]
+        test.add_reason("Didn't succeed to configure after two attempts - No pkt was found!")
+    return test
+
+def metric_checking_HB(test, mgmt_type_list, tx_queue_expected, pacer_increment_expected):
+    if not mgmt_type_list:
+        test.add_reason("\nDidn't find HB pkt, therefore will not check tx_queue and pacer increment\n")
+        print(f"Didn't find HB pkt, therefore will not check tx_queue and pacer increment")
+    else:
+        # check tx queue
+        watermarks = [pkt.tx_queue_watermark for pkt in mgmt_type_list]
+        half_index = len(watermarks) // 2
+        tx_queue_HB = statistics.mean(watermarks[half_index:])
+        if not (tx_queue_expected[0] <= tx_queue_HB <= tx_queue_expected[1]):
+            test.add_reason(f"tx_queue: {tx_queue_HB}")
+            print(f"\ntx_queue value is wrong!\nexpected: {tx_queue_expected}, got: {tx_queue_HB}")
+        else:
+            test.add_reason(f"tx_queue: {tx_queue_HB}")
+            print(f"\ntx_queue from HB : {tx_queue_HB}\n")
+
+        # check pacer increment
+        pacer_increment_HB = [pkt.effective_pacer_increment for pkt in mgmt_type_list]
+        average_pacer_increment_HB = statistics.mean(pacer_increment_HB)
+        if not (pacer_increment_expected[0] <= average_pacer_increment_HB <= pacer_increment_expected[1]):
+            test.add_reason(f"pacer_increment:{average_pacer_increment_HB}")
+            print(f"\npacer_increment value is wrong\nexpected: {pacer_increment_expected}\ngot: {average_pacer_increment_HB}")
+        else:
+            test.add_reason(f"pacer_increment: {average_pacer_increment_HB}")
+            print(f"\naverage pacer_increment from HB: {average_pacer_increment_HB}\n")
+    return test
+
+def metric_checking_df(test, check, pacer_interval, df, repetition_value_expected, brg_latency_expected, num_of_pixels_expected):
+    if df.empty:
+        print(f" df is empty, therefore will not check repetitions, brg latency and num of tags")
+        test.rc = TEST_FAILED
+        test.add_reason(f"df is empty, therefore will not check repetitions, brg latency and num of tags")
+    else:
+        print(f"result of pacer interval: {pacer_interval}\n")
+        # check repetition value
+        payload_counts_per_tag = df.groupby(TAG_ID)[PAYLOAD].value_counts()
+        average_payload_count = round(payload_counts_per_tag.mean(), 2)
+        if not repetition_value_expected[0] <= average_payload_count <= repetition_value_expected[1]:
+            if check:
+                test.rc = TEST_FAILED
+                test.add_reason(f"Repetition:{average_payload_count}, expected: {repetition_value_expected},")
+                print(f"Repetition value is wrong! \nexpected:{repetition_value_expected}\ngot: {average_payload_count}")
+            else:
+                test.add_reason(f"Repetition:{average_payload_count}, expected: {repetition_value_expected},")
+                print(f"Repetition value is wrong! \nexpected:{repetition_value_expected}\ngot: {average_payload_count}")
+        else:
+            test.add_reason(f"Repetition value: {average_payload_count}")
+            print(f"Repetition value is correct! got: {average_payload_count}")
+
+        # check num of tags, with tolerance of 5%
+        num_of_tags = len(df[TAG_ID].unique())
+        if not num_of_pixels_expected*0.95 <= num_of_tags <= num_of_pixels_expected*1.05:
+            test.add_reason(f"num of tags: {num_of_tags}")
+            print(f"\n num of tags is not as expected\nexpected: {num_of_pixels_expected}, got: {num_of_tags}")
+        else:
+            test.add_reason(f"num of tags: {num_of_tags}")
+            print(f"\nnum of tags from df: {num_of_tags}\n")
+
+        # check brg_latency
+        if check:
+            brg_latency_avg = round(df[BRG_LATENCY].mean(),2)
+            if not (brg_latency_expected[0] <= brg_latency_avg <= brg_latency_expected[1]):
+                test.add_reason(f"brg_latency:{brg_latency_avg}")
+                print(f"Average brg_latency: {brg_latency_avg}")
+            else:
+                test.add_reason(f"brg_latency: {brg_latency_avg}")
+                print(f"Average brg_latency: {brg_latency_avg}")
+        else:
+            brg_latency_avg = round(df[BRG_LATENCY].mean(),2)
+            print(f"Average brg_latency: {brg_latency_avg}")
+    return test
+
+def combination_func(test, datapath_module, pacer_interval, num_of_sim_tags, repetition_value_expected, tx_queue_expected, pacer_increment_expected, brg_latency_expected):
+    test = configure_pacer_n_times(test, 2, pacer_interval, datapath_module)
+    time.sleep(30)
+    # first df
+    df = cert_common.data_scan(test, scan_time=30, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+    cert_common.display_data(df, nfpkt=True, tbc=True, name_prefix=f"stress_{pacer_interval}_", dir=test.dir)
+    test, hbs = cert_common.scan_for_mgmt_pkts(test, [eval_pkt(f'Brg2GwHbV{test.active_brg.api_version}')])
+    print(f"number of HB packets: {len(hbs)}") # TODO remove
+    for p in hbs: # TODO remove
+        print(f"WATERMARK:{p[MGMT_PKT].pkt.tx_queue_watermark}, pacer increment:{p[MGMT_PKT].pkt.effective_pacer_increment}") # TODO remove
+    hbs = [p[MGMT_PKT].pkt for p in hbs]
+    print(f"result of first df\n")
+    check = False
+    test = metric_checking_df(test, check, pacer_interval, df, repetition_value_expected, brg_latency_expected, num_of_sim_tags)
+    time.sleep(30)
+    # second df
+    df = cert_common.data_scan(test, scan_time=60, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+    cert_common.display_data(df, nfpkt=True, tbc=True, name_prefix=f"stress_{pacer_interval}_", dir=test.dir)
+    test, hbs = cert_common.scan_for_mgmt_pkts(test, [eval_pkt(f'Brg2GwHbV{test.active_brg.api_version}')])
+    hbs = [p[MGMT_PKT].pkt for p in hbs]
+    print(f"result of second df\n")
+    check = True
+    test = metric_checking_df(test, check, pacer_interval, df, repetition_value_expected, brg_latency_expected, num_of_sim_tags)
+    test = metric_checking_HB(test, hbs, tx_queue_expected, pacer_increment_expected)
+    return test
+
+def rep3(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    # step 1 - config pacer interval=15, then check repetition value = 3, tx_queue ~ 0, pacer increment ~ 0, brg latency ~ 0, num of tags = all tags.
+    pacer_interval = 20
+    test = combination_func(test, datapath_module, pacer_interval=pacer_interval, num_of_sim_tags=num_of_sim_tags, repetition_value_expected=[2,3], tx_queue_expected=[20,40], pacer_increment_expected=[0,2], brg_latency_expected=[0,10])
+    time.sleep(5)
+    return test
+
+def rep2(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    # "step 2 - config pacer interval 9, then check repetition value = 2, tx_queue = 20-40, pacer increment = 0, brg latency = 0-200, num of tags = all tags"
+    pacer_interval = 15
+    test = combination_func(test, datapath_module, pacer_interval=pacer_interval, num_of_sim_tags=num_of_sim_tags, repetition_value_expected=[1.5,2.5], tx_queue_expected=[20,40], pacer_increment_expected=[0,2], brg_latency_expected=[10,200])
+    # to check whether the df being empty is an effect of the run
+    time.sleep(5)
+    return test
+
+def rep1(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    # "step 3 - config pacer interval 6, then check repetition value = 1, tx_queue 40-60, pacer increment ~ 0, brg latency 200-300, num of tags = all tags"
+    pacer_interval = 9
+    test = combination_func(test, datapath_module, pacer_interval=pacer_interval, num_of_sim_tags=num_of_sim_tags, repetition_value_expected=[1,2], tx_queue_expected=[20,40], pacer_increment_expected=[0,2], brg_latency_expected=[200,300])
+    time.sleep(5)
+    return test
+
+def rep1_adaptive_pacer(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    # "step 4 - config pacer interval 1, then check repetition value = 1, tx_queue > 60, pacer increment = 3, brg latency > 300, num of tags = all tags"
+    pacer_interval = 1
+    test = combination_func(test, datapath_module, pacer_interval=pacer_interval, num_of_sim_tags=num_of_sim_tags, repetition_value_expected=[1,2], tx_queue_expected=[20,40], pacer_increment_expected=[2,20], brg_latency_expected=[300,1000])
+    time.sleep(5)
+    return test
+
+def pixels_burst(test, datapath_module, num_of_sim_tags, pixel_sim_thread):
+    # NOTE: the pattern had to be changed because there is no option to increase the number of pixels in the same thread; it causes an "index out of bound" error
+    # "step 5 - config pacer interval 15, then add 200 more packets with 0 delay (0.02 sec), then check repetition value = 1 and not 2"
+    pixel_sim_thread.stop()
+    pacer_interval = 15
+    test = configure_pacer_n_times(test, 2, pacer_interval, datapath_module)
+    pixel_sim_thread = cert_data_sim.DataSimThread(test=test, num_of_pixels=num_of_sim_tags, duplicates=1, delay=0, pkt_types=[0], pixels_type=GEN2)
+    pixel_sim_thread.start()
+    df = cert_common.data_scan(test, scan_time=180, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+    check = True
+    test = metric_checking_df(test, check, pacer_interval, df, [2,3], [0,10], 200)
+    # we must have df, so we will try twice again to get it
+    if test.rc == TEST_FAILED:
+        for i in range(2):
+            if test.rc == TEST_PASSED:
+                break
+            df = cert_common.data_scan(test, scan_time=30, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+            test = metric_checking_df(test, check, pacer_interval, df, [2,3], [0,10], 200)
+    pixel_sim_thread.stop()
+    # change the number of pixels to 400, and check that the repetition value is 1 in a short time
+    pixel_sim_thread = cert_data_sim.DataSimThread(test=test, num_of_pixels=400, duplicates=1, delay=0, pkt_types=[0], pixels_type=GEN2)
+    pixel_sim_thread.start()
+    df = cert_common.data_scan(test, scan_time=30, brg_data=(not test.internal_brg), gw_data=test.internal_brg)
+    test = metric_checking_df(test, check, pacer_interval, df, [1,2], [0,10], 400)
+    pixel_sim_thread.stop()
+    return test
+
+def run(test):
+    # Test prolog
+    datapath_module = eval_pkt(f'ModuleDatapathV{test.active_brg.api_version}')
+    test = cert_common.test_prolog(test)
+    if test.rc == TEST_FAILED or test.reason != TEST_SUCCESS:
+        return cert_common.test_epilog(test)
+    # config GW deduplication pkts = 0
+    print("Configuring GW with !deduplication_pkts 0")
+    cert_config.gw_action(test, f"{DEDUPLICATION_PKTS} 0")
+    if test.rc == TEST_FAILED and test.exit_on_param_failure:
+        return cert_common.test_epilog(test, revert_gws=True)
+
+    STRESS_TEST_MAP = {"rep3": rep3, "rep2": rep2, "rep1": rep1, "rep1_adaptive_pacer": rep1_adaptive_pacer, "pixels_burst": pixels_burst}
+    num_of_pixels = 300
+    pixel_sim_thread = cert_data_sim.DataSimThread(test=test, num_of_pixels=num_of_pixels, duplicates=1, delay=0, pkt_types=[0], pixels_type=GEN2)
+    pixel_sim_thread.start()
+    time.sleep(30)
+    for param in test.params:
+        functionality_run_print(param.name)
+        test = STRESS_TEST_MAP[param.value](test, datapath_module, num_of_pixels, pixel_sim_thread)
+        generate_log_file(test, param.name)
+        field_functionality_pass_fail_print(test, param.name)
+        test.set_phase_rc(param.name, test.rc)
+        test.add_phase_reason(param.name, test.reason)
+        if test.rc == TEST_FAILED and test.exit_on_param_failure:
+            break
+        else:
+            test.reset_result()
+        time.sleep(5)
+    pixel_sim_thread.stop()
+    # Re-enable unified packets deduplication
+    cert_config.gw_action(test, f"{DEDUPLICATION_PKTS} 1")
+
+    return cert_common.test_epilog(test, revert_brgs=True, revert_gws=True, modules=[datapath_module])
@@ -0,0 +1,13 @@
+{
+    "name": "tx_repetition_algo_test",
+    "module": "datapath",
+    "purpose": "Test the tx repetition adjustment algorithm",
+    "documentaion": "<TEST_DOCUMENTATION_LINK>",
+    "initialCondition": "Bridge configured to defaults",
+    "procedure": ["Test prolog", "Configure bridge tx repetition to 0 to activate the tx repetition adjustment algorithm, and packets de-duplication to 0 to enable analysis", "Scan for packets and examine tx repetitions auto adjustment over time", "Test epilog and revert to defaults"],
+    "expectedOutcome": "All configurations completed successfully and tx repetition algorithm works as expected",
+    "mandatory": 0,
+    "multiBridgeTest": 0,
+    "gwOnlyTest": 0,
+    "allSupportedValues": []
+}
@@ -0,0 +1,113 @@
+# This test runs tx repetitions value = 0 to examine the algo
+# the BRG cfg is meant to stretch the BRG tx queue so the algo is actively changing the tx rep val
+
+from brg_certificate.cert_prints import *
+from brg_certificate.cert_defines import *
+from brg_certificate.wlt_types import *
+import brg_certificate.cert_common as cert_common
+import brg_certificate.cert_config as cert_config
+import os, statistics, math
+import matplotlib.pyplot as plt
+import plotly.graph_objects as go
+from brg_certificate.cert_gw_sim import DEDUPLICATION_PKTS
+
+SCAN_TIME = 60 * 30
+
+def track_tx_rep(test, sorted_database):
+    tx_reps = []
+    times = []
+    for rep in sorted_database:
+        val = sorted_database[rep]
+        tx_reps.append(val[0])
+        times.append(val[1])
+
+    # Plotting the graph
+    plt.plot(times, tx_reps)
+    plt.xlabel(f'Time (total of {SCAN_TIME} seconds)')
+    plt.ylabel('TX Repetitions')
+    plt.title('TX Rep Algo - pkts rep over time')
+    plt.grid(True)
+
+    # Close the Matplotlib plot
+    plt.close()
+
+    # Create a line plot
+    fig = go.Figure()
+    fig.add_trace(go.Scatter(x=times, y=tx_reps, mode='lines'))
+    # Add titles and labels
+    fig.update_layout(
+        title='TX Rep Algo - pkts rep over time',
+        xaxis_title=f'Time (total of {SCAN_TIME} seconds)',
+        yaxis_title='TX Repetitions',
+        showlegend=False,
+        template="plotly_white"
+    )
+
+    # Save the graph as an HTML file
+    html_file_path = os.path.join(test.dir, "tx_rep_algo_graph.html")
+    fig.write_html(html_file_path)
+
+    return test, tx_reps
+
+def tx_rep_analysis(test):
+    # Clear data path
+    cert_common.wait_time_n_print(CLEAR_DATA_PATH_TIMEOUT)
+    test.mqttc.flush_pkts()
+
+    # Collect pkts
+    mqtt_scan_wait(test, SCAN_TIME)
+    pkts = cert_mqtt.get_unified_data_pkts(test)
+    print("Found {} unified packets".format(len(pkts)))
+
+    # Count payloads
+    pkt_payload_counter = {} # payload -> (count, first timestamp)
+    for p in pkts:
+        cur_pkt = p[PAYLOAD]
+        if cur_pkt in pkt_payload_counter:
+            pkt_payload_counter[cur_pkt] = (pkt_payload_counter[cur_pkt][0] + 1, pkt_payload_counter[cur_pkt][1])
+        else:
+            pkt_payload_counter[cur_pkt] = (1, p[TIMESTAMP])
+    generate_log_file(test, "0")
+
+    # Sort the data according to the time value
+    sorted_database = dict(sorted(pkt_payload_counter.items(), key=lambda item: item[1][1]))
+    test, tx_reps = track_tx_rep(test, sorted_database)
+
+    # Calculate total average, top val & min val
+    avg = statistics.mean(tx_reps)
+    ceil = math.ceil(avg)
+    floor = math.floor(avg)
+    print(f"Averaged {avg} repetitions. ceil[{ceil}] floor[{floor}]")
+
+    not_in_range = 0
+    for i in tx_reps:
+        if i > ceil or i < floor:
+            not_in_range += 1
+    if not_in_range:
+        test.rc = TEST_FAILED
+        test.add_reason(f"There are a total of {not_in_range} payloads outside the average (out of {len(tx_reps)})")
+        print(f"total counted tx_reps[{tx_reps}]")
+    return test
+
+def run(test):
+
+    test = cert_common.test_prolog(test)
+    if test.rc == TEST_FAILED or test.reason != TEST_SUCCESS:
+        return cert_common.test_epilog(test)
+
+    datapath_module = eval_pkt(f'ModuleDatapathV{test.active_brg.api_version}')
+
+    print("Configuring GW")
+    # Set packets deduplication off to count the number of pkts from the BRG
+    cert_config.gw_action(test, f"{DEDUPLICATION_PKTS} 0")
+    if test.rc == TEST_FAILED and test.exit_on_param_failure:
+        return cert_common.test_epilog(test, revert_gws=True)
+
+    test = cert_config.brg_configure(test, fields=[BRG_TX_REPETITION, BRG_PACER_INTERVAL], values=[0, 1], module=datapath_module)[0]
+    if test.rc == TEST_FAILED and test.exit_on_param_failure:
+        return cert_common.test_epilog(test, revert_brgs=True, revert_gws=True, modules=[datapath_module])
+    test = tx_rep_analysis(test)
+
+    # Re-enable unified packets deduplication
+    cert_config.gw_action(test, f"{DEDUPLICATION_PKTS} 1")
+    return cert_common.test_epilog(test, revert_brgs=True, revert_gws=True, modules=[datapath_module])
@@ -0,0 +1,13 @@
+{
+    "name": "tx_repetition_test",
+    "module": "datapath",
+    "purpose": "Test tx repetition feature configuration and functionality in the bridge for different tx repetition values",
+    "documentaion": "<TEST_DOCUMENTATION_LINK>",
+    "initialCondition": "Bridge & data simulator configured to defaults",
+    "procedure": ["Test prolog", "Tx repetition configuration", "Generate pixels packets & scan for packets in the bridge", "Compare repetitions mean per packet to the configured value", "Repeat for all given tx repetition values", "Test epilog and revert to defaults"],
+    "expectedOutcome": "All values configured successfully and actual repetitions found matching to the configured values",
+    "mandatory": 1,
+    "multiBridgeTest": 0,
+    "gwOnlyTest": 0,
+    "allSupportedValues": [1, 2, 3, 4, 5, 6]
+}
@@ -0,0 +1,79 @@
+# This test will run on "params" mode when it gets parameters in its call - check config only
+# Otherwise it will run on "auto" mode - collect packets and analyse packets actual repetitions
+
+from brg_certificate.cert_prints import *
+from brg_certificate.cert_defines import *
+from brg_certificate.wlt_types import *
+import brg_certificate.cert_common as cert_common
+import brg_certificate.cert_config as cert_config
+import brg_certificate.cert_data_sim as cert_data_sim
+from brg_certificate.cert_gw_sim import DEDUPLICATION_PKTS
+import statistics
+
+TX_REPETITION_THRESHOLD = 0.5
+
+def tx_repetitions_analysis(test, repetitions):
+    cert_common.wait_time_n_print(CLEAR_DATA_PATH_TIMEOUT)
+    test.mqttc.flush_pkts()
+    if test.data == DATA_SIMULATION:
+        pixel_sim_thread = cert_data_sim.DataSimThread(test=test, num_of_pixels=15, duplicates=1, delay=100, pkt_types=[0])
+        pixel_sim_thread.start()
+    mqtt_scan_wait(test, 60)
+    if test.data == DATA_SIMULATION:
+        pixel_sim_thread.stop() # stop generating pkts on data simulator
+        cert_common.wait_time_n_print(CLEAR_DATA_PATH_TIMEOUT) # Wait for sim queue to free
+    pkts = cert_mqtt.get_unified_data_pkts(test)
+    print(f"Found {len(pkts)} packets")
+    if len(pkts) == 0:
+        test.rc = TEST_FAILED
+        test.add_reason(f"For repetitions {repetitions} found 0 pkts!")
+        generate_log_file(test, repetitions)
+        return test
+    pkt_payload_counter = {}
+    for p in pkts:
+        cur_pkt = p[PAYLOAD]
+        if cur_pkt in pkt_payload_counter:
+            pkt_payload_counter[cur_pkt] += 1
+        else:
+            pkt_payload_counter[cur_pkt] = 1
+    generate_log_file(test, repetitions)
+    avg = statistics.mean([pkt_payload_counter[p] for p in pkt_payload_counter])
+    txt = f"For TX repetition = {repetitions}, average {round(avg, 3)} repetitions"
+    print(txt)
+    if (avg / float(repetitions)) <= TX_REPETITION_THRESHOLD or (avg / float(repetitions)) > 1:
+        test.rc = TEST_FAILED
+        test.add_reason(txt)
+    return test
+
+def run(test):
+
+    fields = [BRG_TX_REPETITION, BRG_PKT_FILTER, BRG_RX_CHANNEL]
+
+    datapath_module = eval_pkt(f'ModuleDatapathV{test.active_brg.api_version}')
+
+    test = cert_common.test_prolog(test)
+    if test.rc == TEST_FAILED or test.reason != TEST_SUCCESS:
+        return cert_common.test_epilog(test)
+
+    print("Configuring GW with !deduplication_pkts 0")
+    cert_config.gw_action(test, f"{DEDUPLICATION_PKTS} 0")
+    if test.rc == TEST_FAILED and test.exit_on_param_failure:
+        return cert_common.test_epilog(test, revert_gws=True)
+
+    for param in test.params:
+        test = cert_config.brg_configure(test, fields=fields, values=[param.value, ag.PKT_FILTER_RANDOM_FIRST_ARRIVING_PKT, ag.RX_CHANNEL_37], module=datapath_module)[0]
+        if test.rc == TEST_FAILED and test.exit_on_param_failure:
+            break
+        tx_repetitions_analysis(test, param.name)
+        field_functionality_pass_fail_print(test, fields[0], value=param.name)
+        test.set_phase_rc(param.name, test.rc)
+        test.add_phase_reason(param.name, test.reason)
+        if test.rc == TEST_FAILED and test.exit_on_param_failure:
+            break
+        else:
+            test.reset_result()
+
+
+    # Re-enable unified packets deduplication
+    cert_config.gw_action(test, f"{DEDUPLICATION_PKTS} 1")
+    return cert_common.test_epilog(test, revert_brgs=True, revert_gws=True, modules=[datapath_module])