wiliot_certificate-1.3.0a1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. gw_certificate/__init__.py +0 -0
  2. gw_certificate/ag/ut_defines.py +361 -0
  3. gw_certificate/ag/wlt_types.py +85 -0
  4. gw_certificate/ag/wlt_types_ag.py +5310 -0
  5. gw_certificate/ag/wlt_types_data.py +64 -0
  6. gw_certificate/api/extended_api.py +1547 -0
  7. gw_certificate/api_if/__init__.py +0 -0
  8. gw_certificate/api_if/api_validation.py +40 -0
  9. gw_certificate/api_if/gw_capabilities.py +18 -0
  10. gw_certificate/common/analysis_data_bricks.py +1455 -0
  11. gw_certificate/common/debug.py +63 -0
  12. gw_certificate/common/utils.py +219 -0
  13. gw_certificate/common/utils_defines.py +102 -0
  14. gw_certificate/common/wltPb_pb2.py +72 -0
  15. gw_certificate/common/wltPb_pb2.pyi +227 -0
  16. gw_certificate/gw_certificate.py +138 -0
  17. gw_certificate/gw_certificate_cli.py +70 -0
  18. gw_certificate/interface/ble_simulator.py +91 -0
  19. gw_certificate/interface/ble_sniffer.py +189 -0
  20. gw_certificate/interface/if_defines.py +35 -0
  21. gw_certificate/interface/mqtt.py +469 -0
  22. gw_certificate/interface/packet_error.py +22 -0
  23. gw_certificate/interface/pkt_generator.py +720 -0
  24. gw_certificate/interface/uart_if.py +193 -0
  25. gw_certificate/interface/uart_ports.py +20 -0
  26. gw_certificate/templates/results.html +241 -0
  27. gw_certificate/templates/stage.html +22 -0
  28. gw_certificate/templates/table.html +6 -0
  29. gw_certificate/templates/test.html +38 -0
  30. gw_certificate/tests/__init__.py +11 -0
  31. gw_certificate/tests/actions.py +131 -0
  32. gw_certificate/tests/bad_crc_to_PER_quantization.csv +51 -0
  33. gw_certificate/tests/connection.py +181 -0
  34. gw_certificate/tests/downlink.py +174 -0
  35. gw_certificate/tests/generic.py +161 -0
  36. gw_certificate/tests/registration.py +288 -0
  37. gw_certificate/tests/static/__init__.py +0 -0
  38. gw_certificate/tests/static/connection_defines.py +9 -0
  39. gw_certificate/tests/static/downlink_defines.py +9 -0
  40. gw_certificate/tests/static/generated_packet_table.py +209 -0
  41. gw_certificate/tests/static/packet_table.csv +10051 -0
  42. gw_certificate/tests/static/references.py +4 -0
  43. gw_certificate/tests/static/uplink_defines.py +20 -0
  44. gw_certificate/tests/throughput.py +244 -0
  45. gw_certificate/tests/uplink.py +683 -0
  46. wiliot_certificate-1.3.0a1.dist-info/LICENSE +21 -0
  47. wiliot_certificate-1.3.0a1.dist-info/METADATA +113 -0
  48. wiliot_certificate-1.3.0a1.dist-info/RECORD +51 -0
  49. wiliot_certificate-1.3.0a1.dist-info/WHEEL +5 -0
  50. wiliot_certificate-1.3.0a1.dist-info/entry_points.txt +2 -0
  51. wiliot_certificate-1.3.0a1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,683 @@
1
+ import os
2
+ import time
3
+ from typing import Literal
4
+ import pandas as pd
5
+ import plotly.express as px
6
+ import tabulate
7
+ import pkg_resources
8
+
9
+ from gw_certificate.ag.ut_defines import PAYLOAD, LAT, LNG
10
+ from gw_certificate.common.debug import debug_print
11
+ from gw_certificate.api_if.gw_capabilities import GWCapabilities
12
+ from gw_certificate.interface.ble_simulator import BLESimulator
13
+ from gw_certificate.interface.if_defines import DEFAULT_DELAY, LOCATION
14
+ from gw_certificate.tests.static.uplink_defines import *
15
+ from gw_certificate.interface.mqtt import MqttClient, Serialization
16
+ from gw_certificate.interface.pkt_generator import BrgPktGenerator, apply_adva_bitmask
17
+ from gw_certificate.tests.static.generated_packet_table import UplinkRunData, UnifiedRunData, SensorRunData, MgmtRunData, PacketTableHelper
18
+ from gw_certificate.tests.generic import PassCriteria, PERFECT_SCORE, MINIMUM_SCORE, INCONCLUSIVE_MINIMUM, INIT_INCONCLUSIVE_MINIMUM, GenericTest, GenericStage
19
+ from gw_certificate.interface.packet_error import PacketError
20
+ from gw_certificate.api_if.api_validation import MESSAGE_TYPES, validate_message
21
+ from gw_certificate.tests.static.generated_packet_table import CSV_NAME
22
+
23
+
24
+ # HELPER DEFINES
25
+ TABLE_SUFFIX = "Table"
26
+ ERR_SUM_MISSING_PKTS = "Insufficient amount of packets were scanned & uploaded by the gateway. "
27
+ ERR_SUM_INVALID_TS = "Invalid timestamps were uploaded by the gateway. "
28
+
29
+ # HELPER FUNCTIONS
30
+
31
+ def process_payload(packet:dict):
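+ # Normalize an uploaded payload: uppercase it, strip a leading '1E16' length/AD-type prefix from 62-character payloads, and swap a big-endian 'FCC6' UUID prefix to little-endian 'C6FC'.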
32
+ payload = packet[PAYLOAD]
33
+ payload = payload.upper()
34
+ if len(payload) == 62 and payload[:4] == '1E16':
35
+ payload = payload[4:]
36
+ # big2little endian
37
+ if payload[:4] == 'FCC6':
38
+ payload = 'C6FC' + payload[4:]
39
+ packet[PAYLOAD] = payload
40
+ return packet
41
+
42
+ # HELPER CLASSES
43
+ class TimestampsHelper(PacketTableHelper):
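+ # Records the advertisement timestamp of every generated payload and later verifies that the timestamps reported by the gateway fall inside the expected window.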
44
+ def __init__(self):
45
+ self.ts_errors = []
46
+ super().__init__()
47
+
48
+ def set_adv_timestamp(self, data_payload, timestamp):
49
+ self.set_field(data_payload, ADV_TIMESTAMP, timestamp)
50
+
51
+ def set_adv_timestamp_current(self, data_payload):
52
+ cur_ts = time.time_ns() // 1_000_000
53
+ self.set_field(data_payload, ADV_TIMESTAMP, cur_ts)
54
+
55
+ def get_adv_timestamp(self, data_payload):
56
+ return self.get_field(data_payload, ADV_TIMESTAMP)
57
+
58
+ def get_advertised_entries(self):
59
+ """
60
+ Return the rows for packets that have already been advertised; these have the 'adv_timestamp' field set.
61
+ """
62
+ return self.table.loc[self.table[ADV_TIMESTAMP].notna()]
63
+
64
+ def validate_timestamps(self, received_pkts:list):
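+ # Cross-check each advertised packet's timestamp against the timestamp received over MQTT and record out-of-range or duplicated values in self.ts_errors.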
65
+ packets_sent_df = self.get_advertised_entries().copy()
66
+
67
+ # Convert received packets into a DataFrame for vectorized processing
68
+ received_df = pd.DataFrame(received_pkts)
69
+ received_df = received_df[[PAYLOAD, TIMESTAMP]]
70
+ received_df[TIMESTAMP] = pd.to_numeric(received_df[TIMESTAMP], errors='coerce')
71
+
72
+ # Map payloads to their received timestamps
73
+ payload_to_ts = received_df.groupby(PAYLOAD)[TIMESTAMP].first().to_dict()
74
+
75
+ # Calculate adv_duration once
76
+ def calculate_adv_duration(row):
77
+ if pd.notna(row['duplication']) and pd.notna(row['time_delay']):
78
+ return row['duplication'] * row['time_delay']
79
+ return DEFAULT_DELAY
80
+
81
+ packets_sent_df['adv_duration'] = packets_sent_df.apply(calculate_adv_duration, axis=1)
82
+
83
+ # Validate timestamps using vectorized operations
84
+ def validate_row(row):
85
+ if row[PAYLOAD] in payload_to_ts:
86
+ received_ts = payload_to_ts[row[PAYLOAD]]
87
+ advertised_ts = row[ADV_TIMESTAMP]
88
+ adv_duration = row['adv_duration']
89
+
90
+ min_accepted_ts = int(advertised_ts - (adv_duration + TS_DEVIATION))
91
+ max_accepted_ts = int(advertised_ts + TS_TOLERANCE + TS_DEVIATION)
92
+
93
+ if not (min_accepted_ts < received_ts < max_accepted_ts):
94
+ self.ts_errors.append(
95
+ f"Timestamp {received_ts} is too far off the accepted range "
96
+ f"{min_accepted_ts}-{max_accepted_ts} for payload: {row[PAYLOAD]}"
97
+ )
98
+ return received_ts
99
+ return None
100
+
101
+ packets_sent_df[REC_TIMESTAMP] = packets_sent_df.apply(validate_row, axis=1)
102
+
103
+ # Validate no 2 packets hold the same timestamp
104
+ if REC_TIMESTAMP in packets_sent_df.columns:
105
+ duplicates = packets_sent_df[REC_TIMESTAMP].value_counts()
106
+ duplicated_ts = duplicates[duplicates > 1].index
107
+
108
+ for ts in duplicated_ts:
109
+ self.ts_errors.append(f"Multiple packets were uploaded with identical timestamp (ts = {int(ts)})")
110
+
111
+ def add_ts_errs_to_report(self, stage:GenericStage):
112
+ for idx, ts_err in enumerate(self.ts_errors):
113
+ stage.add_to_stage_report(ts_err)
114
+ if idx == 3 and (len(self.ts_errors) - 1) > idx:
115
+ stage.add_to_stage_report(f'Additional errors ({len(self.ts_errors) - 1 - idx}) are suppressed to avoid clutter')
116
+ break
117
+
118
+ def is_ts_error(self) -> bool:
119
+ return len(self.ts_errors) > 0
120
+
121
+
122
+ # TEST STAGES
123
+
124
+ class UplinkTestError(Exception):
125
+ pass
126
+
127
+ class GenericUplinkStage(GenericStage):
128
+ def __init__(self, mqttc:MqttClient, ble_sim:BLESimulator, gw_capabilities:GWCapabilities, stage_name,
129
+ **kwargs):
130
+ self.__dict__.update(kwargs)
131
+ super().__init__(stage_name=stage_name, **self.__dict__)
132
+
133
+ # Clients
134
+ self.mqttc = mqttc
135
+ self.ble_sim = ble_sim
136
+
137
+ # Packets list
138
+ self.local_pkts = []
139
+ self.mqtt_pkts = []
140
+
141
+ # GW Capabilities
142
+ self.gw_capabilities = gw_capabilities
143
+
144
+ # Packet Error / Run data
145
+ self.packet_error = PacketError()
146
+ self.run_data = UplinkRunData
147
+
148
+ # Unified stage
149
+ self.run_data_unified = UnifiedRunData
150
+
151
+ self.ts_records = TimestampsHelper()
152
+
153
+ def prepare_stage(self, reset_ble_sim=True):
154
+ super().prepare_stage()
155
+ self.mqttc.flush_messages()
156
+ if reset_ble_sim:
157
+ self.ble_sim.set_sim_mode(True)
158
+
159
+ def fetch_mqtt_from_stage(self):
160
+ mqtt_pkts = self.mqttc.get_all_tags_pkts()
161
+ # self.mqtt_pkts is a list of packet dicts: [{timestamp: ..., aliasBridgeId: ..., payload: ...}, {...}]
162
+ self.mqtt_pkts = list(map(lambda p: process_payload(p), mqtt_pkts))
163
+
164
+ ## TODO - REWRITE
165
+ def compare_local_mqtt(self):
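+ # Inner-merge the locally advertised payloads with those received over MQTT, count duplicates, and flag each sent packet as received (or not) in self.comparison.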
166
+ self.fetch_mqtt_from_stage()
167
+ local_pkts_df = pd.DataFrame(self.local_pkts, columns=[PAYLOAD, 'duplication', 'time_delay', 'aliasBridgeId'])
168
+ mqtt_pkts_df = pd.DataFrame(self.mqtt_pkts)
169
+ comparison = local_pkts_df
170
+
171
+ if PAYLOAD not in mqtt_pkts_df.columns:
172
+ mqtt_pkts_df[PAYLOAD] = ''
173
+ received_pkts_df = pd.merge(local_pkts_df[PAYLOAD], mqtt_pkts_df[PAYLOAD], how='inner')
174
+
175
+ received_pkts = set(received_pkts_df[PAYLOAD])
176
+
177
+ self.pkts_received_count = pd.Series.count(received_pkts_df)
178
+ unique_received_count = len(received_pkts)
179
+ self.pkts_filtered_out_count = self.pkts_received_count - unique_received_count
180
+
181
+ comparison[RECEIVED] = comparison[PAYLOAD].isin(received_pkts)
182
+ comparison['pkt_id'] = comparison[PAYLOAD].apply(lambda x: x[-8:])
183
+ self.comparison = comparison
184
+
185
+ def generate_stage_report(self):
186
+ """
187
+ Generates report for the stage
188
+ """
189
+ self.compare_local_mqtt()
190
+ self.ts_records.validate_timestamps(self.mqtt_pkts)
191
+
192
+ num_pkts_sent = len(self.comparison)
193
+ num_pkts_received = self.comparison['received'].eq(True).sum()
194
+ self.stage_pass = num_pkts_received / num_pkts_sent * PERFECT_SCORE
195
+ if self.stage_pass < self.pass_min:
196
+ self.error_summary = ERR_SUM_MISSING_PKTS
197
+ if self.ts_records.is_ts_error():
198
+ self.stage_pass = min(self.stage_pass, self.inconclusive_min)
199
+ self.error_summary += ERR_SUM_INVALID_TS
200
+
201
+ self.add_report_header()
202
+ self.add_to_stage_report(f'Number of unique packets sent: {num_pkts_sent}')
203
+ self.add_to_stage_report(f'Number of unique packets received: {num_pkts_received}')
204
+ self.add_to_stage_report(f'Number of total packets received: {self.pkts_received_count}')
205
+ self.add_to_stage_report(f'Number of duplicates out of total: {self.pkts_filtered_out_count}\n')
206
+
207
+ not_received = self.comparison[self.comparison[RECEIVED]==False][REPORT_COLUMNS]
208
+ if len(not_received) > 0:
209
+ self.add_to_stage_report('Packets not received:')
210
+ self.add_to_stage_report(tabulate.tabulate(not_received, headers='keys', showindex=False))
211
+
212
+ self.ts_records.add_ts_errs_to_report(self)
213
+
214
+ self.comparison.to_csv(self.csv_path)
215
+ self.add_to_stage_report(f'Stage data saved - {self.csv_path}')
216
+ debug_print(self.report)
217
+
218
+ # Generate HTML
219
+ table_html = self.template_engine.render_template('table.html', dataframe=self.comparison.to_html(table_id=self.stage_name + TABLE_SUFFIX),
220
+ table_id=self.stage_name + TABLE_SUFFIX)
221
+ self.report_html = self.template_engine.render_template('stage.html', stage=self,
222
+ stage_report=self.report.split('\n'), table=table_html)
223
+
224
+ return self.report
225
+
226
+ class ManagementPacketStage(GenericUplinkStage):
227
+ def __init__(self, **kwargs):
228
+ self.stage_tooltip = "Simulates management advertisements from a single bridge. Expects the gateway to scan & upload them"
229
+ self.__dict__.update(kwargs)
230
+ super().__init__(**self.__dict__, stage_name=type(self).__name__)
231
+ self.run_data = MgmtRunData().data
232
+
233
+ def run(self):
234
+ super().run()
235
+ for index, row in self.run_data.iterrows():
236
+ data = row[ADVA_PAYLOAD]
237
+ # cur_ts = time.time_ns() // 1_000_000
238
+ self.local_pkts.append((row[PAYLOAD], row['duplication'], row['time_delay'], row['adva']))
239
+ self.ble_sim.send_packet(raw_packet=data, duplicates=row['duplication'], delay=row['time_delay'])
240
+ self.ts_records.set_adv_timestamp_current(data)
241
+ time.sleep(10)
242
+
243
+ def generate_stage_report(self):
244
+ self.compare_local_mqtt()
245
+ self.ts_records.validate_timestamps(self.mqtt_pkts)
246
+
247
+ num_pkts_sent = len(self.comparison)
248
+ num_pkts_received = self.comparison['received'].eq(True).sum()
249
+ self.stage_pass = num_pkts_received / num_pkts_sent * PERFECT_SCORE
250
+ if self.stage_pass < self.pass_min:
251
+ self.error_summary = ERR_SUM_MISSING_PKTS
252
+ if self.ts_records.is_ts_error():
253
+ self.stage_pass = min(self.stage_pass, self.inconclusive_min)
254
+ self.error_summary += ERR_SUM_INVALID_TS
255
+
256
+ self.add_report_header()
257
+ self.add_to_stage_report(f'Number of unique packets sent: {num_pkts_sent}')
258
+ self.add_to_stage_report(f'Number of unique packets received: {num_pkts_received}\n')
259
+
260
+ not_received = self.comparison[self.comparison[RECEIVED]==False][REPORT_COLUMNS]
261
+ if len(not_received) > 0:
262
+ self.add_to_stage_report('Packets not received:')
263
+ self.add_to_stage_report(tabulate.tabulate(not_received, headers='keys', showindex=False))
264
+ self.add_to_stage_report('Check the CSV for more info')
265
+
266
+ self.ts_records.add_ts_errs_to_report(self)
267
+
268
+ self.comparison.to_csv(self.csv_path)
269
+ self.add_to_stage_report(f'Stage data saved - {self.csv_path}')
270
+ debug_print(self.report)
271
+
272
+ # Generate HTML
273
+ self.report_html = self.template_engine.render_template('stage.html', stage=self,
274
+ stage_report=self.report.split('\n'))
275
+
276
+ return self.report
277
+
278
+ class DataPacketStage(GenericUplinkStage):
279
+
280
+ def __init__(self, **kwargs):
281
+ self.stage_tooltip = "Simulates advertisements from three bridges. Expects the gateway to scan & upload them"
282
+ self.__dict__.update(kwargs)
283
+ super().__init__(**self.__dict__, stage_name=type(self).__name__)
284
+ self.run_data = UnifiedRunData().data
285
+
286
+ def run(self):
287
+ super().run()
288
+ for index, row in self.run_data.iterrows():
289
+ data = row[ADVA_PAYLOAD]
290
+ # cur_ts = time.time_ns() // 1_000_000
291
+ self.local_pkts.append((row[PAYLOAD], row['duplication'], row['time_delay'], row['adva']))
292
+ self.ble_sim.send_packet(raw_packet=data, duplicates=row['duplication'], delay=row['time_delay'])
293
+ self.ts_records.set_adv_timestamp_current(data)
294
+ time.sleep(5)
295
+
296
+
297
+
298
+ class SensorPacketStage(GenericUplinkStage):
299
+ def __init__(self, **kwargs):
300
+ self.__dict__.update(kwargs)
301
+ super().__init__(**self.__dict__, stage_name=type(self).__name__)
302
+ self.pkt_gen = BrgPktGenerator()
303
+ self.stage_tooltip = "Simulates sensor packets advertisements. Expects the gateway to scan & upload them"
304
+ self.error_summary = ERR_SUM_MISSING_PKTS
305
+
306
+ def run(self):
307
+
308
+ def remove_pre_uuid(payload:str) -> str:
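+ # Strip the 16 hex characters that precede the UUID so the stored side-info payload matches the format uploaded by the gateway.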
309
+ return payload[16:]
310
+
311
+ super().run()
312
+ run_data = SensorRunData()
313
+ run_data = run_data.data
314
+ for index, row in run_data.iterrows():
315
+ data = row[ADVA_PAYLOAD]
316
+ si = row['si']
317
+ # Save to local_pkts once for the data packet and once for the side info, each with its corresponding adva.
318
+ self.local_pkts.append((row[PAYLOAD], row['duplication'], row['time_delay'], apply_adva_bitmask(row['bridge_id'], 'random_static')))
319
+ self.local_pkts.append((remove_pre_uuid(row['si']), row['duplication'], row['time_delay'], row['adva']))
320
+ self.ble_sim.send_data_si_pair(data_packet=data, si_packet=si, duplicates=row['duplication'], delay=row['time_delay'])
321
+ self.ts_records.set_adv_timestamp_current(data)
322
+ time.sleep(5)
323
+
324
+ def compare_local_mqtt(self):
325
+ self.fetch_mqtt_from_stage()
326
+ local_pkts_df = pd.DataFrame(self.local_pkts, columns=[PAYLOAD, 'duplication', 'time_delay', 'aliasBridgeId'])
327
+ mqtt_pkts_df = pd.DataFrame(self.mqtt_pkts)
328
+ comparison = local_pkts_df
329
+
330
+ if not set(SHARED_COLUMNS) <= set(mqtt_pkts_df.columns):
331
+ missing_columns = list(set(SHARED_COLUMNS) - set(mqtt_pkts_df.columns))
332
+ for missing_column in missing_columns:
333
+ if missing_column in OBJECT_COLUMNS:
334
+ mqtt_pkts_df[missing_column] = ''
335
+ if missing_column in INT64_COLUMNS:
336
+ mqtt_pkts_df[missing_column] = 0
337
+ received_pkts_df = pd.merge(local_pkts_df[SHARED_COLUMNS], mqtt_pkts_df[SHARED_COLUMNS], how='inner')
338
+
339
+ received_pkts = set(received_pkts_df[PAYLOAD])
340
+
341
+ self.pkts_received_count = pd.Series.count(received_pkts_df)
342
+ unique_received_count = len(received_pkts)
343
+ self.pkts_filtered_out_count = self.pkts_received_count - unique_received_count
344
+
345
+ comparison[RECEIVED] = comparison[PAYLOAD].isin(received_pkts)
346
+ comparison['pkt_id'] = comparison['payload'].apply(lambda x: x[-8:])
347
+ self.comparison = comparison
348
+
349
+ def generate_stage_report(self):
350
+ self.compare_local_mqtt()
351
+ print(self.comparison)
352
+ report = []
353
+ num_pkts_sent = len(self.comparison)
354
+ num_pkts_received = self.comparison['received'].eq(True).sum()
355
+ pkt_id_pairs = self.comparison.groupby('pkt_id').filter(lambda x: x['received'].all() and len(x) == 2)
356
+ unique_pkt_ids = pkt_id_pairs['pkt_id'].unique()
357
+ num_pairs = len(unique_pkt_ids)
358
+
359
+ if num_pairs > 1:
360
+ self.stage_pass = PERFECT_SCORE
361
+ else:
362
+ self.stage_pass = MINIMUM_SCORE
363
+
364
+ self.add_report_header()
365
+ self.add_to_stage_report(f'Number of sensor packets sent: {int(num_pkts_sent / 2)}')
366
+ self.add_to_stage_report(f'Number of sensor packets received correctly: {num_pairs}\n')
367
+
368
+ not_received = self.comparison[self.comparison[RECEIVED]==False][REPORT_COLUMNS]
369
+ if len(not_received) > 0:
370
+ self.add_to_stage_report('Packets not received:')
371
+ self.add_to_stage_report(tabulate.tabulate(not_received, headers='keys', showindex=False))
372
+ self.comparison.to_csv(self.csv_path)
373
+ self.add_to_stage_report(f'Stage data saved - {self.csv_path}')
374
+ debug_print(self.report)
375
+
376
+ # Generate HTML
377
+ table_html = self.template_engine.render_template('table.html', dataframe=self.comparison.to_html(table_id=self.stage_name + TABLE_SUFFIX),
378
+ table_id=self.stage_name + TABLE_SUFFIX)
379
+ self.report_html = self.template_engine.render_template('stage.html', stage=self,
380
+ stage_report=self.report.split('\n'), table=table_html)
381
+
382
+ return self.report
383
+
384
+
385
+ class ApiValidationStage(GenericUplinkStage):
386
+ def __init__(self, **kwargs):
387
+ self.stage_tooltip = "Validates the JSON structure of messages uploaded by the gateway in previous stages"
388
+ self.__dict__.update(kwargs)
389
+ super().__init__(**self.__dict__, stage_name=type(self).__name__)
390
+
391
+ def prepare_stage(self):
392
+ super().prepare_stage(reset_ble_sim=False)
393
+ self.mqttc.flush_messages()
394
+
395
+ def generate_stage_report(self, **kwargs):
396
+ report = []
397
+ all_validations = []
398
+ self.stage_pass = PERFECT_SCORE
399
+
400
+ # Set stage as FAIL if no messages were received:
401
+ if len(self.all_messages_in_test) == 0:
402
+ self.stage_pass = MINIMUM_SCORE
403
+ self.error_summary = "No packets were received"
404
+
405
+ for idx, message in enumerate(self.all_messages_in_test):
406
+ message_body = message.body
407
+ if len(message_body['packets']) == 0:
408
+ continue
409
+ validation = validate_message(MESSAGE_TYPES.DATA, message_body)
410
+ errors = []
411
+ for e in validation[1]:
412
+ if e.message not in errors:
413
+ errors.append(e.message)
414
+ all_validations.append({'valid':validation[0], 'errors': errors, 'message': message_body,})
415
+ if not validation[0]:
416
+ if 'Validation Errors:' not in report:
417
+ report.append('Validation Errors:')
418
+ report.append(f'- Message (idx={idx}, json timestamp={message_body.get(TIMESTAMP)}) Errors:')
419
+ for e in errors:
420
+ report.append(e)
421
+ self.stage_pass = MINIMUM_SCORE
422
+ self.error_summary = "API (JSON strcture) is invalid"
423
+
424
+ self.add_report_header()
425
+ # Add all messages that failed to validate to report
426
+ for line in report:
427
+ self.add_to_stage_report(line)
428
+ all_validations_df = pd.DataFrame(all_validations)
429
+ all_validations_df.to_csv(self.csv_path)
430
+ self.add_to_stage_report(f'Stage data saved - {self.csv_path}')
431
+ debug_print(self.report)
432
+
433
+ # Generate HTML
434
+ table_html = self.template_engine.render_template('table.html', dataframe=all_validations_df.to_html(table_id=self.stage_name + TABLE_SUFFIX),
435
+ table_id=self.stage_name + TABLE_SUFFIX)
436
+ self.report_html = self.template_engine.render_template('stage.html', stage=self,
437
+ stage_report=self.report.split('\n'))
438
+ return self.report
439
+
440
+ class SequentialSequenceIdStage(GenericUplinkStage):
441
+ def __init__(self, **kwargs):
442
+ self.__dict__.update(kwargs)
443
+ self.stage_tooltip = "Validates expected sequenceId in all packets"
444
+ super().__init__(**self.__dict__, stage_name=type(self).__name__)
445
+
446
+ def prepare_stage(self):
447
+ super().prepare_stage(reset_ble_sim=False)
448
+ self.mqttc.flush_messages()
449
+
450
+ def generate_stage_report(self, **kwargs):
451
+ report = []
452
+ self.stage_pass = PERFECT_SCORE
453
+ required_sequenceId = None
454
+ sequenceId_valid = True
455
+
456
+ def is_sequenceId_incremental(idx, message):
457
+ nonlocal required_sequenceId, sequenceId_valid
458
+ packets = message['packets']
459
+
460
+ # check that there is sequenceId in all packets
461
+ packets_w_seqid = list(filter(lambda p: 'sequenceId' in p, packets))
462
+ if len(packets_w_seqid) == 0:
463
+ sequenceId_valid = False
464
+ report.append(f'No sequenceId in message {idx}. Expected sequenceId in all packets')
465
+ self.error_summary += 'No SequenceId in packets.'
466
+ return False
467
+
468
+ # initialize the required sequenceId
469
+ if idx == 0:
470
+ first_pkt = packets[0]
471
+ required_sequenceId = first_pkt['sequenceId']
472
+
473
+ # check that for every packet in message the sequenceId is incremental:
474
+ for pkt in packets:
475
+ pkt_sequenceId = pkt['sequenceId']
476
+ if pkt_sequenceId != required_sequenceId:
477
+ if sequenceId_valid:
478
+ report.append(f'SequenceId is not incremental. Expected sequenceId is {required_sequenceId} but the packet sequenceId is {pkt_sequenceId}')
479
+ self.stage_pass = MINIMUM_SCORE
480
+ self.error_summary = self.error_summary + 'SequenceId is not incremental. '
481
+ sequenceId_valid = False
482
+ break
483
+ required_sequenceId += 1
484
+
485
+ # Check sequenceId continuity across all uploaded data messages
486
+ for idx, message in enumerate(self.all_messages_in_test):
487
+ message_body = message.body
488
+ is_sequenceId_incremental(idx=idx, message=message_body)
489
+
490
+ self.add_report_header()
491
+ self.add_to_stage_report(f"{'---SequenceId is incremental' if sequenceId_valid else '---SequenceId is NOT incremental'}")
492
+ for line in report:
493
+ self.add_to_stage_report(line)
494
+ debug_print(self.report)
495
+
496
+ # Generate HTML
497
+ self.report_html = self.template_engine.render_template('stage.html', stage=self,
498
+ stage_report=self.report.split('\n'))
499
+ return self.report
500
+
501
+ class AliasBridgeIDStage(GenericUplinkStage):
502
+ def __init__(self, **kwargs):
503
+ self.stage_tooltip = "Validates the uploaded aliasBridgeId is as expected per payload"
504
+ # Data extracted from the test csv
505
+ self.all_test_payloads = None
506
+ self.alias_bridge_id_df = None
507
+ self.__dict__.update(kwargs)
508
+ super().__init__(**self.__dict__, stage_name=type(self).__name__)
509
+
510
+ def prepare_stage(self):
511
+ super().prepare_stage(reset_ble_sim=False)
512
+ self.mqttc.flush_messages()
513
+
514
+ def get_data_from_test_csv(self):
515
+ relative_path = 'static/' + CSV_NAME
516
+ csv_path = pkg_resources.resource_filename(__name__, relative_path)
517
+ df = pd.read_csv(csv_path)
518
+
519
+ # Store all test payloads
520
+ all_payloads = df[ADVA_PAYLOAD].str[12:]
521
+ self.all_test_payloads = all_payloads.tolist()
522
+
523
+ def _parser(row, desired:Literal['adva', 'without_adva']):
524
+ if desired == 'adva':
525
+ output_string = row.at[ADVA_PAYLOAD][:12]
526
+ elif desired == 'without_adva':
527
+ output_string = row.at[ADVA_PAYLOAD][12:]
528
+ else:
529
+ raise ValueError
530
+ return output_string
531
+
532
+ # Create data set for alias bridge verification
533
+ alias_bridge_id_df = df[(df['test'] == 'unified') | (df['test'] == 'mgmt')].copy()
534
+ alias_bridge_id_df['payload'] = alias_bridge_id_df.apply(lambda row: _parser(row, 'without_adva'), axis=1)
535
+ # Take the adva from the payload
536
+ alias_bridge_id_df['alias_bridge_id'] = alias_bridge_id_df.apply(lambda row: _parser(row, 'adva'), axis=1)
537
+ # Convert bridge_id to little endian
538
+ alias_bridge_id_df['alias_bridge_id'] = alias_bridge_id_df['alias_bridge_id'].apply(lambda x: ''.join(format(byte, '02X') for byte in bytes.fromhex(x)[::-1]))
539
+ self.alias_bridge_id_df = alias_bridge_id_df
540
+
541
+ def generate_stage_report(self, **kwargs):
542
+ report = []
543
+ self.stage_pass = PERFECT_SCORE
544
+ self.get_data_from_test_csv()
545
+ aliasBridgeId_valid = True
546
+
547
+ def filter_non_test_packets(message):
548
+ packets = message['packets']
549
+ filtered_pkts = []
550
+ for pkt in packets:
551
+ pkt = process_payload(pkt)
552
+ payload = pkt['payload']
553
+ if any(payload in test_payload for test_payload in self.all_test_payloads):
554
+ filtered_pkts.append(pkt)
555
+ message['packets'] = filtered_pkts
556
+
557
+ def is_alias_bridge_id_valid(message):
558
+ nonlocal aliasBridgeId_valid
559
+ packets = message['packets']
560
+
561
+ for pkt in packets:
562
+ if 'aliasBridgeId' in pkt:
563
+ pkt_payload = pkt['payload']
564
+ pkt_alias_bridge_id = pkt['aliasBridgeId']
565
+ validation_data = self.alias_bridge_id_df[self.alias_bridge_id_df['payload'].str.contains(pkt_payload, case=False)]
566
+ required_bridge_id = validation_data['alias_bridge_id'].iat[0]
567
+ if required_bridge_id != pkt_alias_bridge_id.upper():
568
+ report.append(f"Alias bridge ID of the packet does not match. The required alias bridge ID is {required_bridge_id} but the packet alias bridge ID is {pkt_alias_bridge_id}")
569
+ self.stage_pass = MINIMUM_SCORE
570
+ self.error_summary = "aliasBridgeId doesn't match the expected one of a packet. "
571
+ aliasBridgeId_valid = False
572
+
573
+ # Set stage as FAIL if no messages were received:
574
+ if len(self.all_messages_in_test) == 0:
575
+ self.stage_pass = MINIMUM_SCORE
576
+ self.error_summary = "No packets were received"
577
+
578
+ # Validate the aliasBridgeId of every test packet in the uploaded data messages
579
+ for idx, message in enumerate(self.all_messages_in_test):
580
+ message_body = message.body
581
+ filter_non_test_packets(message_body)
582
+ if len(message_body['packets']) == 0:
583
+ continue
584
+ is_alias_bridge_id_valid(message=message_body)
585
+
586
+ self.add_report_header()
587
+ self.add_to_stage_report(f"{'---Alias bridge ID is valid' if aliasBridgeId_valid else '---Alias bridge ID is NOT valid'}")
588
+ for line in report:
589
+ self.add_to_stage_report(line)
590
+ # Add all messages that failed to validate to report
591
+ debug_print(self.report)
592
+
593
+ # Generate HTML
594
+ self.report_html = self.template_engine.render_template('stage.html', stage=self,
595
+ stage_report=self.report.split('\n'))
596
+ return self.report
597
+
598
+
599
+
600
+ class GeolocationStage(GenericUplinkStage):
601
+ def __init__(self, **kwargs):
602
+ self.stage_tooltip = "Checks if lat/lng were uploaded under 'location' (optional JSON key) in the uploaded data messages"
603
+ self.__dict__.update(kwargs)
604
+ super().__init__(**self.__dict__, stage_name=type(self).__name__)
605
+ self.graph_html_path = os.path.join(self.test_dir, f'{self.stage_name}.html')
606
+
607
+
608
+ def prepare_stage(self):
609
+ super().prepare_stage(reset_ble_sim=False)
610
+ self.mqttc.flush_messages()
611
+
612
+ def generate_stage_report(self, **kwargs):
613
+ locations_list = []
614
+ locations_df = pd.DataFrame()
615
+ self.stage_pass = MINIMUM_SCORE
616
+ self.error_summary = "No coordinates were uploaded. "
617
+
618
+ # Collect the 'location' object (when present) from every uploaded data message
619
+ for message in self.all_messages_in_test:
620
+ message = message.body
621
+ timestamp = message[TIMESTAMP]
622
+ if LOCATION in message.keys():
623
+ loc = message[LOCATION]
624
+ loc.update({TIMESTAMP:timestamp})
625
+ locations_list.append(loc)
626
+ num_unique_locs = 0
627
+ if len(locations_list) > 0:
628
+ self.stage_pass = PERFECT_SCORE
629
+ self.error_summary = ''
630
+ locations_df = pd.DataFrame(locations_list)
631
+ num_unique_locs = locations_df[['lat', 'lng']].drop_duplicates().shape[0]
632
+ fig = px.scatter_mapbox(locations_df, lat=LAT, lon=LNG, color='timestamp', zoom=10)
633
+ fig.update(layout_coloraxis_showscale=False)
634
+ fig.update_layout(scattermode="group", scattergap=0.95, mapbox_style="open-street-map")
635
+
636
+ self.add_report_header()
637
+ self.add_to_stage_report(f'Number of unique locations received: {num_unique_locs}')
638
+ # Export all stage data
639
+ locations_df.to_csv(self.csv_path)
640
+ self.add_to_stage_report(f'Stage data saved - {self.csv_path}')
641
+ if num_unique_locs > 0:
642
+ fig.write_html(self.graph_html_path)
643
+ debug_print(self.report)
644
+
645
+ #Generate HTML
646
+ graph_div = fig.to_html(full_html=False, include_plotlyjs='cdn') if num_unique_locs > 0 else "No graph to display"
647
+ self.report_html = self.template_engine.render_template('stage.html', stage=self,
648
+ stage_report=self.report.split('\n'), graph = graph_div)
649
+ return self.report
650
+
651
+
652
+ # TEST CLASS
653
+ TX_STAGES = [ManagementPacketStage, DataPacketStage, SensorPacketStage]
654
+ UNCOUPLED_STAGES = [ManagementPacketStage, DataPacketStage, SensorPacketStage,
655
+ SequentialSequenceIdStage, AliasBridgeIDStage]
656
+
657
+ class UplinkTest(GenericTest):
658
+ def __init__(self, **kwargs):
659
+ self.test_tooltip = "Stages related to gateway BLE scans & MQTT data uploads"
660
+ self.__dict__.update(kwargs)
661
+ super().__init__(**self.__dict__, test_name=type(self).__name__)
662
+ self.all_messages_in_test = []
663
+ stages = list(UNCOUPLED_STAGES)  # copy so the shared stage list is not mutated by append() below
664
+ if self.mqttc.get_serialization() == Serialization.JSON:
665
+ stages = stages + [ApiValidationStage]
666
+ if self.gw_capabilities.geoLocationSupport:
667
+ stages.append(GeolocationStage)
668
+ self.stages = [stage(**self.__dict__) for stage in stages]
669
+
670
+
671
+ def run(self):
672
+ super().run()
673
+ self.test_pass = PERFECT_SCORE
674
+ for stage in self.stages:
675
+ stage.prepare_stage()
676
+ stage.run()
677
+ if self.aggregation_time != 0 and type(stage) in TX_STAGES:
678
+ debug_print(f"Waiting {self.aggregation_time} seconds for packets to be uploaded before processing results..")
679
+ time.sleep(self.aggregation_time)
680
+ self.add_to_test_report(stage.generate_stage_report())
681
+ self.test_pass = PassCriteria.calc_for_test(self.test_pass, stage)
682
+ self.all_messages_in_test.extend(self.mqttc.get_all_messages_from_topic('data'))
683
+