fprime-gds 3.4.4a1__py3-none-any.whl → 3.4.4a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. fprime_gds/common/data_types/event_data.py +6 -1
  2. fprime_gds/common/data_types/exceptions.py +16 -11
  3. fprime_gds/common/loaders/ch_json_loader.py +107 -0
  4. fprime_gds/common/loaders/ch_xml_loader.py +5 -5
  5. fprime_gds/common/loaders/cmd_json_loader.py +85 -0
  6. fprime_gds/common/loaders/dict_loader.py +1 -1
  7. fprime_gds/common/loaders/event_json_loader.py +108 -0
  8. fprime_gds/common/loaders/event_xml_loader.py +10 -6
  9. fprime_gds/common/loaders/json_loader.py +222 -0
  10. fprime_gds/common/loaders/xml_loader.py +31 -9
  11. fprime_gds/common/pipeline/dictionaries.py +38 -3
  12. fprime_gds/common/tools/seqgen.py +4 -4
  13. fprime_gds/common/utils/string_util.py +57 -65
  14. fprime_gds/common/zmq_transport.py +4 -2
  15. fprime_gds/executables/cli.py +21 -39
  16. fprime_gds/executables/comm.py +3 -10
  17. fprime_gds/executables/data_product_writer.py +935 -0
  18. fprime_gds/executables/utils.py +23 -11
  19. fprime_gds/flask/sequence.py +1 -1
  20. fprime_gds/flask/static/addons/commanding/command-input.js +3 -2
  21. {fprime_gds-3.4.4a1.dist-info → fprime_gds-3.4.4a3.dist-info}/METADATA +14 -13
  22. {fprime_gds-3.4.4a1.dist-info → fprime_gds-3.4.4a3.dist-info}/RECORD +27 -22
  23. {fprime_gds-3.4.4a1.dist-info → fprime_gds-3.4.4a3.dist-info}/WHEEL +1 -1
  24. {fprime_gds-3.4.4a1.dist-info → fprime_gds-3.4.4a3.dist-info}/entry_points.txt +2 -3
  25. {fprime_gds-3.4.4a1.dist-info → fprime_gds-3.4.4a3.dist-info}/LICENSE.txt +0 -0
  26. {fprime_gds-3.4.4a1.dist-info → fprime_gds-3.4.4a3.dist-info}/NOTICE.txt +0 -0
  27. {fprime_gds-3.4.4a1.dist-info → fprime_gds-3.4.4a3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,935 @@
1
+ # ------------------------------------------------------------------------------------------
2
+ # Program: Data Product Writer
3
+ #
4
+ # Filename: data_product_writer.py
5
+ #
6
+ # Author: Garth Watney
7
+ #
8
+ # The "Data Product Writer" program is designed to interpret and convert binary data products
9
+ # from the F' flight software into JSON format. Each data product comprises multiple records,
10
+ # and the program's role is to deserialize this binary data using JSON dictionaries and
11
+ # header specifications, producing a readable JSON file.
12
+ #
13
+ # Argument Parsing:
14
+ # The program starts by parsing command-line arguments to obtain the path to the binary
15
+ # data product file.
16
+ #
17
+ # Reading JSON Dictionary:
18
+ # It reads the F' JSON dictionary file, which provides the necessary context for
19
+ # interpreting the binary data, catching any JSON decoding errors to handle malformed
20
+ # files gracefully.
21
+ #
22
+ # Data Validation:
23
+ # Before proceeding, the program checks for duplicate record identifiers within the
24
+ # JSON dictionary to ensure data integrity.
25
+ #
26
+ #
27
+ # Binary File Processing:
28
+ # The program opens the binary file for reading, initializes counters for tracking the
29
+ # total bytes read and a variable for calculating the CRC checksum. The header data is
30
+ # read first, followed by the individual records, each deserialized based on the JSON
31
+ # dictionary and header specification.
32
+ #
33
+ # CRC Validation:
34
+ # After processing the records, the program validates the CRC checksum to ensure the
35
+ # data's integrity has been maintained throughout the reading process.
36
+ #
37
+ # Exception Handling:
38
+ # Throughout its execution, the program is equipped to handle various exceptions,
39
+ # ranging from file not found scenarios to specific data-related errors like CRC
40
+ # mismatches or duplicate record IDs. Each exception triggers an error handling
41
+ # routine that provides informative feedback and terminates the program gracefully.
42
+ #
43
+ # Output Generation:
44
+ # Upon successful processing, the program writes the deserialized data to a JSON file.
45
+ # The output file's name is derived from the binary file's name with a change in its
46
+ # extension to `.json`, facilitating easy association between the input and output files.
47
+ #
48
+ # The "Data Product Writer" program is executed from the command line and requires specific
49
+ # arguments to function correctly. The primary argument it needs is the path to the binary
50
+ # data product file that will be processed.
51
+ #
52
+ # Usage:
53
+ # python data_product_writer.py <binFile> <json dictionary>
54
+ #
55
+ # Where:
56
+ # <binFile> is the path to the binary file generated by the F' flight software that contains
57
+ # the data product to be deserialized and written to a JSON file.
58
+ #
59
+ # <json dictionary> is the path to the json dictionary that is generated upon an F' build
60
+ #
61
+ #
62
+ # The program does not require any additional command-line options or flags for its basic
63
+ # operation. Once executed with the correct binary file path and with the necessary JSON
64
+ # files in place, it will perform the series of steps outlined previously, culminating in
65
+ # the generation of a JSON file with the same base name as the binary file but with a .json
66
+ # extension.
67
+ #
68
+ # ------------------------------------------------------------------------------------------
69
+
70
+ import struct
71
+ import json
72
+ import os
73
+ import sys
74
+ from typing import List, Dict, Union, ForwardRef
75
+ from pydantic import BaseModel, field_validator
76
+ from typing import List, Union
77
+ import argparse
78
+ from binascii import crc32
79
+
80
class bcolors:
    """ANSI terminal escape sequences used to color-code console output
    (see handleException, which wraps error text in FAIL/WARNING/ENDC)."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
89
+
90
+
91
# This defines the data in the container header. When this is ultimately defined in the json dictionary,
# then this can be removed.
#
# The layout mirrors the DPHeader Pydantic model below:
#   typeDefinitions                       - named array/struct types referenced by name from "header"
#   header                                - ordered packet-header fields, read first by get_header_info()
#   headerHash / dataId / dataSize / dataHash - trailing primitive fields read around each record
header_data = {
    "typeDefinitions" : [
        # 32-element array of unsigned bytes carried in the header's UserData field.
        {
            "kind": "array",
            "qualifiedName": "UserDataArray",
            "size": 32,
            "elementType": {
                "name": "U8",
                "kind": "integer",
                "signed": False,
                "size": 8
            }
        },

        # Time tag struct: seconds/useconds plus time base and context bytes.
        {
            "kind": "struct",
            "qualifiedName": "timeStruct",
            "members": {
                "seconds": {
                    "type": {
                        "name": "U32",
                        "kind": "integer",
                        "signed": False,
                        "size": 32
                    }
                },
                "useconds": {
                    "type": {
                        "name": "U32",
                        "kind": "integer",
                        "signed": False,
                        "size": 32
                    }
                },
                "timeBase": {
                    "type": {
                        "name": "U16",
                        "kind": "integer",
                        "signed": False,
                        "size": 16
                    }
                },
                "context": {
                    "type": {
                        "name": "U8",
                        "kind": "integer",
                        "signed": False,
                        "size": 8
                    }
                }
            }
        }
    ],

    "enums" : [
    ],

    # Ordered header fields; get_header_info() deserializes them in this order.
    "header": {
        "PacketDescriptor": {
            "type": {
                "name": "U32",
                "kind": "integer",
                "signed": False,
                "size": 32
            }
        },
        "Id": {
            "type": {
                "name": "U32",
                "kind": "integer",
                "signed": False,
                "size": 32
            }
        },
        "Priority": {
            "type": {
                "name": "U32",
                "kind": "integer",
                "signed": False,
                "size": 32
            }
        },
        "TimeTag": {
            "type": {
                "name": "timeStruct",
                "kind": "qualifiedIdentifier"
            }
        },
        "ProcTypes": {
            "type": {
                "name": "U8",
                "kind": "integer",
                "signed": False,
                "size": 8
            }
        },
        "UserData": {
            "type": {
                "name": "UserDataArray",
                "kind": "qualifiedIdentifier"
            }
        },
        "DpState": {
            "type": {
                "name": "U8",
                "kind": "integer",
                "signed": False,
                "size": 8
            }
        },
        "DataSize": {
            "type": {
                "name": "U16",
                "kind": "integer",
                "signed": False,
                "size": 16
            }
        }
    },

    # CRC32 of the header; get_header_info() reads it and compares with the running CRC.
    "headerHash": {
        "type": {
            "name": "U32",
            "kind": "integer",
            "signed": False,
            "size": 32
        }
    },

    # Record identifier; get_record_data() matches it against RecordStruct.id.
    "dataId": {
        "type": {
            "name": "U32",
            "kind": "integer",
            "signed": False,
            "size": 32
        }
    },

    # Element count preceding array-record payloads.
    "dataSize": {
        "type": {
            "name": "U16",
            "kind": "integer",
            "signed": False,
            "size": 16
        }
    },

    # CRC32 of record data — presumably verified after each record; that code is not in this chunk.
    "dataHash": {
        "type": {
            "name": "U32",
            "kind": "integer",
            "signed": False,
            "size": 32
        }
    }
}

# Deserialize the binary file big endian
BIG_ENDIAN = ">"
252
+
253
+
254
+ # -------------------------------------------------------------------------------------------------------------------------
255
+ # Function parse_args
256
+ #
257
+ # Description:
258
+ # Parse the input arguments either as passed into this function or on the command line
259
+ # -------------------------------------------------------------------------------------------------------------------------
260
def parse_args(args=None):
    """Parse command-line arguments.

    :param args: optional argument list; falls back to sys.argv[1:] when None
    :return: argparse.Namespace with 'binFile' and 'jsonDict' attributes
    """
    parser = argparse.ArgumentParser(description='Data Product Writer.')
    parser.add_argument('binFile', help='Data Product Binary file')
    parser.add_argument('jsonDict', help='JSON Dictionary')
    cli_args = sys.argv[1:] if args is None else args
    return parser.parse_args(cli_args)
267
+
268
+
269
+ # -------------------------------------------------------------------------------------
270
+ # These are common Pydantic classes that
271
+ # are used by both the dictionary json and the data product json
272
+ #
273
+ # -------------------------------------------------------------------------------------
274
+
275
class IntegerType(BaseModel):
    """Primitive integer type entry from the JSON dictionary (e.g. U8, I32)."""
    name: str
    kind: str
    size: int      # width in bits
    signed: bool

    @field_validator('kind')
    def _check_kind(cls, v):
        # The discriminator for this model must be exactly "integer".
        if v == "integer":
            return v
        raise ValueError('Check the "kind" field')
286
+
287
class FloatType(BaseModel):
    """Primitive floating-point type entry from the JSON dictionary (F32/F64)."""
    name: str
    kind: str
    size: int      # width in bits

    @field_validator('kind')
    def _check_kind(cls, v):
        # The discriminator for this model must be exactly "float".
        if v == "float":
            return v
        raise ValueError('Check the "kind" field')
297
+
298
class BoolType(BaseModel):
    """Primitive boolean type entry from the JSON dictionary."""
    name: str
    kind: str
    size: int      # width in bits

    @field_validator('kind')
    def _check_kind(cls, v):
        # The discriminator for this model must be exactly "bool".
        if v == "bool":
            return v
        raise ValueError('Check the "kind" field')
308
+
309
# Forward references so models below can mention Type/ArrayType before the
# classes exist; resolved later by the model_rebuild() calls.
Type = ForwardRef('Type')
ArrayType = ForwardRef('ArrayType')
311
+
312
class QualifiedType(BaseModel):
    """Reference to a named type definition, resolved via its qualifiedName."""
    kind: str
    name: str      # qualifiedName of the referenced type definition

    @field_validator('kind')
    def _check_kind(cls, v):
        # The discriminator for this model must be exactly "qualifiedIdentifier".
        if v == "qualifiedIdentifier":
            return v
        raise ValueError('Check the "kind" field')
321
+
322
class StructMember(BaseModel):
    """One member of a struct definition: its type and an element count."""
    type: Union[IntegerType, FloatType, BoolType, QualifiedType]
    # Number of consecutive elements of this member; 1 means a scalar
    # (get_struct_item iterates range(member.size)).
    size: int = 1
325
+
326
class StructType(BaseModel):
    """Named struct type definition: an ordered mapping of member name to member."""
    kind: str
    qualifiedName: str
    members: Dict[str, StructMember]

    @field_validator('kind')
    def _check_kind(cls, v):
        # The discriminator for this model must be exactly "struct".
        if v == "struct":
            return v
        raise ValueError('Check the "kind" field')
336
+
337
class ArrayType(BaseModel):
    """Named fixed-size array type definition."""
    kind: str
    qualifiedName: str
    size: int      # number of elements
    elementType: Union[StructType, ArrayType, IntegerType, FloatType, QualifiedType]

    @field_validator('kind')
    def _check_kind(cls, v):
        # The discriminator for this model must be exactly "array".
        if v == "array":
            return v
        raise ValueError('Check the "kind" field')
348
+
349
class EnumeratedConstant(BaseModel):
    """A single name/value pair belonging to an EnumType."""
    name: str
    value: int
352
+
353
+
354
class EnumType(BaseModel):
    """Named enum type: a backing integer representation plus named constants."""
    kind: str
    qualifiedName: str
    # Wire type the enum value is serialized as (read via read_field).
    representationType: IntegerType
    enumeratedConstants: List[EnumeratedConstant]

    @field_validator('kind')
    def _check_kind(cls, v):
        # The discriminator for this model must be exactly "enum".
        if v == "enum":
            return v
        raise ValueError('Check the "kind" field')
366
+
367
+
368
class Type(BaseModel):
    """Wrapper for a single "type" entry; the union covers every concrete kind."""
    type: Union[StructType, ArrayType, IntegerType, FloatType, BoolType, QualifiedType]
370
+
371
class RecordStruct(BaseModel):
    """One record declaration from the F' JSON dictionary."""
    name: str
    type: Union[StructType, ArrayType, IntegerType, FloatType, BoolType, QualifiedType]
    # True means a variable-length record: get_record_data reads a 'dataSize'
    # count followed by that many elements of `type`.
    array: bool
    # Matched against the 'dataId' value read from the binary stream.
    id: int
    annotation: str
377
+
378
class ContainerStruct(BaseModel):
    """One container declaration from the F' JSON dictionary."""
    name: str
    id: int
    defaultPriority: int
    annotation: str
383
+
384
+ # -------------------------------------------------------------------------------------
385
+ # These Pydantic classes define the FPRIME_DICTIONARY_FILE
386
+ #
387
+ # -------------------------------------------------------------------------------------
388
+
389
class FprimeDict(BaseModel):
    """Top-level schema of the F' JSON dictionary file (FPRIME_DICTIONARY_FILE)."""
    metadata: Dict[str, Union[str, List[str]]]
    typeDefinitions: List[Union[ArrayType, StructType, EnumType]]
    records: List[RecordStruct]
    containers: List[ContainerStruct]
394
+
395
+ # -------------------------------------------------------------------------------------
396
+ # This Pydantic class defines the data product json
397
+ #
398
+ # -------------------------------------------------------------------------------------
399
+
400
class DPHeader(BaseModel):
    """Schema of the data-product header specification (the header_data constant)."""
    typeDefinitions: List[Union[ArrayType, StructType, EnumType]]
    # Ordered header fields, deserialized first from the binary file.
    header: Dict[str, Type]
    # CRC32 of the header bytes, stored right after the header fields.
    headerHash: Type
    # Per-record fields: identifier, element count (arrays), and data CRC.
    dataId: Type
    dataSize: Type
    dataHash: Type
407
+
408
# Resolve the ForwardRef placeholders now that all models are defined.
ArrayType.model_rebuild()
StructType.model_rebuild()
Type.model_rebuild()

# Any concrete type kind that can appear in a "type" entry.
TypeKind = Union[StructType, ArrayType, IntegerType, FloatType, EnumType, BoolType, QualifiedType]
# Named type definitions that can be looked up by qualifiedName.
TypeDef = Union[ArrayType, StructType]

# Map the JSON types to struct format strings
type_mapping = {
    'U8': 'B',    # Unsigned 8-bit integer
    'I8': 'b',    # Signed 8-bit integer
    'U16': 'H',   # Unsigned 16-bit integer
    'I16': 'h',   # Signed 16 bit integer
    'U32': 'I',   # Unsigned 32-bit integer
    'I32': 'i',   # Signed 32-bit integer
    'I64': 'q',   # Signed 64-bit integer
    'U64': 'Q',   # Unsigned 64-bit integer
    'F32': 'f',   # 32-bit float
    'F64': 'd',   # 64-bit float
    'bool': '?'   # An 8 bit boolean
    # Add more mappings as needed
}
430
+
431
+ # --------------------------------------------------------------------------------------------------
432
+ # class RecordIDNotFound
433
+ #
434
+ # Description:
435
+ # Custom exception class for signaling the absence of a specified record ID in a JSON dictionary.
436
+ # It stores the missing record ID and provides a descriptive error message when invoked.
437
+ #
438
+ # Attributes:
439
+ # recordId: The missing record ID.
440
+ #
441
+ # Methods:
442
+ # __init__(self, recordId): Initializes the exception with the missing record ID.
443
+ # __str__(self): Returns an error message indicating the missing record ID.
444
+ # ---------------------------------------------------------------------------------------------------
445
+
446
class RecordIDNotFound(Exception):
    """Raised when a record ID read from the binary stream has no match
    in the JSON dictionary's records."""

    def __init__(self, recordId):
        self.recordId = recordId

    def __str__(self):
        return "Record ID {} was not found in the JSON dictionary".format(self.recordId)
453
+
454
+ # ----------------------------------------------------------------------------------------------------------------
455
+ # class DictionaryError
456
+ #
457
+ # Description:
458
+ # Custom exception class for signaling errors while parsing a JSON dictionary file.
459
+ # It stores the file name and the line number where the error occurred, providing context in the error message.
460
+ #
461
+ # Attributes:
462
+ # jsonFile: The name of the JSON file being parsed.
463
+ # lineNo: The line number in the file where the error was detected.
464
+ #
465
+ # Methods:
466
+ # __init__(self, jsonFile, lineNo): Initializes the exception with the JSON file name and the line number.
467
+ # __str__(self): Returns an error message indicating the file and line number where the error occurred.
468
+ # ----------------------------------------------------------------------------------------------------------------
469
+
470
class DictionaryError(Exception):
    """Raised when parsing a JSON dictionary file fails; carries the file
    name and the line number where the error was detected."""

    def __init__(self, jsonFile, lineNo):
        self.jsonFile = jsonFile
        self.lineNo = lineNo

    def __str__(self):
        return "DictionaryError parsing {}, line number: {}".format(self.jsonFile, self.lineNo)
478
+
479
+ # -------------------------------------------------------------------------------------------------------------------
480
+ # class CRCError
481
+ #
482
+ # Description:
483
+ # Custom exception class for signaling CRC hash mismatches during data validation.
484
+ # It specifies whether the error occurred in the header or data, the expected CRC value, and the calculated CRC value.
485
+ #
486
+ # Attributes:
487
+ # headerOrData: Indicates whether the mismatch occurred in the header or the data section.
488
+ # expected: The expected CRC value.
489
+ # calculated: The calculated CRC value that did not match the expected value.
490
+ #
491
+ # Methods:
492
+ # __init__(self, headerOrData, expected, calculated): Initializes the exception with details of the mismatch.
493
+ # __str__(self): Returns an error message detailing the mismatch with expected and calculated CRC values.
494
+ # --------------------------------------------------------------------------------------------------------------------
495
+
496
class CRCError(Exception):
    """Raised on a CRC32 mismatch; records whether it was the header or the
    data section, plus the expected and calculated hash values."""

    def __init__(self, headerOrData, expected, calculated):
        self.headerOrData = headerOrData
        self.expected = expected
        self.calculated = calculated

    def __str__(self):
        # {:#x} renders the values as 0x-prefixed lowercase hex.
        return "CRC Hash mismatch for {}: Expected {:#x}, Calculated {:#x}".format(
            self.headerOrData, self.expected, self.calculated
        )
505
+
506
+ # --------------------------------------------------------------------------------------------------------------------
507
+ # class DuplicateRecordID
508
+ #
509
+ # Description:
510
+ # Custom exception class for indicating the presence of a duplicate record identifier in a JSON dictionary.
511
+ # It stores the duplicate identifier and provides a descriptive error message when invoked.
512
+ #
513
+ # Attributes:
514
+ # identifier: The duplicate record identifier that triggered the exception.
515
+ #
516
+ # Methods:
517
+ # __init__(self, identifier): Initializes the exception with the duplicate identifier.
518
+ # __str__(self): Returns an error message indicating the duplicate record identifier.
519
+ # ----------------------------------------------------------------------------------------------------------------------
520
class DuplicateRecordID(Exception):
    """Raised when the JSON dictionary contains two records with the same id."""

    def __init__(self, identifier):
        self.identifier = identifier

    def __str__(self):
        return "In the Dictionary JSON there is a duplicate Record identifier: {}".format(self.identifier)
527
+
528
+ # --------------------------------------------------------------------------------------------------------------------
529
+ # class DataProductWriter
530
+ #
531
+ # Description:
532
+ # This is the main class that processes the data. It is a container for managing global variables
533
+ #
534
+ # --------------------------------------------------------------------------------------------------------------------
535
class DataProductWriter:
    """Deserializes an F' data-product binary file using the JSON dictionary
    and header specification, maintaining a running CRC32 and byte count."""

    def __init__(self, jsonDict, binaryFileName):
        # JSON dictionary input as supplied by the caller.
        self.jsonDict = jsonDict
        # Path of the binary data-product file to process.
        self.binaryFileName = binaryFileName
        # Running count of bytes consumed from the binary file.
        self.totalBytesRead = 0
        # Running CRC32 accumulator; reset after the header hash is verified.
        self.calculatedCRC = 0
        # NOTE(review): self.binaryFile (used by read_and_deserialize) is not
        # assigned here — presumably the file is opened elsewhere before
        # processing begins; confirm against the caller.
541
+
542
+
543
+ # ----------------------------------------------------------------------------------------------
544
+ # Function: read_and_deserialize
545
+ #
546
+ # Description:
547
+ # Reads specified bytes from a binary file, updates CRC, increments byte count,
548
+ # and deserializes bytes into an integer.
549
+ #
550
+ # Parameters:
551
+ # nbytes (int): Number of bytes to read.
552
+ # intType (IntegerType): Integer type for deserialization.
553
+ #
554
+ # Returns:
555
+ # int: Deserialized integer.
556
+ #
557
+ # Exceptions:
558
+ # IOError: If reading specified bytes fails.
559
+ # KeyError: If intType is unrecognized in type_mapping.
560
+ # ----------------------------------------------------------------------------------------------
561
+
562
+ def read_and_deserialize(self, nbytes: int, intType: IntegerType) -> int:
563
+
564
+ bytes_read = self.binaryFile.read(nbytes)
565
+ if len(bytes_read) != nbytes:
566
+ raise IOError(f"Tried to read {nbytes} bytes from the binary file, but failed.")
567
+
568
+ self.calculatedCRC = crc32(bytes_read, self.calculatedCRC) & 0xffffffff
569
+ self.totalBytesRead += nbytes
570
+
571
+ try:
572
+ format_str = f'{BIG_ENDIAN}{type_mapping[intType.name]}'
573
+ except KeyError:
574
+ raise KeyError(f"Unrecognized JSON Dictionary Type: {intType}")
575
+ data = struct.unpack(format_str, bytes_read)[0]
576
+
577
+
578
+ return data
579
+
580
+ # -----------------------------------------------------------------------------------------------------------------------
581
+ # Function: get_struct_type
582
+ #
583
+ # Description:
584
+ # Searches for a structure with a matching identifier in a list of type definitions
585
+ # and returns the matching structure if found.
586
+ #
587
+ # Parameters:
588
+ # typeList (List[TypeDef]): A list of type definitions to search through.
589
+ # identifier (str): The identifier to match against the qualifiedName attribute of each structure.
590
+ #
591
+ # Returns:
592
+ # TypeDef: The structure that matches the identifier, or None if no match is found.
593
+ #
594
+ # Exceptions:
595
+ # No explicit exceptions are raised by this function.
596
+ # -----------------------------------------------------------------------------------------------------------------------
597
+
598
+ def get_struct_type(self, typeList: List[TypeDef], identifier: str) -> TypeDef:
599
+
600
+ for structure in typeList:
601
+ if structure.qualifiedName == identifier:
602
+ return structure
603
+
604
+ return None
605
+
606
+ # -----------------------------------------------------------------------------------------------------------------------
607
+ # Function: read_field
608
+ #
609
+ # Description:
610
+ # Reads and deserializes a field from a binary file, determining the field's size and type
611
+ # based on the provided configuration, which may be an integer, float, or boolean.
612
+ #
613
+ # Parameters:
614
+ # field_config (Union[IntegerType, FloatType, BoolType]): Configuration specifying the type and size
615
+ # of the field to read.
616
+ #
617
+ # Returns:
618
+ # Union[int, float, bool]: The deserialized value of the field, which can be an integer, float, or boolean.
619
+ #
620
+ # Exceptions:
621
+ # AssertionError: If the field_config is not an IntegerType, FloatType, or BoolType.
622
+ # -----------------------------------------------------------------------------------------------------------------------
623
+
624
+ def read_field(self, field_config: Union[IntegerType, FloatType, BoolType]) -> Union[int, float, bool]:
625
+
626
+ if type(field_config) is IntegerType:
627
+ sizeBytes = field_config.size // 8
628
+
629
+ elif type(field_config) is FloatType:
630
+ sizeBytes = field_config.size // 8
631
+
632
+ elif type(field_config) is BoolType:
633
+ sizeBytes = field_config.size // 8
634
+
635
+ else:
636
+ assert False, "Unsupported typeKind encountered"
637
+
638
+ return self.read_and_deserialize(sizeBytes, field_config)
639
+
640
+
641
+ # -----------------------------------------------------------------------------------------------------------------------
642
+ # Function: get_struct_item
643
+ #
644
+ # Description:
645
+ # This function recursively reads and processes a field from a binary file, adding it to a parent dictionary.
646
+ # The process varies depending on the field's type:
647
+ # - For basic types (IntegerType, FloatType, BoolType), it directly reads and assigns the value.
648
+ # - For EnumType, it reads the value, finds the corresponding enum identifier, and assigns it.
649
+ # - For ArrayType, it creates a list, iteratively fills it with elements read recursively, and assigns the list.
650
+ # - For StructType, it constructs a nested dictionary by recursively processing each struct member.
651
+ # - For QualifiedType, it resolves the actual type from typeList and recursively processes the field.
652
+ # This approach allows the function to handle complex, nested data structures by adapting to the field's type,
653
+ # ensuring each is read and stored appropriately in the parent dictionary.
654
+ #
655
+ # Parameters:
656
+ # field_name (str): The name of the field to be read and added to the dictionary.
657
+ # typeKind (TypeKind): The type information of the field, determining how it should be read.
658
+ # typeList (List[TypeDef]): A list of type definitions, used for resolving qualified types.
659
+ # parent_dict (Dict[str, int]): The dictionary to which the read field value will be added.
660
+ #
661
+ # Returns:
662
+ # None: The function does not return a value but modifies parent_dict in place.
663
+ #
664
+ # Exceptions:
665
+ # AssertionError: If an unsupported typeKind is encountered.
666
+
667
+ #
668
+ # -----------------------------------------------------------------------------------------------------------------------
669
+
670
    def get_struct_item(self, field_name: str, typeKind: TypeKind, typeList: List[TypeDef], parent_dict: Dict[str, int]):
        """Recursively read one field from the binary file and store it in
        parent_dict under field_name.

        Primitive kinds are read directly; enums are read as their backing
        integer and mapped back to the constant's name; arrays and structs
        recurse per element/member; qualified types are resolved through
        typeList and retried. Modifies parent_dict in place; returns None.

        :raises AssertionError: if typeKind is not one of the supported kinds
        """

        if isinstance(typeKind, IntegerType):
            parent_dict[field_name] = self.read_field(typeKind)

        elif isinstance(typeKind, FloatType):
            parent_dict[field_name] = self.read_field(typeKind)

        elif isinstance(typeKind, BoolType):
            parent_dict[field_name] = self.read_field(typeKind)


        elif isinstance(typeKind, EnumType):
            # Read the raw backing value, then translate it to the enum name.
            value = self.read_field(typeKind.representationType)
            enum_mapping = typeKind.enumeratedConstants
            reverse_mapping = {enum.value: enum.name for enum in enum_mapping}
            parent_dict[field_name] = reverse_mapping[value]


        elif isinstance(typeKind, ArrayType):
            # Fixed-size array: read typeKind.size elements of elementType.
            array_list = []
            for item in range(typeKind.size):
                element_dict = {}
                self.get_struct_item("arrayElement", typeKind.elementType, typeList, element_dict)
                array_list.append(element_dict["arrayElement"])
            parent_dict[field_name] = array_list

        elif isinstance(typeKind, StructType):
            # Each member is read member.size times; every read is kept as its
            # own single-key dict, so the struct becomes a list of dicts.
            array_list = []
            for key, member in typeKind.members.items():
                for i in range(member.size):
                    element_dict = {}
                    self.get_struct_item(key, member.type, typeList, element_dict)
                    #array_list.append(element_dict[key])
                    array_list.append(element_dict)
            parent_dict[field_name] = array_list

        elif isinstance(typeKind, QualifiedType):
            # Resolve the named type definition, then read it as that type.
            qualType = self.get_struct_type(typeList, typeKind.name)
            self.get_struct_item(field_name, qualType, typeList, parent_dict)

        else:
            assert False, "Unsupported typeKind encountered"
713
+
714
+
715
+ # -----------------------------------------------------------------------------------------------------------------------
716
+ # Function: get_header_info
717
+ #
718
+ # Description:
719
+ # Extracts header information from a given DPHeader object, populating and returning a dictionary with the data.
720
+ # Iterates over header fields, reading each and updating a root dictionary with the field values. After processing all fields,
721
+ # it reads and compares the header hash with a computed CRC value to verify data integrity. If the CRC check fails, it raises
722
+ # a CRCError. This function demonstrates a pattern of custom exceptions to manage and validate
723
+ # binary data parsing.
724
+ #
725
+ # Parameters:
726
+ # headerJSON (DPHeader): The DPHeader object containing header information and type definitions.
727
+ #
728
+ # Returns:
729
+ # Dict[str, int]: A dictionary populated with the header fields and their corresponding values.
730
+ #
731
+ # Exceptions:
732
+ # CRCError: Raised if the computed CRC does not match the expected header hash value.
733
+ # -----------------------------------------------------------------------------------------------------------------------
734
+
735
    def get_header_info(self, headerJSON: DPHeader) -> Dict[str, int]:
        """Read every header field from the binary file, verify the header
        CRC, and return the header values as a dictionary.

        :param headerJSON: header specification with field types and definitions
        :return: dict of header field name -> deserialized value (plus 'headerHash')
        :raises CRCError: if the stored header hash differs from the running CRC
        """

        header_fields = headerJSON.header
        rootDict = {}

        for field_name, field_info in header_fields.items():
            self.get_struct_item(field_name, field_info.type, headerJSON.typeDefinitions, rootDict)

        # Snapshot the CRC BEFORE reading the stored hash: the hash bytes
        # themselves must not be folded into the value being compared.
        computedHash = self.calculatedCRC
        rootDict['headerHash'] = self.read_field(headerJSON.headerHash.type)
        # Restart the CRC accumulator for the record data that follows.
        self.calculatedCRC = 0

        if rootDict['headerHash'] != computedHash:
            raise CRCError("Header", rootDict['headerHash'], computedHash)

        return rootDict
751
+
752
+ # ------------------------------------------------------------------------------------------
753
+ # Function: get_record_data
754
+ #
755
+ # Description:
756
+ # Retrieves and processes the record data based on a given header and dictionary
757
+ # definition. The function first reads the 'dataId' from the header to identify the
758
+ # relevant record. It then processes the record's data, handling both scalar values and
759
+ # arrays by reading each item according to its type. For arrays, it also reads the
760
+ # 'dataSize' from the header to determine the number of items to process.
761
+ #
762
+ # Parameters:
763
+ # - headerJSON (DPHeader): An object containing the header information, including
764
+ # identifiers and sizes for the data to be processed.
765
+ # - dictJSON (FprimeDict): An object containing definitions for records and types,
766
+ # which are used to process the data correctly.
767
+ #
768
+ # Returns:
769
+ # Dict[str, int]: A dictionary with the processed data, including 'dataId', optionally
770
+ # 'size' for arrays, and the data itself. For arrays, the data is indexed by its position
771
+ # within the array.
772
+
773
+ # ------------------------------------------------------------------------------------------
774
+
775
    def get_record_data(self, headerJSON: DPHeader, dictJSON: FprimeDict) -> Dict[str, int]:
        """Read one record from the binary file: its dataId, then its payload
        as described by the matching record in the dictionary.

        :param headerJSON: header spec providing the dataId/dataSize field types
        :param dictJSON: dictionary whose records are searched for the dataId
        :return: dict with 'dataId', optionally 'size' (array records), and 'data'
        :raises RecordIDNotFound: if no record in the dictionary matches dataId
        """
        rootDict = {}
        # Go through all the Records and find the one that matches recordId
        rootDict['dataId'] = self.read_field(headerJSON.dataId.type)
        for record in dictJSON.records:
            if record.id == rootDict['dataId']:
                print(f'Processing Record ID {record.id}')
                if record.array:
                    # Variable-length record: an element count precedes the data.
                    dataSize = self.read_field(headerJSON.dataSize.type)
                    rootDict['size'] = dataSize
                    array_data = []
                    for i in range(dataSize):
                        element_dict = {}
                        self.get_struct_item("arrayElement", record.type, dictJSON.typeDefinitions, element_dict)
                        array_data.append(element_dict["arrayElement"])
                    rootDict['data'] = array_data
                else:
                    # For non-array records, directly use 'data' as the key.
                    self.get_struct_item("data", record.type, dictJSON.typeDefinitions, rootDict)

                return rootDict
        # Falls through only when no record id matched.
        raise RecordIDNotFound(rootDict['dataId'])
797
+
798
+
799
+
800
+ # --------------------------------------------------------------------------------------------------------------------
801
+ # Function handleException
802
+ #
803
+ # Description:
804
+ # Function for handling exceptions by displaying an error message and terminating the program.
805
+ # It displays the provided exception message with color-coded output for emphasis.
806
+ # After displaying the messages, it immediately exits the program, halting further execution.
807
+ #
808
+ # Parameters:
809
+ # msg (str): The specific error message to be displayed.
810
+ #
811
+ # Returns:
812
+ # None: This function does not return a value. It terminates the program execution with sys.exit().
813
+ #
814
+ # Exceptions:
815
+ # No explicit exceptions are raised by this function, but it triggers the program's termination.
816
+ # -----------------------------------------------------------------------------------------------------------------
817
def handleException(self, msg):
    """Display *msg* as a color-coded error and terminate the program.

    Parameters:
        msg (str): The specific error message to be displayed.

    Returns:
        Does not return; terminates the process via sys.exit().
    """
    # Fixed: dropped the stray f-prefix (the string has no placeholders).
    errorMessage = "*** Error in processing: "
    print(bcolors.FAIL)
    print(errorMessage)
    print(bcolors.WARNING)
    print(msg)
    print(bcolors.ENDC)
    # Fixed: exit with a non-zero status so shells and callers can detect
    # the failure; the bare sys.exit() previously reported success (0).
    sys.exit(1)
825
+
826
+
827
+ # -------------------------------------------------------------------------------------------------------------------------
828
+ # Function check_record_data
829
+ #
830
+ # Description:
831
+ # Validates record data in a given dictionary JSON object by ensuring there are no duplicate record identifiers.
832
+ # Iterates through the records in the dictionary, checking each record's identifier against a set that
833
+ # tracks unique identifiers. If a duplicate identifier is detected, a DuplicateRecordID exception is raised.
834
+ #
835
+ # Parameters:
836
+ # dictJSON (FprimeDict): The dictionary JSON object containing records to be validated.
837
+ #
838
+ # Returns:
839
+ # None: This function does not return a value. It either completes successfully or raises an exception.
840
+ #
841
+ # Exceptions:
842
+ # DuplicateRecordID: Raised if a duplicate record identifier is found in the dictionary JSON object.
843
+
844
+ # -----------------------------------------------------------------------------------------------------------------------
845
def check_record_data(self, dictJSON: FprimeDict):
    """Validate that every record identifier in *dictJSON* is unique.

    Parameters:
        dictJSON (FprimeDict): dictionary object whose records are checked.

    Raises:
        DuplicateRecordID: a record id appears more than once.
    """
    seen_ids = set()
    for record in dictJSON.records:
        if record.id in seen_ids:
            raise DuplicateRecordID(record.id)
        seen_ids.add(record.id)
852
+
853
+
854
+
855
+ # -------------------------------------------------------------------------------------------------------------------------
856
+ # Function process
857
+ #
858
+ # Description:
859
+ # Main processing
860
+ # -------------------------------------------------------------------------------------------------------------------------
861
def process(self):
    """Main processing: parse the F Prime JSON dictionary, decode the binary
    data-product file (header, records, data checksum), and write the decoded
    result as <binary-basename>.json in the current directory.
    """

    try:

        # Read the F prime JSON dictionary
        print(f"Parsing {self.jsonDict}...")
        try:
            with open(self.jsonDict, 'r') as fprimeDictFile:
                dictJSON = FprimeDict(**json.load(fprimeDictFile))
        except json.JSONDecodeError as e:
            # Re-raise as a project error carrying the file name and line.
            raise DictionaryError(self.jsonDict, e.lineno)

        # Reject dictionaries containing duplicate record ids up front.
        self.check_record_data(dictJSON)

        # NOTE(review): header_data is not defined in this method — presumably
        # a module-level constant describing the DP header layout; verify.
        headerJSON = DPHeader(**header_data)

        with open(self.binaryFileName, 'rb') as self.binaryFile:

            # Read the header data up until the Records
            headerData = self.get_header_info(headerJSON)

            # Read the total data size
            dataSize = headerData['DataSize']

            # Restart the count of bytes read
            self.totalBytesRead = 0

            # First output entry is the decoded header itself.
            recordList = [headerData]

            # Decode records until the declared data size is consumed.
            while self.totalBytesRead < dataSize:

                recordData = self.get_record_data(headerJSON, dictJSON)
                recordList.append(recordData)

            # Capture the CRC computed while reading, before consuming the
            # stored checksum field (read_field updates self.calculatedCRC).
            computedCRC = self.calculatedCRC
            # Read the data checksum
            headerData['dataHash'] = self.read_field(headerJSON.dataHash.type)

            if computedCRC != headerData['dataHash']:
                raise CRCError("Data", headerData['dataHash'], computedCRC)


    except (FileNotFoundError, RecordIDNotFound, IOError, KeyError, json.JSONDecodeError,
            DictionaryError, CRCError, DuplicateRecordID) as e:
        # handleException prints the error and terminates the process.
        self.handleException(e)

    except (ValueError) as e:
        # NOTE(review): .errors() exists on pydantic's ValidationError (a
        # ValueError subclass) but not on plain ValueError — a plain
        # ValueError here would raise AttributeError; confirm intent.
        error = e.errors()[0]
        msg = f'ValueError in JSON file {error["loc"]}: {error["msg"]}'
        self.handleException(msg)


    # Output the generated json to a file
    baseName = os.path.basename(self.binaryFileName)
    outputJsonFile = os.path.splitext(baseName)[0] + '.json'
    # Strip the macOS resource-fork prefix ('._') from the output name.
    if outputJsonFile.startswith('._'):
        outputJsonFile = outputJsonFile.replace('._', '')
    with open(outputJsonFile, 'w') as file:
        json.dump(recordList, file, indent=2)

    print(f'Output data generated in {outputJsonFile}')
922
+
923
+
924
+ # ------------------------------------------------------------------------------------------
925
+ # main program
926
+ #
927
+ # ------------------------------------------------------------------------------------------
928
def main():
    """Command-line entry point: parse arguments and run the writer."""
    args = parse_args()
    writer = DataProductWriter(args.jsonDict, args.binFile)
    writer.process()
931
+
932
+ if __name__ == "main":
933
+ sys.exit(main())
934
+
935
+