zea 0.0.7-py3-none-any.whl → 0.0.8-py3-none-any.whl

Files changed (43)
  1. zea/__init__.py +1 -1
  2. zea/backend/tensorflow/dataloader.py +0 -4
  3. zea/beamform/pixelgrid.py +1 -1
  4. zea/data/__init__.py +0 -9
  5. zea/data/augmentations.py +221 -28
  6. zea/data/convert/__init__.py +1 -6
  7. zea/data/convert/__main__.py +123 -0
  8. zea/data/convert/camus.py +99 -39
  9. zea/data/convert/echonet.py +183 -82
  10. zea/data/convert/echonetlvh/README.md +2 -3
  11. zea/data/convert/echonetlvh/{convert_raw_to_usbmd.py → __init__.py} +173 -102
  12. zea/data/convert/echonetlvh/manual_rejections.txt +73 -0
  13. zea/data/convert/echonetlvh/precompute_crop.py +43 -64
  14. zea/data/convert/picmus.py +37 -40
  15. zea/data/convert/utils.py +86 -0
  16. zea/data/convert/{matlab.py → verasonics.py} +33 -61
  17. zea/data/data_format.py +124 -4
  18. zea/data/dataloader.py +12 -7
  19. zea/data/datasets.py +109 -70
  20. zea/data/file.py +91 -82
  21. zea/data/file_operations.py +496 -0
  22. zea/data/preset_utils.py +1 -1
  23. zea/display.py +7 -8
  24. zea/internal/checks.py +6 -12
  25. zea/internal/operators.py +4 -0
  26. zea/io_lib.py +108 -160
  27. zea/models/__init__.py +1 -1
  28. zea/models/diffusion.py +62 -11
  29. zea/models/lv_segmentation.py +2 -0
  30. zea/ops.py +398 -158
  31. zea/scan.py +18 -8
  32. zea/tensor_ops.py +82 -62
  33. zea/tools/fit_scan_cone.py +90 -160
  34. zea/tracking/__init__.py +16 -0
  35. zea/tracking/base.py +94 -0
  36. zea/tracking/lucas_kanade.py +474 -0
  37. zea/tracking/segmentation.py +110 -0
  38. zea/utils.py +11 -2
  39. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/METADATA +3 -1
  40. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/RECORD +43 -35
  41. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/WHEEL +0 -0
  42. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/entry_points.txt +0 -0
  43. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/licenses/LICENSE +0 -0
zea/data/convert/echonetlvh/{convert_raw_to_usbmd.py → __init__.py}
@@ -4,24 +4,18 @@ Script to convert the EchoNet-LVH database to zea format.
 Each video is cropped so that the scan cone is centered
 without padding, such that it can be converted to polar domain.
 
-This cropping requires first computing scan cone parameters
-using `data/convert/echonetlvh/precompute_crop.py`, which
-are then passed to this script.
-"""
-
-import os
-
-os.environ["KERAS_BACKEND"] = "jax"
+.. note::
+    This cropping requires first computing scan cone parameters
+    using :mod:`zea.data.convert.echonetlvh.precompute_crop`, which
+    are then passed to this script.
 
+For more information about the dataset, refer to the following link:
 
-if __name__ == "__main__":
-    from zea import init_device
-
-    init_device("auto:1")
+- The original dataset can be found at `this link <https://stanfordaimi.azurewebsites.net/datasets/5b7fcc28-579c-4285-8b72-e4238eac7bd1>`_.
+"""
 
-import argparse
 import csv
-import sys
+import os
 from concurrent.futures import ProcessPoolExecutor, as_completed
 from pathlib import Path
 
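With 0.0.8 the module no longer parses its own command line; the new `convert_echonetlvh(args)` function further down is driven by the new `zea/data/convert/__main__.py` entry point. A minimal sketch of invoking it directly from Python, assuming only the argument names visible in this diff (src, dst, batch, max_files, no_rejection, no_hyperthreading, convert_measurements, convert_images); the paths are placeholders:

    from argparse import Namespace

    from zea.data.convert.echonetlvh import convert_echonetlvh

    args = Namespace(
        src="/data/EchoNet-LVH",      # hypothetical path to the raw download
        dst="/data/echonetlvh-zea",   # hypothetical output directory
        batch=None,                   # or e.g. "Batch2" to restrict the search
        max_files=None,               # limit the number of files (for testing)
        no_rejection=False,           # False: apply manual_rejections.txt
        no_hyperthreading=False,      # False: use ProcessPoolExecutor workers
        convert_measurements=False,   # when both flags are False, both run
        convert_images=False,
    )
    convert_echonetlvh(args)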
@@ -30,50 +24,71 @@ import numpy as np
 from jax import jit, vmap
 from tqdm import tqdm
 
+from zea import log
 from zea.data import generate_zea_dataset
 from zea.data.convert.echonet import H5Processor
+from zea.data.convert.echonetlvh.precompute_crop import precompute_cone_parameters
+from zea.data.convert.utils import load_avi, unzip
 from zea.display import cartesian_to_polar_matrix
-from zea.io_lib import load_video
 from zea.tensor_ops import translate
 
 
-def get_args():
-    """Parse command line arguments."""
-    parser = argparse.ArgumentParser(description="Convert EchoNet-LVH to zea format")
-    parser.add_argument(
-        "--source",
-        type=str,
-        required=True,
-    )
-    parser.add_argument("--output", type=str, required=True)
-    parser.add_argument("--output_numpy", type=str, default=None)
-    parser.add_argument("--file_list", type=str, help="Optional path to list of files")
-    parser.add_argument("--use_hyperthreading", action="store_true", help="Enable hyperthreading")
-    parser.add_argument(
-        "--batch",
-        type=str,
-        help="Specify which BatchX directory to process, e.g. --batch=Batch2",
-    )
-    parser.add_argument(
-        "--max_files",
-        type=int,
-        default=None,
-        help="Maximum number of files to process (for testing)",
-    )
-    # if neither is specified, both will be converted
-    parser.add_argument(
-        "--convert_measurements",
-        action="store_true",
-        help="Only convert measurements CSV file",
-    )
-    parser.add_argument("--convert_images", action="store_true", help="Only convert image files")
-    return parser.parse_args()
+def overwrite_splits(source_dir):
+    """
+    Overwrite MeasurementsList.csv splits based on manual_rejections.txt.
+
+    Args:
+        source_dir: Source directory containing MeasurementsList.csv
+    Returns:
+        None
+    """
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    rejection_path = os.path.join(current_dir, "manual_rejections.txt")
+    try:
+        with open(rejection_path) as f:
+            rejected_hashes = [line.strip() for line in f]
+    except FileNotFoundError:
+        log.warning(f"{rejection_path} not found, skipping rejections.")
+        return
+
+    csv_path = Path(source_dir) / "MeasurementsList.csv"
+    temp_path = Path(source_dir) / "MeasurementsList_temp.csv"
+    try:
+        rejection_counter = 0
+        with (
+            csv_path.open("r", newline="", encoding="utf-8") as infile,
+            temp_path.open("w", encoding="utf-8", newline="") as outfile,
+        ):
+            reader = csv.DictReader(infile)
+            writer = csv.DictWriter(outfile, fieldnames=reader.fieldnames)
+            writer.writeheader()
+            for row in reader:
+                if row["HashedFileName"] in rejected_hashes:
+                    row["split"] = "rejected"
+                    rejection_counter += 1
+                writer.writerow(row)
+        assert rejection_counter == 278, (
+            f"Expected 278 rejections, but applied only {rejection_counter}."
+        )
+    except FileNotFoundError:
+        log.warning(f"{csv_path} not found, skipping rejections.")
+        return
+    temp_path.replace(csv_path)
+    log.info(f"Overwrote {rejection_counter}/278 rejections to {csv_path}")
+    return
 
 
 def load_splits(source_dir):
-    """Load splits from MeasurementsList.csv and return avi filenames"""
+    """
+    Load splits from MeasurementsList.csv and return avi filenames.
+
+    Args:
+        source_dir: Source directory containing MeasurementsList.csv
+    Returns:
+        Dictionary with keys 'train', 'val', 'test', 'rejected' and values as lists of avi filenames
+    """
     csv_path = Path(source_dir) / "MeasurementsList.csv"
-    splits = {"train": [], "val": [], "test": []}
+    splits = {"train": [], "val": [], "test": [], "rejected": []}
     with open(csv_path, newline="", encoding="utf-8") as csvfile:
         reader = csv.DictReader(csvfile)
         file_split_map = {}
@@ -87,7 +102,17 @@ def load_splits(source_dir):
 
 
 def find_avi_file(source_dir, hashed_filename, batch=None):
-    """Find AVI file in the specified batch directory or any batch if not specified."""
+    """
+    Find AVI file in the specified batch directory or any batch if not specified.
+
+    Args:
+        source_dir: Source directory containing BatchX subdirectories
+        hashed_filename: Hashed filename (with or without .avi extension)
+        batch: Specific batch directory to search in (e.g., "Batch2"), or None to search all batches
+
+    Returns:
+        Path to the AVI file if found, else None
+    """
     # If filename already has .avi extension, strip it
     if hashed_filename.endswith(".avi"):
         hashed_filename = hashed_filename[:-4]
@@ -206,10 +231,9 @@ class LVHProcessor(H5Processor):
 
     def __init__(self, *args, cone_params=None, **kwargs):
         super().__init__(*args, **kwargs)
-        # self.cart2pol_jit = jit(cartesian_to_polar_matrix_jax)
+        # Store the pre-computed cone parameters
         self.cart2pol_jit = jit(cartesian_to_polar_matrix)
         self.cart2pol_batched = vmap(self.cart2pol_jit)
-        # Store the pre-computed cone parameters
         self.cone_parameters = cone_params or {}
 
     def get_split(self, avi_file: str, sequence):
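The `__init__` above wraps `cartesian_to_polar_matrix` in `jit` and then `vmap`, so a whole clip is scan-converted with one compiled, batched call. A minimal sketch of that pattern with a stand-in per-frame function (the real signature of `zea.display.cartesian_to_polar_matrix` is not shown in this diff):

    import jax.numpy as jnp
    from jax import jit, vmap

    def per_frame(frame):  # stand-in for cartesian_to_polar_matrix
        return frame * 2.0

    per_frame_jit = jit(per_frame)   # compiled once per input shape/dtype
    batched = vmap(per_frame_jit)    # maps over the leading (frame) axis

    clip = jnp.ones((16, 128, 128))  # (frames, height, width)
    out = batched(clip)              # per_frame applied to all 16 frames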
@@ -232,22 +256,26 @@ class LVHProcessor(H5Processor):
             raise UserWarning("Unknown split for file: " + filename)
 
     def __call__(self, avi_file):
-        print(avi_file)
+        """Takes a single avi_file and generates a zea dataset.
+
+        Args:
+            avi_file: String or path to avi_file to be processed
+
+        Returns:
+            zea dataset
+        """
+
         avi_filename = Path(avi_file).stem + ".avi"
-        sequence = jnp.array(load_video(avi_file))
+        sequence = np.array(load_avi(avi_file))
 
         sequence = translate(sequence, self.range_from, self._process_range)
-
         # Get pre-computed cone parameters for this file
         cone_params = self.cone_parameters.get(avi_filename)
-
         if cone_params is not None:
             # Apply pre-computed cropping parameters
             sequence = crop_sequence_with_params(sequence, cone_params)
         else:
-            print(f"Warning: No cone parameters for {avi_filename}, using original sequence")
-
-        # Convert to JAX array for polar conversion
+            log.warning(f"No cone parameters for {avi_filename}, using original sequence")
         sequence = jnp.array(sequence)
 
         split = self.get_split(avi_file, sequence)
@@ -282,7 +310,7 @@ def transform_measurement_coordinates_with_cone_params(row, cone_params):
         A new row with transformed coordinates, or None if cone_params is None
     """
     if cone_params is None:
-        print(f"Warning: No cone parameters for file {row['HashedFileName']}")
+        log.warning(f"No cone parameters for file {row['HashedFileName']}")
         return None
 
     new_row = dict(row)
@@ -324,7 +352,7 @@ def transform_measurement_coordinates_with_cone_params(row, cone_params):
     )
 
     if is_out_of_bounds:
-        print(f"Warning: Transformed coordinates out of bounds for file {row['HashedFileName']}")
+        log.warning(f"Transformed coordinates out of bounds for file {row['HashedFileName']}")
 
     # Convert back to string if original was string
     for k in ["X1", "X2", "Y1", "Y2"]:
@@ -353,7 +381,7 @@ def convert_measurements_csv(source_csv, output_csv, cone_params_csv=None):
     if cone_params_csv and Path(cone_params_csv).exists():
         cone_parameters = load_cone_parameters(cone_params_csv)
     else:
-        print("Warning: No cone parameters file found. Measurements will not be transformed.")
+        log.warning("No cone parameters file found. Measurements will not be transformed.")
 
     # Apply coordinate transformation and track skipped rows
     transformed_rows = []
@@ -371,7 +399,7 @@ def convert_measurements_csv(source_csv, output_csv, cone_params_csv=None):
             else:
                 skipped_files.add(row["HashedFileName"])
         except Exception as e:
-            print(f"Error processing row for file {row['HashedFileName']}: {str(e)}")
+            log.error(f"Error processing row for file {row['HashedFileName']}: {str(e)}")
             skipped_files.add(row["HashedFileName"])
 
     # Save to new CSV file
@@ -389,30 +417,65 @@ def convert_measurements_csv(source_csv, output_csv, cone_params_csv=None):
             writer.writeheader()
 
         # Print summary
-        print("\nConversion Summary:")
-        print(f"Total rows processed: {len(rows)}")
-        print(f"Rows successfully converted: {len(transformed_rows)}")
-        print(f"Rows skipped: {len(rows) - len(transformed_rows)}")
+        log.info("Conversion Summary:")
+        log.info(f"Total rows processed: {len(rows)}")
+        log.info(f"Rows successfully converted: {len(transformed_rows)}")
+        log.info(f"Rows skipped: {len(rows) - len(transformed_rows)}")
         if skipped_files:
-            print("\nSkipped files:")
+            log.info("Skipped files:")
             for filename in sorted(skipped_files):
-                print(f" - {filename}")
-        print(f"\nConverted measurements saved to {output_csv}")
+                log.info(f" - {filename}")
+        log.info(f"Converted measurements saved to {output_csv}")
 
     except Exception as e:
-        print(f"Error processing CSV file: {str(e)}")
+        log.error(f"Error processing CSV file: {str(e)}")
         raise
 
 
-if __name__ == "__main__":
-    args = get_args()
+def _process_file_worker(avi_file, dst, splits, cone_parameters, range_from, process_range):
+    """
+    Function for a hyperthreading worker to process a single file.
+
+    Args:
+        avi_file: Path to the AVI file to process
+        dst: Destination directory for output
+        splits: Dictionary of splits
+        cone_parameters: Dictionary of cone parameters
+        range_from: Range from value for processing
+        process_range: Process range value for processing
+    Returns:
+        Result of processing the file
+    """
+
+    # create a fresh processor inside the worker process
+    proc = LVHProcessor(path_out_h5=dst, splits=splits, cone_params=cone_parameters)
+    # if LVHProcessor needs range_from/_process_range set, set them here
+    proc.range_from = range_from
+    proc._process_range = process_range
+    return proc(avi_file)
+
+
+def convert_echonetlvh(args):
+    """
+    Conversion script for the EchoNet-LVH dataset.
+    Unzips, overwrites splits if needed, precomputes cone parameters,
+    and converts images and/or measurements to zea format and saves the dataset.
+    Called with argparse arguments through zea/data/convert/__main__.py.
+
+    Args:
+        args (argparse.Namespace): Command-line arguments
+    """
+    # Check if unzip is needed
+    src = unzip(args.src, "echonetlvh")
+
+    # Overwrite the splits if manual rejections are provided
+    if not args.no_rejection:
+        overwrite_splits(args.src)
 
     # Check that cone parameters exist
-    cone_params_csv = Path(args.output) / "cone_parameters.csv"
+    cone_params_csv = Path(args.dst) / "cone_parameters.csv"
     if not cone_params_csv.exists():
-        print(f"Error: Cone parameters not found at {cone_params_csv}")
-        print("Please run precompute_crop.py first to generate the parameters.")
-        sys.exit(1)
+        precompute_cone_parameters(args)
 
     # If no specific conversion is requested, convert both
     if not (args.convert_measurements or args.convert_images):
@@ -421,30 +484,30 @@ if __name__ == "__main__":
 
 
     # Convert images if requested
-    source_path = Path(args.source)
+    source_path = Path(src)
     splits = load_splits(source_path)
 
     # Load precomputed cone parameters
     cone_parameters = load_cone_parameters(cone_params_csv)
-    print(f"Loaded cone parameters for {len(cone_parameters)} files")
+    log.info(f"Loaded cone parameters for {len(cone_parameters)} files")
 
     files_to_process = []
     for split_files in splits.values():
         for avi_filename in split_files:
             # Strip .avi if present
             base_filename = avi_filename[:-4] if avi_filename.endswith(".avi") else avi_filename
-            avi_file = find_avi_file(args.source, base_filename, batch=args.batch)
+            avi_file = find_avi_file(src, base_filename, batch=args.batch)
             if avi_file:
                 files_to_process.append(avi_file)
             else:
-                print(
+                log.warning(
                     f"Warning: Could not find AVI file for {base_filename} in batch "
                     f"{args.batch if args.batch else 'any'}"
                 )
 
     # List files that have already been processed
     files_done = []
-    for _, _, filenames in os.walk(args.output):
+    for _, _, filenames in os.walk(args.dst):
         for filename in filenames:
             if filename.endswith(".hdf5"):
                 files_done.append(filename.replace(".hdf5", ""))
@@ -455,45 +518,53 @@ if __name__ == "__main__":
     # Limit files if max_files is specified
     if args.max_files is not None:
         files_to_process = files_to_process[: args.max_files]
-        print(f"Limited to processing {args.max_files} files due to max_files parameter")
+        log.info(f"Limited to processing {args.max_files} files due to max_files parameter")
 
-    print(f"Files left to process: {len(files_to_process)}")
+    log.info(f"Files left to process: {len(files_to_process)}")
 
     # Initialize processor with splits and cone parameters
-    processor = LVHProcessor(
-        path_out_h5=args.output,
-        path_out=args.output_numpy,
-        splits=splits,
-        cone_params=cone_parameters,
-    )
-
-    print("Starting the conversion process.")
-
-    if args.use_hyperthreading:
-        with ProcessPoolExecutor() as executor:
-            futures = {executor.submit(processor, file): file for file in files_to_process}
+    processor = LVHProcessor(path_out_h5=args.dst, splits=splits, cone_params=cone_parameters)
+
+    log.info("Starting the conversion process.")
+
+    if not args.no_hyperthreading:
+        # DO NOT create a processor here for submission
+        with ProcessPoolExecutor(max_workers=min(64, os.cpu_count())) as executor:
+            futures = {
+                executor.submit(
+                    _process_file_worker,
+                    str(file),  # avi_file
+                    args.dst,  # dst (Path or str)
+                    splits,  # splits (picklable dict of lists)
+                    cone_parameters,  # cone params dict (picklable)
+                    processor.range_from,  # only if needed; better pass primitives
+                    processor._process_range,
+                ): file
+                for file in files_to_process
+            }
             for future in tqdm(as_completed(futures), total=len(files_to_process)):
                 try:
                     future.result()
                 except Exception as e:
-                    print(f"Error processing file: {str(e)}")
+                    log.error(f"Error processing file: {str(e)}")
     else:
+        log.info("Converting without hyperthreading")
         for file in tqdm(files_to_process):
             try:
                 processor(file)
             except Exception as e:
-                print(f"Error processing {file}: {str(e)}")
+                log.error(f"Error processing {file}: {str(e)}")
 
-    print("All image conversion tasks are completed.")
+    log.info("All image conversion tasks are completed.")
 
     # Convert measurements if requested
     if args.convert_measurements:
-        source_path = Path(args.source)
+        source_path = Path(src)
         measurements_csv = source_path / "MeasurementsList.csv"
         if measurements_csv.exists():
-            output_csv = Path(args.output) / "MeasurementsList.csv"
+            output_csv = Path(args.dst) / "MeasurementsList.csv"
             convert_measurements_csv(measurements_csv, output_csv, cone_params_csv)
         else:
-            print("Warning: MeasurementsList.csv not found in source directory")
+            log.warning("MeasurementsList.csv not found in source directory")
 
-    print("All tasks are completed.")
+    log.info("All tasks are completed.")
zea/data/convert/echonetlvh/manual_rejections.txt
@@ -0,0 +1,73 @@
+0X11181B7E91E930CF
+0X111C213706B3A146
+0X13CC23E3AA9AB751
+0X1C6CAFF5CB0299C0
+0X1D7074550AF6F017
+0X227355B3DA6C4B7A
+0X23869862AA2B7A6C
+0X25BD4F8A91A524ED
+0X2E241BF70EED0CFA
+0X310DD4EE1DA13B80
+0X3FA4417F5B45F121
+0X488149AC9B0C76E2
+0X4885907B96B1BAB4
+0X4DAD29CD33A5EF93
+0X51E5F62690062FDC
+0X52ADC74C8CC6B8BF
+0X53E2CDA115A9A9F0
+0X56E51030B25F8380
+0X5AA2CEBB492A7CBD
+0X5C012F7585F8D394
+0X5F7DF2135352442B
+0X61B21A4616889011
+0X67AE0A2349838A2F
+0X687C7044FE93C8EE
+0X6ADC75E01F62D32A
+0X6CDFB4E3FBDC001B
+0X6E149B6ED5DB1AE9
+0X73DF29FFB2B683F0
+0X7E8F8D3A80E492FE
+0X80B0B9C83894F2F3
+0X80F8DC9F3491FC58
+0X83FC5AE3242FD375
+0X8E3726C5DC9819E3
+0X9B3A721521F6F71D
+0X9D011879D04756AD
+0XA685D6BE72086604
+0XA8DD5136FEA6B60E
+0XAD914D49A9577C03
+0XAF4C4BD0D33C9CE8
+0XB27AF4716B649D0E
+0XB5E17F4A0497BD92
+0XB616C5D99F1DD73
+0XBF449F4AAC0FA294
+0XC64829FD975EFBAD
+0XC6BFC34800F8397D
+0XCF0F583953B37563
+0XD0537A6E187120FF
+0XD1357C20D9D00A17
+0XD7D8BE5D94D25347
+0XDAD3FA61A927124B
+0XDDC98ED577E19D03
+0XEC65FFB6F7DCDA70
+0XED0CDC255553A242
+0XF49D9E0C9D9B266B
+0XF5465778B6D691DB
+0XF7B8A81B72C467DB
+0XFA0D127A284CCA1A
+0X1C9440E0E259157E
+0X36A0E840E9CB0CE1
+0X4547546741BB73E8
+0X4FE26154CFCECDD7
+0X5D1ADF0993864E58
+0X5F2928BCB0F4B94C
+0X6935644F26AE8EA
+0X6A4C7F69D0046092
+0X716C88F4E946105A
+0XC0D63F1F9088CA81
+0XEBF79B6D25606A2D
+0XFFB05BDB3A97322
+0X7D8208DCF7643708
+0XA836692EB71E582B
+0XB40B08D1642CEEE
+0XF8D61AF7DE6AC700