mapillary-tools 0.14.0a1__py3-none-any.whl → 0.14.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. mapillary_tools/__init__.py +1 -1
  2. mapillary_tools/api_v4.py +5 -4
  3. mapillary_tools/authenticate.py +9 -9
  4. mapillary_tools/blackvue_parser.py +79 -22
  5. mapillary_tools/camm/camm_parser.py +5 -5
  6. mapillary_tools/commands/__main__.py +1 -2
  7. mapillary_tools/config.py +41 -18
  8. mapillary_tools/constants.py +3 -2
  9. mapillary_tools/exceptions.py +1 -1
  10. mapillary_tools/exif_read.py +65 -65
  11. mapillary_tools/exif_write.py +7 -7
  12. mapillary_tools/exiftool_read.py +23 -46
  13. mapillary_tools/exiftool_read_video.py +88 -49
  14. mapillary_tools/exiftool_runner.py +4 -24
  15. mapillary_tools/ffmpeg.py +417 -242
  16. mapillary_tools/geo.py +4 -21
  17. mapillary_tools/geotag/__init__.py +0 -1
  18. mapillary_tools/geotag/{geotag_from_generic.py → base.py} +34 -50
  19. mapillary_tools/geotag/factory.py +105 -103
  20. mapillary_tools/geotag/geotag_images_from_exif.py +15 -51
  21. mapillary_tools/geotag/geotag_images_from_exiftool.py +118 -63
  22. mapillary_tools/geotag/geotag_images_from_gpx.py +33 -16
  23. mapillary_tools/geotag/geotag_images_from_gpx_file.py +2 -34
  24. mapillary_tools/geotag/geotag_images_from_nmea_file.py +0 -3
  25. mapillary_tools/geotag/geotag_images_from_video.py +51 -14
  26. mapillary_tools/geotag/geotag_videos_from_exiftool.py +123 -0
  27. mapillary_tools/geotag/geotag_videos_from_gpx.py +35 -123
  28. mapillary_tools/geotag/geotag_videos_from_video.py +14 -147
  29. mapillary_tools/geotag/image_extractors/base.py +18 -0
  30. mapillary_tools/geotag/image_extractors/exif.py +60 -0
  31. mapillary_tools/geotag/image_extractors/exiftool.py +18 -0
  32. mapillary_tools/geotag/options.py +26 -3
  33. mapillary_tools/geotag/utils.py +62 -0
  34. mapillary_tools/geotag/video_extractors/base.py +18 -0
  35. mapillary_tools/geotag/video_extractors/exiftool.py +70 -0
  36. mapillary_tools/geotag/video_extractors/gpx.py +116 -0
  37. mapillary_tools/geotag/video_extractors/native.py +135 -0
  38. mapillary_tools/gpmf/gpmf_parser.py +16 -16
  39. mapillary_tools/gpmf/gps_filter.py +5 -3
  40. mapillary_tools/history.py +8 -3
  41. mapillary_tools/mp4/construct_mp4_parser.py +9 -8
  42. mapillary_tools/mp4/mp4_sample_parser.py +27 -27
  43. mapillary_tools/mp4/simple_mp4_builder.py +10 -9
  44. mapillary_tools/mp4/simple_mp4_parser.py +13 -12
  45. mapillary_tools/process_geotag_properties.py +21 -15
  46. mapillary_tools/process_sequence_properties.py +49 -49
  47. mapillary_tools/sample_video.py +15 -14
  48. mapillary_tools/serializer/description.py +587 -0
  49. mapillary_tools/serializer/gpx.py +132 -0
  50. mapillary_tools/telemetry.py +6 -5
  51. mapillary_tools/types.py +64 -635
  52. mapillary_tools/upload.py +176 -197
  53. mapillary_tools/upload_api_v4.py +94 -51
  54. mapillary_tools/uploader.py +284 -138
  55. mapillary_tools/utils.py +16 -18
  56. {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0b1.dist-info}/METADATA +87 -31
  57. mapillary_tools-0.14.0b1.dist-info/RECORD +75 -0
  58. {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0b1.dist-info}/WHEEL +1 -1
  59. mapillary_tools/geotag/geotag_images_from_exiftool_both_image_and_video.py +0 -77
  60. mapillary_tools/geotag/geotag_videos_from_exiftool_video.py +0 -151
  61. mapillary_tools/video_data_extraction/cli_options.py +0 -22
  62. mapillary_tools/video_data_extraction/extract_video_data.py +0 -157
  63. mapillary_tools/video_data_extraction/extractors/base_parser.py +0 -75
  64. mapillary_tools/video_data_extraction/extractors/blackvue_parser.py +0 -49
  65. mapillary_tools/video_data_extraction/extractors/camm_parser.py +0 -62
  66. mapillary_tools/video_data_extraction/extractors/exiftool_runtime_parser.py +0 -74
  67. mapillary_tools/video_data_extraction/extractors/exiftool_xml_parser.py +0 -52
  68. mapillary_tools/video_data_extraction/extractors/generic_video_parser.py +0 -52
  69. mapillary_tools/video_data_extraction/extractors/gopro_parser.py +0 -58
  70. mapillary_tools/video_data_extraction/extractors/gpx_parser.py +0 -108
  71. mapillary_tools/video_data_extraction/extractors/nmea_parser.py +0 -24
  72. mapillary_tools/video_data_extraction/video_data_parser_factory.py +0 -39
  73. mapillary_tools-0.14.0a1.dist-info/RECORD +0 -78
  74. {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0b1.dist-info}/entry_points.txt +0 -0
  75. {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0b1.dist-info}/licenses/LICENSE +0 -0
  76. {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0b1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,116 @@
1
+ from __future__ import annotations
2
+
3
+ import dataclasses
4
+ import enum
5
+ import logging
6
+ import sys
7
+ import typing as T
8
+ from pathlib import Path
9
+
10
+ if sys.version_info >= (3, 12):
11
+ from typing import override
12
+ else:
13
+ from typing_extensions import override
14
+
15
+ from ... import exceptions, geo, telemetry, types
16
+ from ..utils import parse_gpx
17
+ from .base import BaseVideoExtractor
18
+ from .native import NativeVideoExtractor
19
+
20
+
21
+ LOG = logging.getLogger(__name__)
22
+
23
+
24
+ class SyncMode(enum.Enum):
25
+ # Sync by video GPS timestamps if found, otherwise rebase
26
+ SYNC = "sync"
27
+ # Sync by video GPS timestamps, and throw if not found
28
+ STRICT_SYNC = "strict_sync"
29
+ # Rebase all GPX timestamps to start from 0
30
+ REBASE = "rebase"
31
+
32
+
33
+ class GPXVideoExtractor(BaseVideoExtractor):
34
+ def __init__(
35
+ self, video_path: Path, gpx_path: Path, sync_mode: SyncMode = SyncMode.SYNC
36
+ ):
37
+ self.video_path = video_path
38
+ self.gpx_path = gpx_path
39
+ self.sync_mode = sync_mode
40
+
41
+ @override
42
+ def extract(self) -> types.VideoMetadata:
43
+ gpx_tracks = parse_gpx(self.gpx_path)
44
+
45
+ if 1 < len(gpx_tracks):
46
+ LOG.warning(
47
+ f"Found {len(gpx_tracks)} tracks in the GPX file {self.gpx_path}. Will merge points in all the tracks as a single track for interpolation"
48
+ )
49
+
50
+ gpx_points: T.Sequence[geo.Point] = sum(gpx_tracks, [])
51
+
52
+ native_extractor = NativeVideoExtractor(self.video_path)
53
+
54
+ try:
55
+ native_video_metadata = native_extractor.extract()
56
+ except exceptions.MapillaryVideoGPSNotFoundError as ex:
57
+ if self.sync_mode is SyncMode.STRICT_SYNC:
58
+ raise ex
59
+ self._rebase_times(gpx_points)
60
+ return types.VideoMetadata(
61
+ filename=self.video_path,
62
+ filetype=types.FileType.VIDEO,
63
+ points=gpx_points,
64
+ )
65
+
66
+ if self.sync_mode is SyncMode.REBASE:
67
+ self._rebase_times(gpx_points)
68
+ else:
69
+ offset = self._gpx_offset(gpx_points, native_video_metadata.points)
70
+ self._rebase_times(gpx_points, offset=offset)
71
+
72
+ return dataclasses.replace(native_video_metadata, points=gpx_points)
73
+
74
+ @classmethod
75
+ def _rebase_times(cls, points: T.Sequence[geo.Point], offset: float = 0.0) -> None:
76
+ """
77
+ Rebase point times to start from **offset**
78
+ """
79
+ if points:
80
+ first_timestamp = points[0].time
81
+ for p in points:
82
+ p.time = (p.time - first_timestamp) + offset
83
+
84
+ @classmethod
85
+ def _gpx_offset(
86
+ cls, gpx_points: T.Sequence[geo.Point], video_gps_points: T.Sequence[geo.Point]
87
+ ) -> float:
88
+ """
89
+ Calculate the offset that needs to be applied to the GPX points to sync with the video GPS points.
90
+
91
+ >>> gpx_points = [geo.Point(time=5, lat=1, lon=1, alt=None, angle=None)]
92
+ >>> GPXVideoExtractor._gpx_offset(gpx_points, gpx_points)
93
+ 0.0
94
+ >>> GPXVideoExtractor._gpx_offset(gpx_points, [])
95
+ 0.0
96
+ >>> GPXVideoExtractor._gpx_offset([], gpx_points)
97
+ 0.0
98
+ """
99
+ offset: float = 0.0
100
+
101
+ if not gpx_points or not video_gps_points:
102
+ return offset
103
+
104
+ gps_epoch_time: float | None = None
105
+ gps_point = video_gps_points[0]
106
+ if isinstance(gps_point, telemetry.GPSPoint):
107
+ if gps_point.epoch_time is not None:
108
+ gps_epoch_time = gps_point.epoch_time
109
+ elif isinstance(gps_point, telemetry.CAMMGPSPoint):
110
+ if gps_point.time_gps_epoch is not None:
111
+ gps_epoch_time = gps_point.time_gps_epoch
112
+
113
+ if gps_epoch_time is not None:
114
+ offset = gpx_points[0].time - gps_epoch_time
115
+
116
+ return offset
@@ -0,0 +1,135 @@
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ import typing as T
5
+ from pathlib import Path
6
+
7
+ if sys.version_info >= (3, 12):
8
+ from typing import override
9
+ else:
10
+ from typing_extensions import override
11
+
12
+ from ... import blackvue_parser, exceptions, geo, telemetry, types, utils
13
+ from ...camm import camm_parser
14
+ from ...gpmf import gpmf_gps_filter, gpmf_parser
15
+ from .base import BaseVideoExtractor
16
+
17
+
18
+ class GoProVideoExtractor(BaseVideoExtractor):
19
+ @override
20
+ def extract(self) -> types.VideoMetadata:
21
+ with self.video_path.open("rb") as fp:
22
+ gopro_info = gpmf_parser.extract_gopro_info(fp)
23
+
24
+ if gopro_info is None:
25
+ raise exceptions.MapillaryVideoGPSNotFoundError(
26
+ "No GPS data found from the video"
27
+ )
28
+
29
+ gps_points = gopro_info.gps
30
+ assert gps_points is not None, "must have GPS data extracted"
31
+ if not gps_points:
32
+ raise exceptions.MapillaryGPXEmptyError("Empty GPS data found")
33
+
34
+ gps_points = T.cast(
35
+ T.List[telemetry.GPSPoint], gpmf_gps_filter.remove_noisy_points(gps_points)
36
+ )
37
+ if not gps_points:
38
+ raise exceptions.MapillaryGPSNoiseError("GPS is too noisy")
39
+
40
+ video_metadata = types.VideoMetadata(
41
+ filename=self.video_path,
42
+ filesize=utils.get_file_size(self.video_path),
43
+ filetype=types.FileType.GOPRO,
44
+ points=T.cast(T.List[geo.Point], gps_points),
45
+ make=gopro_info.make,
46
+ model=gopro_info.model,
47
+ )
48
+
49
+ return video_metadata
50
+
51
+
52
+ class CAMMVideoExtractor(BaseVideoExtractor):
53
+ @override
54
+ def extract(self) -> types.VideoMetadata:
55
+ with self.video_path.open("rb") as fp:
56
+ camm_info = camm_parser.extract_camm_info(fp)
57
+
58
+ if camm_info is None:
59
+ raise exceptions.MapillaryVideoGPSNotFoundError(
60
+ "No GPS data found from the video"
61
+ )
62
+
63
+ if not camm_info.gps and not camm_info.mini_gps:
64
+ raise exceptions.MapillaryGPXEmptyError("Empty GPS data found")
65
+
66
+ return types.VideoMetadata(
67
+ filename=self.video_path,
68
+ filesize=utils.get_file_size(self.video_path),
69
+ filetype=types.FileType.CAMM,
70
+ points=T.cast(T.List[geo.Point], camm_info.gps or camm_info.mini_gps),
71
+ make=camm_info.make,
72
+ model=camm_info.model,
73
+ )
74
+
75
+
76
+ class BlackVueVideoExtractor(BaseVideoExtractor):
77
+ @override
78
+ def extract(self) -> types.VideoMetadata:
79
+ with self.video_path.open("rb") as fp:
80
+ blackvue_info = blackvue_parser.extract_blackvue_info(fp)
81
+
82
+ if blackvue_info is None:
83
+ raise exceptions.MapillaryVideoGPSNotFoundError(
84
+ "No GPS data found from the video"
85
+ )
86
+
87
+ if not blackvue_info.gps:
88
+ raise exceptions.MapillaryGPXEmptyError("Empty GPS data found")
89
+
90
+ video_metadata = types.VideoMetadata(
91
+ filename=self.video_path,
92
+ filesize=utils.get_file_size(self.video_path),
93
+ filetype=types.FileType.BLACKVUE,
94
+ points=blackvue_info.gps,
95
+ make=blackvue_info.make,
96
+ model=blackvue_info.model,
97
+ )
98
+
99
+ return video_metadata
100
+
101
+
102
+ class NativeVideoExtractor(BaseVideoExtractor):
103
+ def __init__(self, video_path: Path, filetypes: set[types.FileType] | None = None):
104
+ super().__init__(video_path)
105
+ self.filetypes = filetypes
106
+
107
+ @override
108
+ def extract(self) -> types.VideoMetadata:
109
+ ft = self.filetypes
110
+ extractor: BaseVideoExtractor
111
+
112
+ if ft is None or types.FileType.VIDEO in ft or types.FileType.GOPRO in ft:
113
+ extractor = GoProVideoExtractor(self.video_path)
114
+ try:
115
+ return extractor.extract()
116
+ except exceptions.MapillaryVideoGPSNotFoundError:
117
+ pass
118
+
119
+ if ft is None or types.FileType.VIDEO in ft or types.FileType.CAMM in ft:
120
+ extractor = CAMMVideoExtractor(self.video_path)
121
+ try:
122
+ return extractor.extract()
123
+ except exceptions.MapillaryVideoGPSNotFoundError:
124
+ pass
125
+
126
+ if ft is None or types.FileType.VIDEO in ft or types.FileType.BLACKVUE in ft:
127
+ extractor = BlackVueVideoExtractor(self.video_path)
128
+ try:
129
+ return extractor.extract()
130
+ except exceptions.MapillaryVideoGPSNotFoundError:
131
+ pass
132
+
133
+ raise exceptions.MapillaryVideoGPSNotFoundError(
134
+ "No GPS data found from the video"
135
+ )
@@ -39,7 +39,7 @@ class KLVDict(T.TypedDict):
39
39
  type: bytes
40
40
  structure_size: int
41
41
  repeat: int
42
- data: T.List[T.Any]
42
+ data: list[T.Any]
43
43
 
44
44
 
45
45
  GPMFSampleData: C.GreedyRange
@@ -143,7 +143,7 @@ class GoProInfo:
143
143
 
144
144
  def extract_gopro_info(
145
145
  fp: T.BinaryIO, telemetry_only: bool = False
146
- ) -> T.Optional[GoProInfo]:
146
+ ) -> GoProInfo | None:
147
147
  """
148
148
  Return the GoProInfo object if found. None indicates it's not a valid GoPro video.
149
149
  """
@@ -276,7 +276,7 @@ def _gps5_timestamp_to_epoch_time(dtstr: str):
276
276
  def _gps5_from_stream(
277
277
  stream: T.Sequence[KLVDict],
278
278
  ) -> T.Generator[telemetry.GPSPoint, None, None]:
279
- indexed: T.Dict[bytes, T.List[T.List[T.Any]]] = {
279
+ indexed: dict[bytes, list[list[T.Any]]] = {
280
280
  klv["key"]: klv["data"] for klv in stream
281
281
  }
282
282
 
@@ -362,7 +362,7 @@ def _gps9_from_stream(
362
362
  ) -> T.Generator[telemetry.GPSPoint, None, None]:
363
363
  NUM_VALUES = 9
364
364
 
365
- indexed: T.Dict[bytes, T.List[T.List[T.Any]]] = {
365
+ indexed: dict[bytes, list[list[T.Any]]] = {
366
366
  klv["key"]: klv["data"] for klv in stream
367
367
  }
368
368
 
@@ -444,8 +444,8 @@ def _find_first_device_id(stream: T.Sequence[KLVDict]) -> int:
444
444
  return device_id
445
445
 
446
446
 
447
- def _find_first_gps_stream(stream: T.Sequence[KLVDict]) -> T.List[telemetry.GPSPoint]:
448
- sample_points: T.List[telemetry.GPSPoint] = []
447
+ def _find_first_gps_stream(stream: T.Sequence[KLVDict]) -> list[telemetry.GPSPoint]:
448
+ sample_points: list[telemetry.GPSPoint] = []
449
449
 
450
450
  for klv in stream:
451
451
  if klv["key"] == b"STRM":
@@ -469,7 +469,7 @@ def _is_matrix_calibration(matrix: T.Sequence[float]) -> bool:
469
469
 
470
470
 
471
471
  def _build_matrix(
472
- orin: T.Union[bytes, T.Sequence[int]], orio: T.Union[bytes, T.Sequence[int]]
472
+ orin: bytes | T.Sequence[int], orio: bytes | T.Sequence[int]
473
473
  ) -> T.Sequence[float]:
474
474
  matrix = []
475
475
 
@@ -503,14 +503,14 @@ def _apply_matrix(
503
503
  yield sum(matrix[row_start + x] * values[x] for x in range(size))
504
504
 
505
505
 
506
- def _flatten(nested: T.Sequence[T.Sequence[float]]) -> T.List[float]:
507
- output: T.List[float] = []
506
+ def _flatten(nested: T.Sequence[T.Sequence[float]]) -> list[float]:
507
+ output: list[float] = []
508
508
  for row in nested:
509
509
  output.extend(row)
510
510
  return output
511
511
 
512
512
 
513
- def _get_matrix(klv: T.Dict[bytes, KLVDict]) -> T.Optional[T.Sequence[float]]:
513
+ def _get_matrix(klv: dict[bytes, KLVDict]) -> T.Sequence[float] | None:
514
514
  mtrx = klv.get(b"MTRX")
515
515
  if mtrx is not None:
516
516
  matrix: T.Sequence[float] = _flatten(mtrx["data"])
@@ -530,7 +530,7 @@ def _get_matrix(klv: T.Dict[bytes, KLVDict]) -> T.Optional[T.Sequence[float]]:
530
530
  def _scale_and_calibrate(
531
531
  stream: T.Sequence[KLVDict], key: bytes
532
532
  ) -> T.Generator[T.Sequence[float], None, None]:
533
- indexed: T.Dict[bytes, KLVDict] = {klv["key"]: klv for klv in stream}
533
+ indexed: dict[bytes, KLVDict] = {klv["key"]: klv for klv in stream}
534
534
 
535
535
  klv = indexed.get(key)
536
536
  if klv is None:
@@ -561,7 +561,7 @@ def _scale_and_calibrate(
561
561
 
562
562
 
563
563
  def _find_first_telemetry_stream(stream: T.Sequence[KLVDict], key: bytes):
564
- values: T.List[T.Sequence[float]] = []
564
+ values: list[T.Sequence[float]] = []
565
565
 
566
566
  for klv in stream:
567
567
  if klv["key"] == b"STRM":
@@ -684,7 +684,7 @@ def _load_telemetry_from_samples(
684
684
  return device_found
685
685
 
686
686
 
687
- def _is_gpmd_description(description: T.Dict) -> bool:
687
+ def _is_gpmd_description(description: dict) -> bool:
688
688
  return description["format"] == b"gpmd"
689
689
 
690
690
 
@@ -699,11 +699,11 @@ def _filter_gpmd_samples(track: TrackBoxParser) -> T.Generator[Sample, None, Non
699
699
  yield sample
700
700
 
701
701
 
702
- def _extract_camera_model_from_devices(device_names: T.Dict[int, bytes]) -> str:
702
+ def _extract_camera_model_from_devices(device_names: dict[int, bytes]) -> str:
703
703
  if not device_names:
704
704
  return ""
705
705
 
706
- unicode_names: T.List[str] = []
706
+ unicode_names: list[str] = []
707
707
  for name in device_names.values():
708
708
  try:
709
709
  unicode_names.append(name.decode("utf-8"))
@@ -730,7 +730,7 @@ def _extract_camera_model_from_devices(device_names: T.Dict[int, bytes]) -> str:
730
730
 
731
731
  def _iterate_read_sample_data(
732
732
  fp: T.BinaryIO, samples: T.Iterable[Sample]
733
- ) -> T.Generator[T.Tuple[Sample, bytes], None, None]:
733
+ ) -> T.Generator[tuple[Sample, bytes], None, None]:
734
734
  for sample in samples:
735
735
  fp.seek(sample.raw_sample.offset, io.SEEK_SET)
736
736
  yield (sample, fp.read(sample.raw_sample.size))
@@ -1,3 +1,5 @@
1
+ from __future__ import annotations
2
+
1
3
  import statistics
2
4
  import typing as T
3
5
 
@@ -96,7 +98,7 @@ def both(
96
98
  def dbscan(
97
99
  sequences: T.Sequence[PointSequence],
98
100
  merge_or_not: Decider,
99
- ) -> T.Dict[int, PointSequence]:
101
+ ) -> dict[int, PointSequence]:
100
102
  """
101
103
  One-dimension DBSCAN clustering: https://en.wikipedia.org/wiki/DBSCAN
102
104
  The input is a list of sequences, and it is guaranteed that all sequences are sorted by time.
@@ -107,7 +109,7 @@ def dbscan(
107
109
  """
108
110
 
109
111
  # find which sequences (keys) should be merged to which sequences (values)
110
- mergeto: T.Dict[int, int] = {}
112
+ mergeto: dict[int, int] = {}
111
113
  for left in range(len(sequences)):
112
114
  mergeto.setdefault(left, left)
113
115
  # find the first sequence to merge with
@@ -119,7 +121,7 @@ def dbscan(
119
121
  break
120
122
 
121
123
  # merge
122
- merged: T.Dict[int, PointSequence] = {}
124
+ merged: dict[int, PointSequence] = {}
123
125
  for idx, s in enumerate(sequences):
124
126
  merged.setdefault(mergeto[idx], []).extend(s)
125
127
 
@@ -1,3 +1,5 @@
1
+ from __future__ import annotations
2
+
1
3
  import json
2
4
  import logging
3
5
  import string
@@ -5,6 +7,7 @@ import typing as T
5
7
  from pathlib import Path
6
8
 
7
9
  from . import constants, types
10
+ from .serializer.description import DescriptionJSONSerializer
8
11
 
9
12
  JSONDict = T.Dict[str, T.Union[str, int, float, None]]
10
13
 
@@ -43,18 +46,20 @@ def write_history(
43
46
  md5sum: str,
44
47
  params: JSONDict,
45
48
  summary: JSONDict,
46
- metadatas: T.Optional[T.Sequence[types.Metadata]] = None,
49
+ metadatas: T.Sequence[types.Metadata] | None = None,
47
50
  ) -> None:
48
51
  if not constants.MAPILLARY_UPLOAD_HISTORY_PATH:
49
52
  return
50
53
  path = history_desc_path(md5sum)
51
54
  LOG.debug("Writing upload history: %s", path)
52
55
  path.resolve().parent.mkdir(parents=True, exist_ok=True)
53
- history: T.Dict[str, T.Any] = {
56
+ history: dict[str, T.Any] = {
54
57
  "params": params,
55
58
  "summary": summary,
56
59
  }
57
60
  if metadatas is not None:
58
- history["descs"] = [types.as_desc(metadata) for metadata in metadatas]
61
+ history["descs"] = [
62
+ DescriptionJSONSerializer.as_desc(metadata) for metadata in metadatas
63
+ ]
59
64
  with open(path, "w") as fp:
60
65
  fp.write(json.dumps(history))
@@ -1,4 +1,5 @@
1
1
  # pyre-ignore-all-errors[5, 16, 21, 58]
2
+ from __future__ import annotations
2
3
 
3
4
  import typing as T
4
5
 
@@ -42,7 +43,7 @@ BoxType = T.Literal[
42
43
 
43
44
  class BoxDict(T.TypedDict, total=True):
44
45
  type: BoxType
45
- data: T.Union[T.Sequence["BoxDict"], T.Dict[str, T.Any], bytes]
46
+ data: T.Sequence["BoxDict"] | dict[str, T.Any] | bytes
46
47
 
47
48
 
48
49
  _UNITY_MATRIX = [0x10000, 0, 0, 0, 0x10000, 0, 0, 0, 0x40000000]
@@ -376,7 +377,7 @@ class Box64ConstructBuilder:
376
377
  NOTE: Do not build data with this struct. For building, use Box32StructBuilder instead.
377
378
  """
378
379
 
379
- _box: T.Optional[C.Construct]
380
+ _box: C.Construct | None
380
381
 
381
382
  def __init__(
382
383
  self,
@@ -438,7 +439,7 @@ class Box64ConstructBuilder:
438
439
  def parse_box(self, data: bytes) -> BoxDict:
439
440
  return T.cast(BoxDict, self.Box.parse(data))
440
441
 
441
- def parse_boxlist(self, data: bytes) -> T.List[BoxDict]:
442
+ def parse_boxlist(self, data: bytes) -> list[BoxDict]:
442
443
  return T.cast(T.List[BoxDict], self.BoxList.parse(data))
443
444
 
444
445
 
@@ -464,7 +465,7 @@ class Box32ConstructBuilder(Box64ConstructBuilder):
464
465
  def parse_box(self, data: bytes) -> BoxDict:
465
466
  raise NotImplementedError("Box32ConstructBuilder does not support parsing")
466
467
 
467
- def parse_boxlist(self, data: bytes) -> T.List[BoxDict]:
468
+ def parse_boxlist(self, data: bytes) -> list[BoxDict]:
468
469
  raise NotImplementedError("Box32ConstructBuilder does not support parsing")
469
470
 
470
471
  def build_box(self, box: BoxDict) -> bytes:
@@ -584,7 +585,7 @@ MOOVWithoutSTBLBuilderConstruct = Box32ConstructBuilder(
584
585
 
585
586
 
586
587
  def find_box_at_pathx(
587
- box: T.Union[T.Sequence[BoxDict], BoxDict], path: T.Sequence[bytes]
588
+ box: T.Sequence[BoxDict] | BoxDict, path: T.Sequence[bytes]
588
589
  ) -> BoxDict:
589
590
  found = find_box_at_path(box, path)
590
591
  if found is None:
@@ -593,8 +594,8 @@ def find_box_at_pathx(
593
594
 
594
595
 
595
596
  def find_box_at_path(
596
- box: T.Union[T.Sequence[BoxDict], BoxDict], path: T.Sequence[bytes]
597
- ) -> T.Optional[BoxDict]:
597
+ box: T.Sequence[BoxDict] | BoxDict, path: T.Sequence[bytes]
598
+ ) -> BoxDict | None:
598
599
  if not path:
599
600
  return None
600
601
 
@@ -608,7 +609,7 @@ def find_box_at_path(
608
609
  if box["type"] == path[0]:
609
610
  if len(path) == 1:
610
611
  return box
611
- box_data = T.cast(T.Sequence[BoxDict], box["data"])
612
+ box_data = T.cast(T.List[BoxDict], box["data"])
612
613
  # ListContainer from construct is not sequence
613
614
  assert isinstance(box_data, T.Sequence), (
614
615
  f"expect a list of boxes but got {type(box_data)} at path {path}"
@@ -44,16 +44,16 @@ class Sample(T.NamedTuple):
44
44
  exact_timedelta: float
45
45
 
46
46
  # reference to the sample description
47
- description: T.Dict
47
+ description: dict
48
48
 
49
49
 
50
50
  def _extract_raw_samples(
51
51
  sizes: T.Sequence[int],
52
- chunk_entries: T.Sequence[T.Dict],
52
+ chunk_entries: T.Sequence[dict],
53
53
  chunk_offsets: T.Sequence[int],
54
54
  timedeltas: T.Sequence[int],
55
- composition_offsets: T.Optional[T.Sequence[int]],
56
- syncs: T.Optional[T.Set[int]],
55
+ composition_offsets: list[int] | None,
56
+ syncs: set[int] | None,
57
57
  ) -> T.Generator[RawSample, None, None]:
58
58
  if not sizes:
59
59
  return
@@ -130,7 +130,7 @@ def _extract_raw_samples(
130
130
 
131
131
  def _extract_samples(
132
132
  raw_samples: T.Iterator[RawSample],
133
- descriptions: T.List,
133
+ descriptions: list,
134
134
  timescale: int,
135
135
  ) -> T.Generator[Sample, None, None]:
136
136
  acc_delta = 0
@@ -154,21 +154,21 @@ STBLBoxlistConstruct = cparser.Box64ConstructBuilder(
154
154
 
155
155
  def extract_raw_samples_from_stbl_data(
156
156
  stbl: bytes,
157
- ) -> T.Tuple[T.List[T.Dict], T.Generator[RawSample, None, None]]:
158
- descriptions = []
159
- sizes = []
160
- chunk_offsets = []
161
- chunk_entries = []
162
- timedeltas: T.List[int] = []
163
- composition_offsets: T.Optional[T.List[int]] = None
164
- syncs: T.Optional[T.Set[int]] = None
157
+ ) -> tuple[list[dict], T.Generator[RawSample, None, None]]:
158
+ descriptions: list[dict] = []
159
+ sizes: list[int] = []
160
+ chunk_offsets: list[int] = []
161
+ chunk_entries: list[dict] = []
162
+ timedeltas: list[int] = []
163
+ composition_offsets: list[int] | None = None
164
+ syncs: set[int] | None = None
165
165
 
166
166
  stbl_children = T.cast(
167
167
  T.Sequence[cparser.BoxDict], STBLBoxlistConstruct.parse(stbl)
168
168
  )
169
169
 
170
170
  for box in stbl_children:
171
- data: T.Dict = T.cast(T.Dict, box["data"])
171
+ data: dict = T.cast(dict, box["data"])
172
172
 
173
173
  if box["type"] == b"stsd":
174
174
  descriptions = list(data["entries"])
@@ -227,32 +227,32 @@ class TrackBoxParser:
227
227
  )
228
228
  self.stbl_data = T.cast(bytes, stbl["data"])
229
229
 
230
- def extract_tkhd_boxdata(self) -> T.Dict:
230
+ def extract_tkhd_boxdata(self) -> dict:
231
231
  return T.cast(
232
- T.Dict, cparser.find_box_at_pathx(self.trak_children, [b"tkhd"])["data"]
232
+ dict, cparser.find_box_at_pathx(self.trak_children, [b"tkhd"])["data"]
233
233
  )
234
234
 
235
235
  def is_video_track(self) -> bool:
236
236
  hdlr = cparser.find_box_at_pathx(self.trak_children, [b"mdia", b"hdlr"])
237
237
  return T.cast(T.Dict[str, T.Any], hdlr["data"])["handler_type"] == b"vide"
238
238
 
239
- def extract_sample_descriptions(self) -> T.List[T.Dict]:
239
+ def extract_sample_descriptions(self) -> list[dict]:
240
240
  # TODO: return [] if parsing fail
241
241
  boxes = _STSDBoxListConstruct.parse(self.stbl_data)
242
242
  stsd = cparser.find_box_at_pathx(
243
243
  T.cast(T.Sequence[cparser.BoxDict], boxes), [b"stsd"]
244
244
  )
245
- return T.cast(T.List[T.Dict], T.cast(T.Dict, stsd["data"])["entries"])
245
+ return T.cast(T.List[dict], T.cast(dict, stsd["data"])["entries"])
246
246
 
247
- def extract_elst_boxdata(self) -> T.Optional[T.Dict]:
247
+ def extract_elst_boxdata(self) -> dict | None:
248
248
  box = cparser.find_box_at_path(self.trak_children, [b"edts", b"elst"])
249
249
  if box is None:
250
250
  return None
251
- return T.cast(T.Dict, box["data"])
251
+ return T.cast(dict, box["data"])
252
252
 
253
- def extract_mdhd_boxdata(self) -> T.Dict:
253
+ def extract_mdhd_boxdata(self) -> dict:
254
254
  box = cparser.find_box_at_pathx(self.trak_children, [b"mdia", b"mdhd"])
255
- return T.cast(T.Dict, box["data"])
255
+ return T.cast(dict, box["data"])
256
256
 
257
257
  def extract_raw_samples(self) -> T.Generator[RawSample, None, None]:
258
258
  _, raw_samples = extract_raw_samples_from_stbl_data(self.stbl_data)
@@ -261,7 +261,7 @@ class TrackBoxParser:
261
261
  def extract_samples(self) -> T.Generator[Sample, None, None]:
262
262
  descriptions, raw_samples = extract_raw_samples_from_stbl_data(self.stbl_data)
263
263
  mdhd = T.cast(
264
- T.Dict,
264
+ dict,
265
265
  cparser.find_box_at_pathx(self.trak_children, [b"mdia", b"mdhd"])["data"],
266
266
  )
267
267
  yield from _extract_samples(raw_samples, descriptions, mdhd["timescale"])
@@ -287,15 +287,15 @@ class MovieBoxParser:
287
287
  moov = sparser.parse_box_data_firstx(stream, [b"moov"])
288
288
  return cls(moov)
289
289
 
290
- def extract_mvhd_boxdata(self) -> T.Dict:
290
+ def extract_mvhd_boxdata(self) -> dict:
291
291
  mvhd = cparser.find_box_at_pathx(self.moov_children, [b"mvhd"])
292
- return T.cast(T.Dict, mvhd["data"])
292
+ return T.cast(dict, mvhd["data"])
293
293
 
294
- def extract_udta_boxdata(self) -> T.Dict | None:
294
+ def extract_udta_boxdata(self) -> dict | None:
295
295
  box = cparser.find_box_at_path(self.moov_children, [b"udta"])
296
296
  if box is None:
297
297
  return None
298
- return T.cast(T.Dict, box["data"])
298
+ return T.cast(dict, box["data"])
299
299
 
300
300
  def extract_tracks(self) -> T.Generator[TrackBoxParser, None, None]:
301
301
  for box in self.moov_children: