mapillary-tools 0.14.0a1-py3-none-any.whl → 0.14.0a2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapillary_tools/__init__.py +1 -1
- mapillary_tools/api_v4.py +4 -4
- mapillary_tools/camm/camm_parser.py +5 -5
- mapillary_tools/commands/__main__.py +1 -2
- mapillary_tools/config.py +7 -5
- mapillary_tools/constants.py +1 -2
- mapillary_tools/exceptions.py +1 -1
- mapillary_tools/exif_read.py +65 -65
- mapillary_tools/exif_write.py +7 -7
- mapillary_tools/exiftool_read.py +23 -46
- mapillary_tools/exiftool_read_video.py +36 -34
- mapillary_tools/ffmpeg.py +24 -23
- mapillary_tools/geo.py +4 -21
- mapillary_tools/geotag/{geotag_from_generic.py → base.py} +32 -48
- mapillary_tools/geotag/factory.py +27 -34
- mapillary_tools/geotag/geotag_images_from_exif.py +15 -51
- mapillary_tools/geotag/geotag_images_from_exiftool.py +107 -59
- mapillary_tools/geotag/geotag_images_from_gpx.py +20 -10
- mapillary_tools/geotag/geotag_images_from_gpx_file.py +2 -34
- mapillary_tools/geotag/geotag_images_from_nmea_file.py +0 -3
- mapillary_tools/geotag/geotag_images_from_video.py +16 -14
- mapillary_tools/geotag/geotag_videos_from_exiftool.py +97 -0
- mapillary_tools/geotag/geotag_videos_from_gpx.py +14 -115
- mapillary_tools/geotag/geotag_videos_from_video.py +14 -147
- mapillary_tools/geotag/image_extractors/base.py +18 -0
- mapillary_tools/geotag/image_extractors/exif.py +60 -0
- mapillary_tools/geotag/image_extractors/exiftool.py +18 -0
- mapillary_tools/geotag/options.py +1 -0
- mapillary_tools/geotag/utils.py +62 -0
- mapillary_tools/geotag/video_extractors/base.py +18 -0
- mapillary_tools/geotag/video_extractors/exiftool.py +70 -0
- mapillary_tools/{video_data_extraction/extractors/gpx_parser.py → geotag/video_extractors/gpx.py} +57 -39
- mapillary_tools/geotag/video_extractors/native.py +157 -0
- mapillary_tools/gpmf/gpmf_parser.py +16 -16
- mapillary_tools/gpmf/gps_filter.py +5 -3
- mapillary_tools/history.py +4 -2
- mapillary_tools/mp4/construct_mp4_parser.py +9 -8
- mapillary_tools/mp4/mp4_sample_parser.py +27 -27
- mapillary_tools/mp4/simple_mp4_builder.py +10 -9
- mapillary_tools/mp4/simple_mp4_parser.py +13 -12
- mapillary_tools/process_geotag_properties.py +5 -7
- mapillary_tools/process_sequence_properties.py +40 -38
- mapillary_tools/sample_video.py +8 -8
- mapillary_tools/telemetry.py +6 -5
- mapillary_tools/types.py +33 -38
- mapillary_tools/utils.py +16 -18
- {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0a2.dist-info}/METADATA +1 -1
- mapillary_tools-0.14.0a2.dist-info/RECORD +72 -0
- mapillary_tools/geotag/__init__.py +0 -1
- mapillary_tools/geotag/geotag_images_from_exiftool_both_image_and_video.py +0 -77
- mapillary_tools/geotag/geotag_videos_from_exiftool_video.py +0 -151
- mapillary_tools/video_data_extraction/cli_options.py +0 -22
- mapillary_tools/video_data_extraction/extract_video_data.py +0 -157
- mapillary_tools/video_data_extraction/extractors/base_parser.py +0 -75
- mapillary_tools/video_data_extraction/extractors/blackvue_parser.py +0 -49
- mapillary_tools/video_data_extraction/extractors/camm_parser.py +0 -62
- mapillary_tools/video_data_extraction/extractors/exiftool_runtime_parser.py +0 -74
- mapillary_tools/video_data_extraction/extractors/exiftool_xml_parser.py +0 -52
- mapillary_tools/video_data_extraction/extractors/generic_video_parser.py +0 -52
- mapillary_tools/video_data_extraction/extractors/gopro_parser.py +0 -58
- mapillary_tools/video_data_extraction/extractors/nmea_parser.py +0 -24
- mapillary_tools/video_data_extraction/video_data_parser_factory.py +0 -39
- mapillary_tools-0.14.0a1.dist-info/RECORD +0 -78
- {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0a2.dist-info}/WHEEL +0 -0
- {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0a2.dist-info}/entry_points.txt +0 -0
- {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0a2.dist-info}/licenses/LICENSE +0 -0
- {mapillary_tools-0.14.0a1.dist-info → mapillary_tools-0.14.0a2.dist-info}/top_level.txt +0 -0
mapillary_tools/exif_write.py
CHANGED
@@ -1,11 +1,11 @@
 # pyre-ignore-all-errors[5, 21, 24]
+from __future__ import annotations
 
 import datetime
 import io
 import json
 import logging
 import math
-import typing as T
 from pathlib import Path
 
 import piexif
@@ -15,9 +15,9 @@ LOG = logging.getLogger(__name__)
 
 
 class ExifEdit:
-    _filename_or_bytes:
+    _filename_or_bytes: str | bytes
 
-    def __init__(self, filename_or_bytes:
+    def __init__(self, filename_or_bytes: Path | bytes) -> None:
         """Initialize the object"""
         if isinstance(filename_or_bytes, Path):
             # make sure filename is resolved to avoid to be interpretted as bytes in piexif
@@ -25,12 +25,12 @@ class ExifEdit:
             self._filename_or_bytes = str(filename_or_bytes.resolve())
         else:
             self._filename_or_bytes = filename_or_bytes
-        self._ef:
+        self._ef: dict = piexif.load(self._filename_or_bytes)
 
     @staticmethod
     def decimal_to_dms(
         value: float, precision: int
-    ) ->
+    ) -> tuple[tuple[float, int], tuple[float, int], tuple[float, int]]:
         """
         Convert decimal position to degrees, minutes, seconds in a fromat supported by EXIF
         """
@@ -40,7 +40,7 @@ class ExifEdit:
 
         return (deg, 1), (min, 1), (sec, precision)
 
-    def add_image_description(self, data:
+    def add_image_description(self, data: dict) -> None:
         """Add a dict to image description."""
         self._ef["0th"][piexif.ImageIFD.ImageDescription] = json.dumps(data)
 
@@ -201,7 +201,7 @@ class ExifEdit:
         piexif.insert(exif_bytes, self._filename_or_bytes, output)
         return output.read()
 
-    def write(self, filename:
+    def write(self, filename: Path | None = None) -> None:
         """Save exif data to file."""
         if filename is None:
             if not isinstance(self._filename_or_bytes, str):
mapillary_tools/exiftool_read.py
CHANGED
@@ -6,10 +6,10 @@ import typing as T
 import xml.etree.ElementTree as ET
 from pathlib import Path
 
-from . import exif_read
+from . import exif_read
 
 
-EXIFTOOL_NAMESPACES:
+EXIFTOOL_NAMESPACES: dict[str, str] = {
     "Adobe": "http://ns.exiftool.org/APP14/Adobe/1.0/",
     "Apple": "http://ns.exiftool.org/MakerNotes/Apple/1.0/",
     "Composite": "http://ns.exiftool.org/Composite/1.0/",
@@ -53,11 +53,11 @@ EXIFTOOL_NAMESPACES: T.Dict[str, str] = {
 
 
 LOG = logging.getLogger(__name__)
+DESCRIPTION_TAG = "rdf:Description"
 _FIELD_TYPE = T.TypeVar("_FIELD_TYPE", int, float, str)
-_DESCRIPTION_TAG = "rdf:Description"
 
 
-def expand_tag(ns_tag: str, namespaces:
+def expand_tag(ns_tag: str, namespaces: dict[str, str]) -> str:
     try:
         ns, tag = ns_tag.split(":", maxsplit=2)
     except ValueError:
@@ -72,42 +72,19 @@ def canonical_path(path: Path) -> str:
     return str(path.resolve().as_posix())
 
 
-def find_rdf_description_path(element: ET.Element) ->
+def find_rdf_description_path(element: ET.Element) -> Path | None:
     about = element.get(_EXPANDED_ABOUT_TAG)
     if about is None:
         return None
     return Path(about)
 
 
-def index_rdf_description_by_path(
-    xml_paths: T.Sequence[Path],
-) -> T.Dict[str, ET.Element]:
-    rdf_description_by_path: T.Dict[str, ET.Element] = {}
-
-    for xml_path in utils.find_xml_files(xml_paths):
-        try:
-            etree = ET.parse(xml_path)
-        except ET.ParseError as ex:
-            verbose = LOG.getEffectiveLevel() <= logging.DEBUG
-            if verbose:
-                LOG.warning(f"Failed to parse {xml_path}", exc_info=verbose)
-            else:
-                LOG.warning(f"Failed to parse {xml_path}: {ex}", exc_info=verbose)
-            continue
-
-        rdf_description_by_path.update(
-            index_rdf_description_by_path_from_xml_element(etree.getroot())
-        )
-
-    return rdf_description_by_path
-
-
 def index_rdf_description_by_path_from_xml_element(
     element: ET.Element,
 ) -> dict[str, ET.Element]:
     rdf_description_by_path: dict[str, ET.Element] = {}
 
-    elements = element.iterfind(
+    elements = element.iterfind(DESCRIPTION_TAG, namespaces=EXIFTOOL_NAMESPACES)
     for element in elements:
         path = find_rdf_description_path(element)
         if path is not None:
@@ -127,7 +104,7 @@ class ExifToolRead(exif_read.ExifReadABC):
     ) -> None:
         self.etree = etree
 
-    def extract_altitude(self) ->
+    def extract_altitude(self) -> float | None:
         """
         Extract altitude
         """
@@ -143,7 +120,7 @@ class ExifToolRead(exif_read.ExifReadABC):
 
     def _extract_gps_datetime(
         self, date_tags: T.Sequence[str], time_tags: T.Sequence[str]
-    ) ->
+    ) -> datetime.datetime | None:
         """
         Extract timestamp from GPS field.
         """
@@ -157,13 +134,13 @@ class ExifToolRead(exif_read.ExifReadABC):
 
         return exif_read.parse_gps_datetime_separately(gpsdate, gpstimestamp)
 
-    def extract_gps_datetime(self) ->
+    def extract_gps_datetime(self) -> datetime.datetime | None:
         """
         Extract timestamp from GPS field.
         """
         return self._extract_gps_datetime(["GPS:GPSDateStamp"], ["GPS:GPSTimeStamp"])
 
-    def extract_gps_datetime_from_xmp(self) ->
+    def extract_gps_datetime_from_xmp(self) -> datetime.datetime | None:
         """
         Extract timestamp from XMP GPS field.
         """
@@ -180,7 +157,7 @@ class ExifToolRead(exif_read.ExifReadABC):
         dt_tags: T.Sequence[str],
         subsec_tags: T.Sequence[str],
         offset_tags: T.Sequence[str],
-    ) ->
+    ) -> datetime.datetime | None:
         dtstr = self._extract_alternative_fields(dt_tags, str)
         if dtstr is None:
             return None
@@ -195,7 +172,7 @@ class ExifToolRead(exif_read.ExifReadABC):
             return None
         return dt
 
-    def extract_exif_datetime_from_xmp(self) ->
+    def extract_exif_datetime_from_xmp(self) -> datetime.datetime | None:
         # EXIF DateTimeOriginal: 0x9003 (date/time when original image was taken)
         # EXIF SubSecTimeOriginal: 0x9291 (fractional seconds for DateTimeOriginal)
         # EXIF OffsetTimeOriginal: 0x9011 (time zone for DateTimeOriginal)
@@ -234,7 +211,7 @@ class ExifToolRead(exif_read.ExifReadABC):
 
         return None
 
-    def extract_exif_datetime(self) ->
+    def extract_exif_datetime(self) -> datetime.datetime | None:
         # EXIF DateTimeOriginal: 0x9003 (date/time when original image was taken)
         # EXIF SubSecTimeOriginal: 0x9291 (fractional seconds for DateTimeOriginal)
         # EXIF OffsetTimeOriginal: 0x9011 (time zone for DateTimeOriginal)
@@ -270,7 +247,7 @@ class ExifToolRead(exif_read.ExifReadABC):
 
         return None
 
-    def extract_capture_time(self) ->
+    def extract_capture_time(self) -> datetime.datetime | None:
         """
         Extract capture time from EXIF DateTime tags
         """
@@ -300,7 +277,7 @@ class ExifToolRead(exif_read.ExifReadABC):
 
         return None
 
-    def extract_direction(self) ->
+    def extract_direction(self) -> float | None:
         """
         Extract image direction (i.e. compass, heading, bearing)
         """
@@ -313,7 +290,7 @@ class ExifToolRead(exif_read.ExifReadABC):
             float,
         )
 
-    def extract_lon_lat(self) ->
+    def extract_lon_lat(self) -> tuple[float, float] | None:
         lon_lat = self._extract_lon_lat("GPS:GPSLongitude", "GPS:GPSLatitude")
         if lon_lat is not None:
             return lon_lat
@@ -332,7 +309,7 @@ class ExifToolRead(exif_read.ExifReadABC):
 
     def _extract_lon_lat(
         self, lon_tag: str, lat_tag: str
-    ) ->
+    ) -> tuple[float, float] | None:
         lon = self._extract_alternative_fields(
             [lon_tag],
             float,
@@ -355,7 +332,7 @@ class ExifToolRead(exif_read.ExifReadABC):
 
         return lon, lat
 
-    def extract_make(self) ->
+    def extract_make(self) -> str | None:
         """
         Extract camera make
         """
@@ -374,7 +351,7 @@ class ExifToolRead(exif_read.ExifReadABC):
             return None
         return make.strip()
 
-    def extract_model(self) ->
+    def extract_model(self) -> str | None:
         """
         Extract camera model
         """
@@ -394,7 +371,7 @@ class ExifToolRead(exif_read.ExifReadABC):
             return None
         return model.strip()
 
-    def extract_width(self) ->
+    def extract_width(self) -> int | None:
         """
         Extract image width in pixels
         """
@@ -409,7 +386,7 @@ class ExifToolRead(exif_read.ExifReadABC):
             int,
         )
 
-    def extract_height(self) ->
+    def extract_height(self) -> int | None:
         """
         Extract image height in pixels
         """
@@ -447,8 +424,8 @@ class ExifToolRead(exif_read.ExifReadABC):
     def _extract_alternative_fields(
         self,
         fields: T.Sequence[str],
-        field_type:
-    ) ->
+        field_type: type[_FIELD_TYPE],
+    ) -> _FIELD_TYPE | None:
         for field in fields:
             value = self.etree.findtext(field, namespaces=EXIFTOOL_NAMESPACES)
             if value is None:
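Note: exiftool_read.py now exposes DESCRIPTION_TAG publicly and drops the batch XML-file indexing helper; descriptions are indexed per parsed XML element via iterfind with a namespace map and keyed by their rdf:about path. A small standalone sketch of that lookup pattern (the sample XML and namespace map are illustrative, not taken from the package):

# Sketch only: resolve the prefixed rdf:Description tag through a namespace map
# and key each description element by its rdf:about attribute.
import xml.etree.ElementTree as ET

RDF_NS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
NAMESPACES = {"rdf": RDF_NS}
DESCRIPTION_TAG = "rdf:Description"

SAMPLE = f"""
<rdf:RDF xmlns:rdf="{RDF_NS}">
  <rdf:Description rdf:about="photos/a.jpg"/>
  <rdf:Description rdf:about="photos/b.jpg"/>
</rdf:RDF>
"""

root = ET.fromstring(SAMPLE)
by_path = {}
for desc in root.iterfind(DESCRIPTION_TAG, namespaces=NAMESPACES):
    # Attribute names come back fully expanded, e.g. "{...rdf-syntax-ns#}about"
    about = desc.get(f"{{{RDF_NS}}}about")
    if about is not None:
        by_path[about] = desc

print(sorted(by_path))  # ['photos/a.jpg', 'photos/b.jpg']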
mapillary_tools/exiftool_read_video.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import dataclasses
 import functools
 import logging
@@ -9,7 +11,7 @@ from .telemetry import GPSFix, GPSPoint
 
 
 MAX_TRACK_ID = 10
-EXIFTOOL_NAMESPACES:
+EXIFTOOL_NAMESPACES: dict[str, str] = {
     "Keys": "http://ns.exiftool.org/QuickTime/Keys/1.0/",
     "IFD0": "http://ns.exiftool.org/EXIF/IFD0/1.0/",
     "QuickTime": "http://ns.exiftool.org/QuickTime/QuickTime/1.0/",
@@ -28,7 +30,7 @@ _FIELD_TYPE = T.TypeVar("_FIELD_TYPE", int, float, str, T.List[str])
 expand_tag = functools.partial(exiftool_read.expand_tag, namespaces=EXIFTOOL_NAMESPACES)
 
 
-def _maybe_float(text:
+def _maybe_float(text: str | None) -> float | None:
     if text is None:
         return None
     try:
@@ -37,8 +39,8 @@ def _maybe_float(text: T.Optional[str]) -> T.Optional[float]:
         return None
 
 
-def _index_text_by_tag(elements: T.Iterable[ET.Element]) ->
-    texts_by_tag:
+def _index_text_by_tag(elements: T.Iterable[ET.Element]) -> dict[str, list[str]]:
+    texts_by_tag: dict[str, list[str]] = {}
     for element in elements:
         tag = element.tag
         if element.text is not None:
@@ -47,10 +49,10 @@ def _index_text_by_tag(elements: T.Iterable[ET.Element]) -> T.Dict[str, T.List[s
 
 
 def _extract_alternative_fields(
-    texts_by_tag:
+    texts_by_tag: dict[str, list[str]],
     fields: T.Sequence[str],
     field_type: T.Type[_FIELD_TYPE],
-) ->
+) -> _FIELD_TYPE | None:
     for field in fields:
         values = texts_by_tag.get(expand_tag(field))
         if values is None:
@@ -81,15 +83,15 @@ def _extract_alternative_fields(
 
 
 def _aggregate_gps_track(
-    texts_by_tag:
-    time_tag:
+    texts_by_tag: dict[str, list[str]],
+    time_tag: str | None,
     lon_tag: str,
     lat_tag: str,
-    alt_tag:
-    gps_time_tag:
-    direction_tag:
-    ground_speed_tag:
-) ->
+    alt_tag: str | None = None,
+    gps_time_tag: str | None = None,
+    direction_tag: str | None = None,
+    ground_speed_tag: str | None = None,
+) -> list[GPSPoint]:
     """
     Aggregate all GPS data by the tags.
     It requires lat, lon to be present, and their lengths must match.
@@ -140,8 +142,8 @@ def _aggregate_gps_track(
     assert len(timestamps) == expected_length
 
     def _aggregate_float_values_same_length(
-        tag:
-    ) ->
+        tag: str | None,
+    ) -> list[float | None]:
         if tag is not None:
             vals = [
                 _maybe_float(val)
@@ -212,11 +214,11 @@ def _aggregate_samples(
     elements: T.Iterable[ET.Element],
     sample_time_tag: str,
     sample_duration_tag: str,
-) -> T.Generator[
+) -> T.Generator[tuple[float, float, list[ET.Element]], None, None]:
     expanded_sample_time_tag = expand_tag(sample_time_tag)
     expanded_sample_duration_tag = expand_tag(sample_duration_tag)
 
-    accumulated_elements:
+    accumulated_elements: list[ET.Element] = []
     sample_time = None
     sample_duration = None
     for element in elements:
@@ -234,17 +236,17 @@ def _aggregate_samples(
 
 
 def _aggregate_gps_track_by_sample_time(
-    sample_iterator: T.Iterable[
+    sample_iterator: T.Iterable[tuple[float, float, list[ET.Element]]],
     lon_tag: str,
     lat_tag: str,
-    alt_tag:
-    gps_time_tag:
-    direction_tag:
-    ground_speed_tag:
-    gps_fix_tag:
-    gps_precision_tag:
-) ->
-    track:
+    alt_tag: str | None = None,
+    gps_time_tag: str | None = None,
+    direction_tag: str | None = None,
+    ground_speed_tag: str | None = None,
+    gps_fix_tag: str | None = None,
+    gps_precision_tag: str | None = None,
+) -> list[GPSPoint]:
+    track: list[GPSPoint] = []
 
     expanded_gps_fix_tag = None
     if gps_fix_tag is not None:
@@ -311,7 +313,7 @@ class ExifToolReadVideo:
         self._texts_by_tag = _index_text_by_tag(self.etree.getroot())
         self._all_tags = set(self._texts_by_tag.keys())
 
-    def extract_gps_track(self) ->
+    def extract_gps_track(self) -> list[geo.Point]:
         # blackvue and many other cameras
         track_with_fix = self._extract_gps_track_from_quicktime()
         if track_with_fix:
@@ -329,7 +331,7 @@ class ExifToolReadVideo:
 
         return []
 
-    def _extract_make_and_model(self) ->
+    def _extract_make_and_model(self) -> tuple[str | None, str | None]:
         make = self._extract_alternative_fields(["GoPro:Make"], str)
         model = self._extract_alternative_fields(["GoPro:Model"], str)
         if model is not None:
@@ -360,15 +362,15 @@ class ExifToolReadVideo:
             model = model.strip()
         return make, model
 
-    def extract_make(self) ->
+    def extract_make(self) -> str | None:
         make, _ = self._extract_make_and_model()
         return make
 
-    def extract_model(self) ->
+    def extract_model(self) -> str | None:
         _, model = self._extract_make_and_model()
         return model
 
-    def _extract_gps_track_from_track(self) ->
+    def _extract_gps_track_from_track(self) -> list[GPSPoint]:
         for track_id in range(1, MAX_TRACK_ID + 1):
             track_ns = f"Track{track_id}"
             if self._all_tags_exists(
@@ -402,15 +404,15 @@ class ExifToolReadVideo:
         self,
         fields: T.Sequence[str],
         field_type: T.Type[_FIELD_TYPE],
-    ) ->
+    ) -> _FIELD_TYPE | None:
         return _extract_alternative_fields(self._texts_by_tag, fields, field_type)
 
-    def _all_tags_exists(self, tags:
+    def _all_tags_exists(self, tags: set[str]) -> bool:
         return self._all_tags.issuperset(tags)
 
     def _extract_gps_track_from_quicktime(
         self, namespace: str = "QuickTime"
-    ) ->
+    ) -> list[GPSPoint]:
         if not self._all_tags_exists(
             {
                 expand_tag(f"{namespace}:GPSDateTime"),
mapillary_tools/ffmpeg.py
CHANGED
@@ -1,4 +1,5 @@
 # pyre-ignore-all-errors[5, 24]
+from __future__ import annotations
 
 import datetime
 import json
@@ -33,7 +34,7 @@ class Stream(T.TypedDict):
 
 
 class ProbeOutput(T.TypedDict):
-    streams:
+    streams: list[Stream]
 
 
 class FFmpegNotFoundError(Exception):
@@ -77,7 +78,7 @@ class FFMPEG:
         self,
         ffmpeg_path: str = "ffmpeg",
         ffprobe_path: str = "ffprobe",
-        stderr:
+        stderr: int | None = None,
     ) -> None:
         """
         ffmpeg_path: path to ffmpeg binary
@@ -88,8 +89,8 @@ class FFMPEG:
         self.ffprobe_path = ffprobe_path
         self.stderr = stderr
 
-    def _run_ffprobe_json(self, cmd:
-        full_cmd:
+    def _run_ffprobe_json(self, cmd: list[str]) -> dict:
+        full_cmd: list[str] = [self.ffprobe_path, "-print_format", "json", *cmd]
         LOG.info(f"Extracting video information: {' '.join(full_cmd)}")
         try:
             completed = subprocess.run(
@@ -132,8 +133,8 @@ class FFMPEG:
 
         return output
 
-    def _run_ffmpeg(self, cmd:
-        full_cmd:
+    def _run_ffmpeg(self, cmd: list[str]) -> None:
+        full_cmd: list[str] = [self.ffmpeg_path, *cmd]
         LOG.info(f"Extracting frames: {' '.join(full_cmd)}")
         try:
             subprocess.run(full_cmd, check=True, stderr=self.stderr)
@@ -145,7 +146,7 @@ class FFMPEG:
             raise FFmpegCalledProcessError(ex) from ex
 
     def probe_format_and_streams(self, video_path: Path) -> ProbeOutput:
-        cmd:
+        cmd: list[str] = [
             "-hide_banner",
             "-show_format",
             "-show_streams",
@@ -158,7 +159,7 @@ class FFMPEG:
         video_path: Path,
         sample_dir: Path,
         sample_interval: float,
-        stream_idx:
+        stream_idx: int | None = None,
     ) -> None:
         """
         Extract frames by the sample interval from the specified video stream.
@@ -175,7 +176,7 @@ class FFMPEG:
             ouput_template = f"{sample_prefix}_{NA_STREAM_IDX}_%06d{FRAME_EXT}"
             stream_specifier = "v"
 
-        cmd:
+        cmd: list[str] = [
             # global options should be specified first
             *["-hide_banner", "-nostdin"],
             # input 0
@@ -195,7 +196,7 @@ class FFMPEG:
 
         self._run_ffmpeg(cmd)
 
-    def generate_binary_search(self, sorted_frame_indices:
+    def generate_binary_search(self, sorted_frame_indices: list[int]) -> str:
         length = len(sorted_frame_indices)
 
         if length == 0:
@@ -211,8 +212,8 @@ class FFMPEG:
         self,
         video_path: Path,
         sample_dir: Path,
-        frame_indices:
-        stream_idx:
+        frame_indices: set[int],
+        stream_idx: int | None = None,
     ) -> None:
         """
         Extract specified frames from the specified video stream.
@@ -253,7 +254,7 @@ class FFMPEG:
        # If not close, error "The process cannot access the file because it is being used by another process"
        if not delete:
            select_file.close()
-        cmd:
+        cmd: list[str] = [
             # global options should be specified first
             *["-hide_banner", "-nostdin"],
             # input 0
@@ -300,7 +301,7 @@ class Probe:
     def __init__(self, probe: ProbeOutput) -> None:
         self.probe = probe
 
-    def probe_video_start_time(self) ->
+    def probe_video_start_time(self) -> datetime.datetime | None:
        """
        Find video start time of the given video.
        It searches video creation time and duration in video streams first and then the other streams.
@@ -327,11 +328,11 @@ class Probe:
 
         return None
 
-    def probe_video_streams(self) ->
+    def probe_video_streams(self) -> list[Stream]:
         streams = self.probe.get("streams", [])
         return [stream for stream in streams if stream.get("codec_type") == "video"]
 
-    def probe_video_with_max_resolution(self) ->
+    def probe_video_with_max_resolution(self) -> Stream | None:
         video_streams = self.probe_video_streams()
         video_streams.sort(
             key=lambda s: s.get("width", 0) * s.get("height", 0), reverse=True
@@ -341,7 +342,7 @@ class Probe:
         return video_streams[0]
 
 
-def extract_stream_start_time(stream: Stream) ->
+def extract_stream_start_time(stream: Stream) -> datetime.datetime | None:
     """
     Find the start time of the given stream.
     Start time is the creation time of the stream minus the duration of the stream.
@@ -368,7 +369,7 @@ def extract_stream_start_time(stream: Stream) -> T.Optional[datetime.datetime]:
 def _extract_stream_frame_idx(
     sample_basename: str,
     sample_basename_pattern: T.Pattern[str],
-) ->
+) -> tuple[int | None, int] | None:
     """
     extract stream id and frame index from sample basename
     e.g. basename GX010001_NA_000000.jpg will extract (None, 0)
@@ -408,7 +409,7 @@ def _extract_stream_frame_idx(
 
 def iterate_samples(
     sample_dir: Path, video_path: Path
-) -> T.Generator[
+) -> T.Generator[tuple[int | None, int, Path], None, None]:
     """
     Search all samples in the sample_dir,
     and return a generator of the tuple: (stream ID, frame index, sample path).
@@ -428,17 +429,17 @@ def iterate_samples(
 
 
 def sort_selected_samples(
-    sample_dir: Path, video_path: Path, selected_stream_indices:
-) ->
+    sample_dir: Path, video_path: Path, selected_stream_indices: list[int | None]
+) -> list[tuple[int, list[Path | None]]]:
     """
     Group frames by frame index, so that
     the Nth group contains all the frames from the selected streams at frame index N.
     """
-    stream_samples:
+    stream_samples: dict[int, list[tuple[int | None, Path]]] = {}
    for stream_idx, frame_idx, sample_path in iterate_samples(sample_dir, video_path):
        stream_samples.setdefault(frame_idx, []).append((stream_idx, sample_path))
 
-    selected:
+    selected: list[tuple[int, list[Path | None]]] = []
    for frame_idx in sorted(stream_samples.keys()):
        indexed = {
            stream_idx: sample_path
mapillary_tools/geo.py
CHANGED
@@ -244,14 +244,14 @@ class Interpolator:
         return interpolated
 
 
-
+_T = T.TypeVar("_T")
 
 
 def sample_points_by_distance(
-    samples: T.Iterable[
+    samples: T.Iterable[_T],
     min_distance: float,
-    point_func: T.Callable[[
-) -> T.Generator[
+    point_func: T.Callable[[_T], Point],
+) -> T.Generator[_T, None, None]:
     prevp: Point | None = None
     for sample in samples:
         if prevp is None:
@@ -281,23 +281,6 @@ def interpolate_directions_if_none(sequence: T.Sequence[PointLike]) -> None:
         sequence[-1].angle = prev_angle
 
 
-def extend_deduplicate_points(
-    sequence: T.Iterable[PointLike],
-    to_extend: list[PointLike] | None = None,
-) -> list[PointLike]:
-    if to_extend is None:
-        to_extend = []
-    for point in sequence:
-        if to_extend:
-            prev = to_extend[-1].lon, to_extend[-1].lat
-            cur = (point.lon, point.lat)
-            if cur != prev:
-                to_extend.append(point)
-        else:
-            to_extend.append(point)
-    return to_extend
-
-
 def _ecef_from_lla2(lat: float, lon: float) -> tuple[float, float, float]:
     """
     Compute ECEF XYZ from latitude and longitude.