mapillary-tools 0.12.1__py3-none-any.whl → 0.13.1__py3-none-any.whl
This diff compares the contents of two package versions as publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- mapillary_tools/__init__.py +1 -1
- mapillary_tools/api_v4.py +94 -4
- mapillary_tools/{geotag → camm}/camm_builder.py +73 -61
- mapillary_tools/camm/camm_parser.py +561 -0
- mapillary_tools/commands/__init__.py +0 -1
- mapillary_tools/commands/__main__.py +0 -6
- mapillary_tools/commands/process.py +0 -50
- mapillary_tools/commands/upload.py +1 -26
- mapillary_tools/constants.py +2 -2
- mapillary_tools/exiftool_read_video.py +13 -11
- mapillary_tools/ffmpeg.py +2 -2
- mapillary_tools/geo.py +0 -54
- mapillary_tools/geotag/blackvue_parser.py +4 -4
- mapillary_tools/geotag/geotag_images_from_exif.py +2 -1
- mapillary_tools/geotag/geotag_images_from_exiftool_both_image_and_video.py +0 -1
- mapillary_tools/geotag/geotag_images_from_gpx_file.py +7 -1
- mapillary_tools/geotag/geotag_videos_from_exiftool_video.py +5 -3
- mapillary_tools/geotag/geotag_videos_from_video.py +13 -14
- mapillary_tools/geotag/gpmf_gps_filter.py +9 -10
- mapillary_tools/geotag/gpmf_parser.py +346 -83
- mapillary_tools/mp4/__init__.py +0 -0
- mapillary_tools/{geotag → mp4}/construct_mp4_parser.py +32 -16
- mapillary_tools/mp4/mp4_sample_parser.py +322 -0
- mapillary_tools/{geotag → mp4}/simple_mp4_builder.py +64 -38
- mapillary_tools/process_geotag_properties.py +25 -19
- mapillary_tools/process_sequence_properties.py +6 -6
- mapillary_tools/sample_video.py +17 -16
- mapillary_tools/telemetry.py +71 -0
- mapillary_tools/types.py +18 -0
- mapillary_tools/upload.py +74 -233
- mapillary_tools/upload_api_v4.py +8 -9
- mapillary_tools/utils.py +9 -16
- mapillary_tools/video_data_extraction/cli_options.py +0 -1
- mapillary_tools/video_data_extraction/extract_video_data.py +13 -31
- mapillary_tools/video_data_extraction/extractors/base_parser.py +13 -11
- mapillary_tools/video_data_extraction/extractors/blackvue_parser.py +5 -4
- mapillary_tools/video_data_extraction/extractors/camm_parser.py +13 -16
- mapillary_tools/video_data_extraction/extractors/exiftool_runtime_parser.py +4 -9
- mapillary_tools/video_data_extraction/extractors/exiftool_xml_parser.py +9 -11
- mapillary_tools/video_data_extraction/extractors/generic_video_parser.py +6 -11
- mapillary_tools/video_data_extraction/extractors/gopro_parser.py +11 -4
- mapillary_tools/video_data_extraction/extractors/gpx_parser.py +90 -11
- mapillary_tools/video_data_extraction/extractors/nmea_parser.py +3 -3
- mapillary_tools/video_data_extraction/video_data_parser_factory.py +13 -20
- {mapillary_tools-0.12.1.dist-info → mapillary_tools-0.13.1.dist-info}/METADATA +10 -3
- mapillary_tools-0.13.1.dist-info/RECORD +75 -0
- {mapillary_tools-0.12.1.dist-info → mapillary_tools-0.13.1.dist-info}/WHEEL +1 -1
- mapillary_tools/commands/upload_blackvue.py +0 -33
- mapillary_tools/commands/upload_camm.py +0 -33
- mapillary_tools/commands/upload_zip.py +0 -33
- mapillary_tools/geotag/camm_parser.py +0 -306
- mapillary_tools/geotag/mp4_sample_parser.py +0 -426
- mapillary_tools/process_import_meta_properties.py +0 -76
- mapillary_tools-0.12.1.dist-info/RECORD +0 -77
- /mapillary_tools/{geotag → mp4}/io_utils.py +0 -0
- /mapillary_tools/{geotag → mp4}/simple_mp4_parser.py +0 -0
- {mapillary_tools-0.12.1.dist-info → mapillary_tools-0.13.1.dist-info}/LICENSE +0 -0
- {mapillary_tools-0.12.1.dist-info → mapillary_tools-0.13.1.dist-info}/entry_points.txt +0 -0
- {mapillary_tools-0.12.1.dist-info → mapillary_tools-0.13.1.dist-info}/top_level.txt +0 -0
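Several modules move out of mapillary_tools/geotag into the new camm and mp4 packages in this release. A rough sketch of what the renames above imply for downstream imports, assuming the public module names themselves are unchanged:

    # 0.12.1
    from mapillary_tools.geotag import camm_builder, camm_parser
    from mapillary_tools.geotag import construct_mp4_parser, simple_mp4_builder, simple_mp4_parser

    # 0.13.1
    from mapillary_tools.camm import camm_builder, camm_parser
    from mapillary_tools.mp4 import construct_mp4_parser, simple_mp4_builder, simple_mp4_parser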
mapillary_tools/geotag/gpmf_parser.py

@@ -1,11 +1,15 @@
+import dataclasses
+import datetime
 import io
+import itertools
 import pathlib
 import typing as T
 
 import construct as C
 
-from .. import
-from . import
+from .. import telemetry
+from ..mp4.mp4_sample_parser import MovieBoxParser, Sample, TrackBoxParser
+from ..telemetry import GPSFix, GPSPoint
 
 """
 Parsing GPS from GPMF data format stored in GoPros. See the GPMF spec: https://github.com/gopro/gpmf-parser
@@ -125,6 +129,22 @@ KLV = C.Struct(
 GPMFSampleData = C.GreedyRange(KLV)
 
 
+@dataclasses.dataclass
+class TelemetryData:
+    gps: T.List[GPSPoint]
+    accl: T.List[telemetry.AccelerationData]
+    gyro: T.List[telemetry.GyroscopeData]
+    magn: T.List[telemetry.MagnetometerData]
+
+
+def _gps5_timestamp_to_epoch_time(dtstr: str):
+    # yymmddhhmmss.sss
+    dt = datetime.datetime.strptime(dtstr, "%y%m%d%H%M%S.%f").replace(
+        tzinfo=datetime.timezone.utc
+    )
+    return dt.timestamp()
+
+
 # A GPS5 stream example:
 # key = b'STRM' type = b'\x00' structure_size = 1 repeat = 400
 # data = ListContainer:
@@ -163,7 +183,7 @@ GPMFSampleData = C.GreedyRange(KLV)
 # ]
 def gps5_from_stream(
     stream: T.Sequence[KLVDict],
-) -> T.Generator[
+) -> T.Generator[GPSPoint, None, None]:
     indexed: T.Dict[bytes, T.List[T.List[T.Any]]] = {
         klv["key"]: klv["data"] for klv in stream
     }
@@ -181,10 +201,20 @@ def gps5_from_stream(
 
     gpsf = indexed.get(b"GPSF")
     if gpsf is not None:
-gpsf_value =
+        gpsf_value = GPSFix(gpsf[0][0])
     else:
         gpsf_value = None
 
+    gpsu = indexed.get(b"GPSU")
+    if gpsu is not None:
+        try:
+            yymmdd = gpsu[0][0].decode("utf-8")
+            epoch_time = _gps5_timestamp_to_epoch_time(yymmdd)
+        except Exception:
+            epoch_time = None
+    else:
+        epoch_time = None
+
     gpsp = indexed.get(b"GPSP")
     if gpsp is not None:
         gpsp_value = gpsp[0][0]
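The GPSU tag handled in the block above carries a yymmddhhmmss.sss UTC string. A standalone check of the conversion performed by _gps5_timestamp_to_epoch_time, with a made-up sample value:

    import datetime

    dt = datetime.datetime.strptime("240101120000.500", "%y%m%d%H%M%S.%f").replace(
        tzinfo=datetime.timezone.utc
    )
    print(dt.timestamp())  # 1704110400.5, i.e. 2024-01-01 12:00:00.5 UTC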
@@ -195,22 +225,51 @@ def gps5_from_stream(
         lat, lon, alt, ground_speed, _speed_3d = [
             v / s for v, s in zip(point, scal_values)
         ]
-yield
+        yield GPSPoint(
             # will figure out the actual timestamp later
             time=0,
             lat=lat,
             lon=lon,
             alt=alt,
-
-
-
+            epoch_time=epoch_time,
+            fix=gpsf_value,
+            precision=gpsp_value,
+            ground_speed=ground_speed,
             angle=None,
         )
 
 
+_EPOCH_TIME_IN_2000 = datetime.datetime(
+    2000, 1, 1, tzinfo=datetime.timezone.utc
+).timestamp()
+
+
+def _gps9_timestamp_to_epoch_time(
+    days_since_2000: int, secs_since_midnight: float
+) -> float:
+    epoch_time = _EPOCH_TIME_IN_2000 + days_since_2000 * 24 * 60 * 60
+    epoch_time += secs_since_midnight
+    return epoch_time
+
+
+def _get_gps_type(input) -> bytes:
+    final = b""
+    for val in input or []:
+        if isinstance(val, bytes):
+            final += val
+        elif isinstance(val, list):
+            final += _get_gps_type(val)
+        else:
+            raise ValueError(f"Unexpected type {type(val)} in {input}")
+
+    return final
+
+
 def gps9_from_stream(
     stream: T.Sequence[KLVDict],
-) -> T.Generator[
+) -> T.Generator[GPSPoint, None, None]:
+    NUM_VALUES = 9
+
     indexed: T.Dict[bytes, T.List[T.List[T.Any]]] = {
         klv["key"]: klv["data"] for klv in stream
     }
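GPS9 packs its timestamp as a day count since 2000-01-01 plus seconds since midnight, and _gps9_timestamp_to_epoch_time above is plain offset arithmetic on top of the _EPOCH_TIME_IN_2000 constant. A standalone sanity check with made-up inputs (8766 days after 2000-01-01 is 2024-01-01):

    import datetime

    epoch_2000 = datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc).timestamp()
    epoch_time = epoch_2000 + 8766 * 24 * 60 * 60 + 43200.5
    print(datetime.datetime.fromtimestamp(epoch_time, tz=datetime.timezone.utc))
    # 2024-01-01 12:00:00.500000+00:00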
@@ -226,17 +285,25 @@ def gps9_from_stream(
     if any(s == 0 for s in scal_values):
         return
 
-
-if
+    gps_value_types = _get_gps_type(indexed.get(b"TYPE"))
+    if not gps_value_types:
         return
-
+
+    if len(gps_value_types) != NUM_VALUES:
+        raise ValueError(
+            f"Error parsing the complex type {gps_value_types!r}: expect {NUM_VALUES} types but got {len(gps_value_types)}"
+        )
 
     try:
         sample_parser = C.Sequence(
-*[
+            *[
+                # Changed in version 3.11: Added default argument values for length and byteorder
+                _type_mapping[t.to_bytes(length=1, byteorder="big")][0]
+                for t in gps_value_types
+            ]
         )
     except Exception as ex:
-        raise ValueError(f"Error parsing the complex type {gps_value_types}: {ex}")
+        raise ValueError(f"Error parsing the complex type {gps_value_types!r}: {ex}")
 
     for sample_data_bytes in gps9:
         sample_data = sample_parser.parse(sample_data_bytes)
@@ -247,21 +314,24 @@ def gps9_from_stream(
             alt,
             speed_2d,
             _speed_3d,
-
-
+            days_since_2000,
+            secs_since_midnight,
             dop,
             gps_fix,
         ) = [v / s for v, s in zip(sample_data, scal_values)]
 
-
+        epoch_time = _gps9_timestamp_to_epoch_time(days_since_2000, secs_since_midnight)
+
+        yield GPSPoint(
             # will figure out the actual timestamp later
             time=0,
             lat=lat,
             lon=lon,
             alt=alt,
-
-
-
+            epoch_time=epoch_time,
+            fix=GPSFix(gps_fix),
+            precision=dop * 100,
+            ground_speed=speed_2d,
             angle=None,
         )
 
@@ -282,8 +352,8 @@ def _find_first_device_id(stream: T.Sequence[KLVDict]) -> int:
     return device_id
 
 
-def _find_first_gps_stream(stream: T.Sequence[KLVDict]) -> T.List[
-sample_points: T.List[
+def _find_first_gps_stream(stream: T.Sequence[KLVDict]) -> T.List[GPSPoint]:
+    sample_points: T.List[GPSPoint] = []
 
     for klv in stream:
         if klv["key"] == b"STRM":
@@ -298,14 +368,126 @@ def _find_first_gps_stream(stream: T.Sequence[KLVDict]) -> T.List[geo.PointWithF
     return sample_points
 
 
+# a sensor matrix with only [1,0,0, 0,-1,0, 0,0,1], is just a form of non-calibrated sensor orientation
+def _is_matrix_calibration(matrix: T.Sequence[float]) -> bool:
+    for v in matrix:
+        if v not in [0, -1, 1]:
+            return True
+    return False
+
+
+def _build_matrix(
+    orin: T.Union[bytes, T.Sequence[int]], orio: T.Union[bytes, T.Sequence[int]]
+) -> T.Sequence[float]:
+    matrix = []
+
+    # list(b'aA') == [97, 65]
+    lower_a, upper_A = 97, 65
+
+    for out_char in orin:
+        for in_char in orio:
+            if in_char == out_char:
+                matrix.append(1.0)
+            elif (in_char - lower_a) == (out_char - upper_A):
+                matrix.append(-1.0)
+            elif (in_char - upper_A) == (out_char - lower_a):
+                matrix.append(-1.0)
+            else:
+                matrix.append(0.0)
+
+    return matrix
+
+
+def _apply_matrix(
+    matrix: T.Sequence[float], values: T.Sequence[float]
+) -> T.Generator[float, None, None]:
+    size = len(values)
+    assert len(matrix) == size * size, (
+        f"expecting a square matrix of size {size} x {size} but got {len(matrix)}"
+    )
+
+    for y in range(size):
+        row_start = y * size
+        yield sum(matrix[row_start + x] * values[x] for x in range(size))
+
+
+def _flatten(nested: T.Sequence[T.Sequence[float]]) -> T.List[float]:
+    output: T.List[float] = []
+    for row in nested:
+        output.extend(row)
+    return output
+
+
+def _get_matrix(klv: T.Dict[bytes, KLVDict]) -> T.Optional[T.Sequence[float]]:
+    mtrx = klv.get(b"MTRX")
+    if mtrx is not None:
+        matrix: T.Sequence[float] = _flatten(mtrx["data"])
+        if _is_matrix_calibration(matrix):
+            return matrix
+
+    orin = klv.get(b"ORIN")
+    orio = klv.get(b"ORIO")
+
+    if orin is not None and orio is not None:
+        matrix = _build_matrix(b"".join(orin["data"]), b"".join(orio["data"]))
+        return matrix
+
+    return None
+
+
+def _scale_and_calibrate(
+    stream: T.Sequence[KLVDict], key: bytes
+) -> T.Generator[T.Sequence[float], None, None]:
+    indexed: T.Dict[bytes, KLVDict] = {klv["key"]: klv for klv in stream}
+
+    klv = indexed.get(key)
+    if klv is None:
+        return
+
+    scal_klv = indexed.get(b"SCAL")
+
+    if scal_klv is not None:
+        # replace 0s with 1s to avoid division by zero
+        scals = [s or 1 for s in _flatten(scal_klv["data"])]
+
+    if not scals:
+        scals = [1]
+
+    if len(scals) == 1:
+        # infinite repeat
+        scales: T.Iterable[float] = itertools.repeat(scals[0])
+    else:
+        scales = scals
+
+    matrix = _get_matrix(indexed)
+
+    for values in klv["data"]:
+        if matrix is None:
+            yield tuple(v / s for v, s in zip(values, scales))
+        else:
+            yield tuple(v / s for v, s in zip(_apply_matrix(matrix, values), scales))
+
+
+def _find_first_telemetry_stream(stream: T.Sequence[KLVDict], key: bytes):
+    values: T.List[T.Sequence[float]] = []
+
+    for klv in stream:
+        if klv["key"] == b"STRM":
+            values = list(_scale_and_calibrate(klv["data"], key))
+            if values:
+                break
+
+    return values
+
+
 def _extract_dvnm_from_samples(
-fp: T.BinaryIO, samples: T.Iterable[
+    fp: T.BinaryIO, samples: T.Iterable[Sample]
 ) -> T.Dict[int, bytes]:
     dvnm_by_dvid: T.Dict[int, bytes] = {}
 
     for sample in samples:
-        fp.seek(sample.offset, io.SEEK_SET)
-        data = fp.read(sample.size)
+        fp.seek(sample.raw_sample.offset, io.SEEK_SET)
+        data = fp.read(sample.raw_sample.size)
         gpmf_sample_data = T.cast(T.Dict, GPMFSampleData.parse(data))
 
         # iterate devices
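The calibration helpers above reduce ORIN/ORIO orientation strings to a signed permutation matrix, which _apply_matrix multiplies into each raw sample before the SCAL division in _scale_and_calibrate. An illustrative, self-contained composition with made-up numbers; the matrix shown is what _build_matrix produces for ORIN=b"yXZ" and ORIO=b"XYZ":

    import itertools

    def apply_matrix(matrix, values):
        # same row-times-vector product as _apply_matrix above
        size = len(values)
        return [
            sum(matrix[y * size + x] * values[x] for x in range(size))
            for y in range(size)
        ]

    matrix = [0.0, -1.0, 0.0,
              1.0, 0.0, 0.0,
              0.0, 0.0, 1.0]
    scales = itertools.repeat(418.0)   # a single SCAL value repeats for every axis

    raw = (1000, -2000, 500)           # one raw sensor triple (made up)
    print([v / s for v, s in zip(apply_matrix(matrix, raw), scales)])
    # approximately [4.78, 2.39, 1.20]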
@@ -321,84 +503,174 @@ def _extract_dvnm_from_samples(
     return dvnm_by_dvid
 
 
+def _backfill_gps_timestamps(gps_points: T.Iterable[GPSPoint]) -> None:
+    it = iter(gps_points)
+
+    # find the first point with epoch time
+    last = None
+    for point in it:
+        if point.epoch_time is not None:
+            last = point
+            break
+
+    # if no point with epoch time found, return
+    if last is None:
+        return
+
+    # backfill points without epoch time
+    for point in it:
+        assert last.epoch_time is not None
+        if point.epoch_time is None:
+            point.epoch_time = last.epoch_time + (point.time - last.time)
+        last = point
+
+
 def _extract_points_from_samples(
-fp: T.BinaryIO, samples: T.Iterable[
-) ->
+    fp: T.BinaryIO, samples: T.Iterable[Sample]
+) -> TelemetryData:
     # To keep GPS points from different devices separated
-points_by_dvid: T.Dict[int, T.List[
+    points_by_dvid: T.Dict[int, T.List[GPSPoint]] = {}
+    accls_by_dvid: T.Dict[int, T.List[telemetry.AccelerationData]] = {}
+    gyros_by_dvid: T.Dict[int, T.List[telemetry.GyroscopeData]] = {}
+    magns_by_dvid: T.Dict[int, T.List[telemetry.MagnetometerData]] = {}
 
     for sample in samples:
-        fp.seek(sample.offset, io.SEEK_SET)
-        data = fp.read(sample.size)
+        fp.seek(sample.raw_sample.offset, io.SEEK_SET)
+        data = fp.read(sample.raw_sample.size)
         gpmf_sample_data = T.cast(T.Dict, GPMFSampleData.parse(data))
 
         # iterate devices
         devices = (klv for klv in gpmf_sample_data if klv["key"] == b"DEVC")
         for device in devices:
+            device_id = _find_first_device_id(device["data"])
+
             sample_points = _find_first_gps_stream(device["data"])
             if sample_points:
                 # interpolate timestamps in between
-avg_timedelta = sample.
+                avg_timedelta = sample.exact_timedelta / len(sample_points)
                 for idx, point in enumerate(sample_points):
-point.time = sample.
+                    point.time = sample.exact_time + avg_timedelta * idx
 
-                device_id = _find_first_device_id(device["data"])
                 device_points = points_by_dvid.setdefault(device_id, [])
                 device_points.extend(sample_points)
 
-
-
+            sample_accls = _find_first_telemetry_stream(device["data"], b"ACCL")
+            if sample_accls:
+                # interpolate timestamps in between
+                avg_delta = sample.exact_timedelta / len(sample_accls)
+                accls_by_dvid.setdefault(device_id, []).extend(
+                    telemetry.AccelerationData(
+                        time=sample.exact_time + avg_delta * idx,
+                        x=x,
+                        y=y,
+                        z=z,
+                    )
+                    for idx, (z, x, y, *_) in enumerate(sample_accls)
+                )
+
+            sample_gyros = _find_first_telemetry_stream(device["data"], b"GYRO")
+            if sample_gyros:
+                # interpolate timestamps in between
+                avg_delta = sample.exact_timedelta / len(sample_gyros)
+                gyros_by_dvid.setdefault(device_id, []).extend(
+                    telemetry.GyroscopeData(
+                        time=sample.exact_time + avg_delta * idx,
+                        x=x,
+                        y=y,
+                        z=z,
+                    )
+                    for idx, (z, x, y, *_) in enumerate(sample_gyros)
+                )
+
+            sample_magns = _find_first_telemetry_stream(device["data"], b"MAGN")
+            if sample_magns:
+                # interpolate timestamps in between
+                avg_delta = sample.exact_timedelta / len(sample_magns)
+                magns_by_dvid.setdefault(device_id, []).extend(
+                    telemetry.MagnetometerData(
+                        time=sample.exact_time + avg_delta * idx,
+                        x=x,
+                        y=y,
+                        z=z,
+                    )
+                    for idx, (z, x, y, *_) in enumerate(sample_magns)
+                )
 
+    gps_points = list(points_by_dvid.values())[0] if points_by_dvid else []
 
-
+    # backfill forward from the first point with epoch time
+    _backfill_gps_timestamps(gps_points)
+
+    # backfill backward from the first point with epoch time in reversed order
+    _backfill_gps_timestamps(reversed(gps_points))
+
+    return TelemetryData(
+        gps=gps_points,
+        accl=list(accls_by_dvid.values())[0] if accls_by_dvid else [],
+        gyro=list(gyros_by_dvid.values())[0] if gyros_by_dvid else [],
+        magn=list(magns_by_dvid.values())[0] if magns_by_dvid else [],
+    )
+
+
+def _is_gpmd_description(description: T.Dict) -> bool:
+    return description["format"] == b"gpmd"
+
+
+def _contains_gpmd_description(track: TrackBoxParser) -> bool:
+    descriptions = track.extract_sample_descriptions()
+    return any(_is_gpmd_description(d) for d in descriptions)
+
+
+def _filter_gpmd_samples(track: TrackBoxParser) -> T.Generator[Sample, None, None]:
+    for sample in track.extract_samples():
+        if _is_gpmd_description(sample.description):
+            yield sample
+
+
+def extract_points(fp: T.BinaryIO) -> T.List[GPSPoint]:
     """
     Return a list of points (could be empty) if it is a valid GoPro video,
     otherwise None
     """
-
-for
-
-
-
-        s.seek(trak_start_offset, io.SEEK_SET)
-        gpmd_samples = _extract_gpmd_samples_from_trak(s, h.maxsize)
-        points = list(_extract_points_from_samples(fp, gpmd_samples))
+    moov = MovieBoxParser.parse_stream(fp)
+    for track in moov.extract_tracks():
+        if _contains_gpmd_description(track):
+            gpmd_samples = _filter_gpmd_samples(track)
+            telemetry = _extract_points_from_samples(fp, gpmd_samples)
             # return the firstly found non-empty points
-if
-return
+            if telemetry.gps:
+                return telemetry.gps
+
    # points could be empty list or None here
-return
+    return []
 
 
-def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    samples = sample_parser.parse_samples_from_trak(s, maxsize=maxsize)
-    gpmd_samples = (
-        sample for sample in samples if sample.description["format"] == b"gpmd"
-    )
-    yield from gpmd_samples
+def extract_telemetry_data(fp: T.BinaryIO) -> T.Optional[TelemetryData]:
+    """
+    Return the telemetry data from the first found GoPro GPMF track
+    """
+    moov = MovieBoxParser.parse_stream(fp)
+
+    for track in moov.extract_tracks():
+        if _contains_gpmd_description(track):
+            gpmd_samples = _filter_gpmd_samples(track)
+            telemetry = _extract_points_from_samples(fp, gpmd_samples)
+            # return the firstly found non-empty points
+            if telemetry.gps:
+                return telemetry
+
+    # points could be empty list or None here
+    return None
 
 
 def extract_all_device_names(fp: T.BinaryIO) -> T.Dict[int, bytes]:
-
-
-
-
-
+    moov = MovieBoxParser.parse_stream(fp)
+    for track in moov.extract_tracks():
+        if _contains_gpmd_description(track):
+            gpmd_samples = _filter_gpmd_samples(track)
+            device_names = _extract_dvnm_from_samples(fp, gpmd_samples)
+            if device_names:
+                return device_names
     return {}
 
 
@@ -433,18 +705,9 @@ def extract_camera_model(fp: T.BinaryIO) -> str:
     return unicode_names[0].strip()
 
 
-def parse_gpx(path: pathlib.Path) -> T.List[
+def parse_gpx(path: pathlib.Path) -> T.List[GPSPoint]:
     with path.open("rb") as fp:
         points = extract_points(fp)
     if points is None:
         return []
     return points
-
-
-def iterate_gpmd_sample_data(fp: T.BinaryIO) -> T.Generator[T.Dict, None, None]:
-    for h, s in parser.parse_path(fp, [b"moov", b"trak"]):
-        gpmd_samples = _extract_gpmd_samples_from_trak(s, h.maxsize)
-        for sample in gpmd_samples:
-            fp.seek(sample.offset, io.SEEK_SET)
-            data = fp.read(sample.size)
-            yield T.cast(T.Dict, GPMFSampleData.parse(data))
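The module's entry points now return either plain GPS points (extract_points) or the full telemetry bundle (extract_telemetry_data). A minimal usage sketch, assuming the module keeps the path mapillary_tools/geotag/gpmf_parser.py listed above and using a hypothetical file name:

    from mapillary_tools.geotag import gpmf_parser

    with open("GX010001.MP4", "rb") as fp:   # hypothetical GoPro recording
        data = gpmf_parser.extract_telemetry_data(fp)

    if data is not None:
        print(len(data.gps), len(data.accl), len(data.gyro), len(data.magn))
        first = data.gps[0]
        print(first.time, first.lat, first.lon, first.alt, first.fix, first.epoch_time)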
File without changes
mapillary_tools/{geotag → mp4}/construct_mp4_parser.py

@@ -441,12 +441,6 @@ class Box64ConstructBuilder:
     def parse_boxlist(self, data: bytes) -> T.List[BoxDict]:
         return T.cast(T.List[BoxDict], self.BoxList.parse(data))
 
-    def build_box(self, box: BoxDict) -> bytes:
-        return self.Box.build(box)
-
-    def build_boxlist(self, boxes: T.Sequence[BoxDict]) -> bytes:
-        return self.BoxList.build(boxes)
-
 
 class Box32ConstructBuilder(Box64ConstructBuilder):
     """
@@ -467,6 +461,18 @@ class Box32ConstructBuilder(Box64ConstructBuilder):
 
         return self._box
 
+    def parse_box(self, data: bytes) -> BoxDict:
+        raise NotImplementedError("Box32ConstructBuilder does not support parsing")
+
+    def parse_boxlist(self, data: bytes) -> T.List[BoxDict]:
+        raise NotImplementedError("Box32ConstructBuilder does not support parsing")
+
+    def build_box(self, box: BoxDict) -> bytes:
+        return self.Box.build(box)
+
+    def build_boxlist(self, boxes: T.Sequence[BoxDict]) -> bytes:
+        return self.BoxList.build(boxes)
+
 
 # pyre-ignore[9]: pyre does not support recursive type SwitchMapType
 CMAP: SwitchMapType = {
@@ -580,8 +586,17 @@ MOOVWithoutSTBLBuilderConstruct = Box32ConstructBuilder(
 def find_box_at_pathx(
     box: T.Union[T.Sequence[BoxDict], BoxDict], path: T.Sequence[bytes]
 ) -> BoxDict:
-
+    found = find_box_at_path(box, path)
+    if found is None:
         raise ValueError(f"box at path {path} not found")
+    return found
+
+
+def find_box_at_path(
+    box: T.Union[T.Sequence[BoxDict], BoxDict], path: T.Sequence[bytes]
+) -> T.Optional[BoxDict]:
+    if not path:
+        return None
 
     boxes: T.Sequence[BoxDict]
     if isinstance(box, dict):
@@ -593,12 +608,13 @@ def find_box_at_pathx(
         if box["type"] == path[0]:
             if len(path) == 1:
                 return box
-
-
-
-
-
-
-
-
-
+            box_data = T.cast(T.Sequence[BoxDict], box["data"])
+            # ListContainer from construct is not sequence
+            assert isinstance(box_data, T.Sequence), (
+                f"expect a list of boxes but got {type(box_data)} at path {path}"
+            )
+            found = find_box_at_path(box_data, path[1:])
+            if found is not None:
+                return found
+
+    return None