mapillary-tools 0.13.3__py3-none-any.whl → 0.14.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in the public registry.
- mapillary_tools/__init__.py +1 -1
- mapillary_tools/api_v4.py +198 -55
- mapillary_tools/authenticate.py +326 -64
- mapillary_tools/blackvue_parser.py +195 -0
- mapillary_tools/camm/camm_builder.py +55 -97
- mapillary_tools/camm/camm_parser.py +429 -181
- mapillary_tools/commands/__main__.py +10 -6
- mapillary_tools/commands/authenticate.py +8 -1
- mapillary_tools/commands/process.py +27 -51
- mapillary_tools/commands/process_and_upload.py +18 -5
- mapillary_tools/commands/sample_video.py +2 -3
- mapillary_tools/commands/upload.py +44 -13
- mapillary_tools/commands/video_process_and_upload.py +19 -5
- mapillary_tools/config.py +65 -26
- mapillary_tools/constants.py +141 -18
- mapillary_tools/exceptions.py +37 -34
- mapillary_tools/exif_read.py +221 -116
- mapillary_tools/exif_write.py +10 -8
- mapillary_tools/exiftool_read.py +33 -42
- mapillary_tools/exiftool_read_video.py +97 -47
- mapillary_tools/exiftool_runner.py +57 -0
- mapillary_tools/ffmpeg.py +417 -242
- mapillary_tools/geo.py +158 -118
- mapillary_tools/geotag/__init__.py +0 -1
- mapillary_tools/geotag/base.py +147 -0
- mapillary_tools/geotag/factory.py +307 -0
- mapillary_tools/geotag/geotag_images_from_exif.py +14 -131
- mapillary_tools/geotag/geotag_images_from_exiftool.py +136 -85
- mapillary_tools/geotag/geotag_images_from_gpx.py +60 -124
- mapillary_tools/geotag/geotag_images_from_gpx_file.py +13 -126
- mapillary_tools/geotag/geotag_images_from_nmea_file.py +4 -5
- mapillary_tools/geotag/geotag_images_from_video.py +88 -51
- mapillary_tools/geotag/geotag_videos_from_exiftool.py +123 -0
- mapillary_tools/geotag/geotag_videos_from_gpx.py +52 -0
- mapillary_tools/geotag/geotag_videos_from_video.py +20 -185
- mapillary_tools/geotag/image_extractors/base.py +18 -0
- mapillary_tools/geotag/image_extractors/exif.py +60 -0
- mapillary_tools/geotag/image_extractors/exiftool.py +18 -0
- mapillary_tools/geotag/options.py +182 -0
- mapillary_tools/geotag/utils.py +52 -16
- mapillary_tools/geotag/video_extractors/base.py +18 -0
- mapillary_tools/geotag/video_extractors/exiftool.py +70 -0
- mapillary_tools/geotag/video_extractors/gpx.py +116 -0
- mapillary_tools/geotag/video_extractors/native.py +160 -0
- mapillary_tools/{geotag → gpmf}/gpmf_parser.py +205 -182
- mapillary_tools/{geotag → gpmf}/gps_filter.py +5 -3
- mapillary_tools/history.py +134 -20
- mapillary_tools/mp4/construct_mp4_parser.py +17 -10
- mapillary_tools/mp4/io_utils.py +0 -1
- mapillary_tools/mp4/mp4_sample_parser.py +36 -28
- mapillary_tools/mp4/simple_mp4_builder.py +10 -9
- mapillary_tools/mp4/simple_mp4_parser.py +13 -22
- mapillary_tools/process_geotag_properties.py +184 -414
- mapillary_tools/process_sequence_properties.py +594 -225
- mapillary_tools/sample_video.py +20 -26
- mapillary_tools/serializer/description.py +587 -0
- mapillary_tools/serializer/gpx.py +132 -0
- mapillary_tools/telemetry.py +26 -13
- mapillary_tools/types.py +98 -611
- mapillary_tools/upload.py +411 -387
- mapillary_tools/upload_api_v4.py +167 -142
- mapillary_tools/uploader.py +804 -284
- mapillary_tools/utils.py +49 -18
- {mapillary_tools-0.13.3.dist-info → mapillary_tools-0.14.0.dist-info}/METADATA +93 -35
- mapillary_tools-0.14.0.dist-info/RECORD +75 -0
- {mapillary_tools-0.13.3.dist-info → mapillary_tools-0.14.0.dist-info}/WHEEL +1 -1
- mapillary_tools/geotag/blackvue_parser.py +0 -118
- mapillary_tools/geotag/geotag_from_generic.py +0 -22
- mapillary_tools/geotag/geotag_images_from_exiftool_both_image_and_video.py +0 -93
- mapillary_tools/geotag/geotag_videos_from_exiftool_video.py +0 -145
- mapillary_tools/video_data_extraction/cli_options.py +0 -22
- mapillary_tools/video_data_extraction/extract_video_data.py +0 -176
- mapillary_tools/video_data_extraction/extractors/base_parser.py +0 -75
- mapillary_tools/video_data_extraction/extractors/blackvue_parser.py +0 -34
- mapillary_tools/video_data_extraction/extractors/camm_parser.py +0 -38
- mapillary_tools/video_data_extraction/extractors/exiftool_runtime_parser.py +0 -71
- mapillary_tools/video_data_extraction/extractors/exiftool_xml_parser.py +0 -53
- mapillary_tools/video_data_extraction/extractors/generic_video_parser.py +0 -52
- mapillary_tools/video_data_extraction/extractors/gopro_parser.py +0 -43
- mapillary_tools/video_data_extraction/extractors/gpx_parser.py +0 -108
- mapillary_tools/video_data_extraction/extractors/nmea_parser.py +0 -24
- mapillary_tools/video_data_extraction/video_data_parser_factory.py +0 -39
- mapillary_tools-0.13.3.dist-info/RECORD +0 -75
- /mapillary_tools/{geotag → gpmf}/gpmf_gps_filter.py +0 -0
- {mapillary_tools-0.13.3.dist-info → mapillary_tools-0.14.0.dist-info}/entry_points.txt +0 -0
- {mapillary_tools-0.13.3.dist-info → mapillary_tools-0.14.0.dist-info/licenses}/LICENSE +0 -0
- {mapillary_tools-0.13.3.dist-info → mapillary_tools-0.14.0.dist-info}/top_level.txt +0 -0
mapillary_tools/process_sequence_properties.py

@@ -1,90 +1,137 @@
+from __future__ import annotations
+
+import functools
 import itertools
 import logging
 import math
 import os
 import typing as T
 
-
-
+import humanize
+
+from . import constants, exceptions, geo, types, utils
+from .serializer.description import DescriptionJSONSerializer
 
 LOG = logging.getLogger(__name__)
 
 
-
-
+S = T.TypeVar("S")
+R = T.TypeVar("R")
+PointSequence = T.List[geo.PointLike]
 
 
-def
-    sequence:
-
-
-
-    sequences: T.List[PointSequence] = []
+def split_sequence_by(
+    sequence: T.Iterable[S], reduce: T.Callable[[R, S], tuple[R, bool]], initial: R
+) -> list[list[S]]:
+    """
+    Split a sequence into multiple subsequences based on a reduction function.
 
-
-
+    The function processes each element through a reduce function that maintains
+    state and determines whether to split the sequence at that point. When a split
+    is triggered, a new subsequence starts with the current element.
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    Args:
+        sequence: An iterable of elements to split
+        reduce: A function that takes (accumulated_state, current_element) and
+                returns (new_state, should_split). If should_split is True,
+                a new subsequence starts with the current element.
+        initial: The initial state value passed to the reduce function
+
+    Returns:
+        A list of subsequences, where each subsequence is a list of elements
+
+    Examples:
+        >>> # Split on even numbers
+        >>> def split_on_even(count, x):
+        ...     return count + 1, x % 2 == 0
+        >>> split_sequence_by([1, 3, 2, 4, 5, 6, 7], split_on_even, 0)
+        [[1, 3], [2], [4, 5], [6, 7]]
+
+        >>> # Split when sum exceeds threshold
+        >>> def split_when_sum_exceeds_5(total, x):
+        ...     total += x
+        ...     return (x, True) if total > 5 else (total, False)
+        >>> split_sequence_by([1, 2, 3, 4, 1, 2], split_when_sum_exceeds_5, 0)
+        [[1, 2], [3], [4, 1], [2]]
+
+        >>> # Split on specific values
+        >>> def split_on_zero(_, x):
+        ...     return None, x == 0
+        >>> split_sequence_by([1, 2, 0, 3, 4, 0, 5], split_on_zero, None)
+        [[1, 2], [0, 3, 4], [0, 5]]
+
+        >>> # Empty sequence
+        >>> split_sequence_by([], lambda s, x: (s, False), 0)
+        []
+
+        >>> # Single element
+        >>> split_sequence_by([42], lambda s, x: (s, False), 0)
+        [[42]]
+    """
 
-
+    output_sequences: list[list[S]] = []
+
+    value = initial
+
+    for element in sequence:
+        value, should = reduce(value, element)
+
+        if should:
+            output_sequences.append([element])
+        else:
+            if output_sequences:
+                output_sequences[-1].append(element)
+            else:
+                output_sequences.append([element])
+
+    return output_sequences
 
 
 def duplication_check(
     sequence: PointSequence,
-
-
-
+    *,
+    max_duplicate_distance: float,
+    max_duplicate_angle: float,
+) -> tuple[PointSequence, list[types.ErrorMetadata]]:
+    """
+    >>> duplication_check([], max_duplicate_distance=1, max_duplicate_angle=2)
+    ([], [])
+    """
+
     dedups: PointSequence = []
-    dups:
+    dups: list[types.ErrorMetadata] = []
+
+    it = iter(sequence)
+    prev = next(it, None)
 
-    sequence_iter = iter(sequence)
-    prev = next(sequence_iter)
     if prev is None:
         return dedups, dups
+
     dedups.append(prev)
 
-    for cur in
+    for cur in it:
         # invariant: prev is processed
-        distance = geo.gps_distance(
-            (prev.lat, prev.lon),
-            (cur.lat, cur.lon),
-        )
+        distance = geo.gps_distance((prev.lat, prev.lon), (cur.lat, cur.lon))
 
         if prev.angle is not None and cur.angle is not None:
             angle_diff = geo.diff_bearing(prev.angle, cur.angle)
         else:
             angle_diff = None
 
-        if distance <=
-            angle_diff is
+        if distance <= max_duplicate_distance and (
+            angle_diff is None or angle_diff <= max_duplicate_angle
         ):
-
-
-
-
-
-
-            angle_diff=angle_diff,
-            ),
-            cur.filename,
-            filetype=types.FileType.IMAGE,
-        ),
+            msg = f"Duplicate of its previous image in terms of distance <= {max_duplicate_distance} and angle <= {max_duplicate_angle}"
+            ex = exceptions.MapillaryDuplicationError(
+                msg,
+                DescriptionJSONSerializer.as_desc(cur),
+                distance=distance,
+                angle_diff=angle_diff,
             )
+            dup = types.describe_error_metadata(
+                ex, cur.filename, filetype=types.FileType.IMAGE
+            )
+            dups.append(dup)
             # prev does not change
         else:
             dedups.append(cur)
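A note on the reducer contract introduced above: when a reducer signals a split, it must also reset its accumulated state itself, because the element that triggers the split becomes the first element of the new subsequence (the `split_when_sum_exceeds_5` doctest does this by returning `(x, True)`). Below is a minimal sketch of a gap-based reducer in the same spirit, written for use with the `split_sequence_by` added in this release; the 1-D values and threshold are illustrative, not from the package:

```python
from typing import Optional, Tuple

def make_gap_reducer(max_gap: float):
    # State is the previous value seen; request a split whenever the jump
    # from the previous value to the current one exceeds max_gap. On a
    # split, returning `cur` as the new state restarts the accumulator at
    # the element that opens the new subsequence.
    def reduce(prev: Optional[float], cur: float) -> Tuple[float, bool]:
        split = prev is not None and cur - prev > max_gap
        return cur, split

    return reduce

# split_sequence_by([0.0, 1.0, 9.0, 10.0], make_gap_reducer(5.0), None)
# -> [[0.0, 1.0], [9.0, 10.0]]
```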
@@ -94,95 +141,33 @@ def duplication_check(
     return dedups, dups
 
 
-def
-
-
-
-
-
-
-
-    """
-    sequences: T.List[T.List[types.ImageMetadata]] = []
-    last_sequence_file_size = 0
-    last_sequence_pixels = 0
-
-    for image in sequence:
-        # decent default values if width/height not available
-        width = 1024 if image.width is None else image.width
-        height = 1024 if image.height is None else image.height
-
-        filesize = os.path.getsize(image.filename)
-
-        if len(sequences) == 0:
-            start_new_sequence = True
-        else:
-            if sequences[-1]:
-                if max_images < len(sequences[-1]):
-                    LOG.debug(
-                        "Cut the sequence because the current sequence (%s) reaches the max number of images (%s)",
-                        len(sequences[-1]),
-                        max_images,
-                    )
-                    start_new_sequence = True
-                elif max_sequence_filesize < last_sequence_file_size + filesize:
-                    LOG.debug(
-                        "Cut the sequence because the current sequence (%s) reaches the max filesize (%s)",
-                        last_sequence_file_size + filesize,
-                        max_sequence_filesize,
-                    )
-                    start_new_sequence = True
-                elif max_sequence_pixels < last_sequence_pixels + width * height:
-                    LOG.debug(
-                        "Cut the sequence because the current sequence (%s) reaches the max pixels (%s)",
-                        last_sequence_pixels + width * height,
-                        max_sequence_pixels,
-                    )
-                    start_new_sequence = True
-                else:
-                    start_new_sequence = False
-            else:
-                start_new_sequence = False
-
-        if start_new_sequence:
-            sequences.append([])
-            last_sequence_file_size = 0
-            last_sequence_pixels = 0
-
-        sequences[-1].append(image)
-        last_sequence_file_size += filesize
-        last_sequence_pixels += width * height
-
-    assert sum(len(s) for s in sequences) == len(sequence)
-
-    return sequences
-
-
-def _group_sort_images_by_folder(
-    image_metadatas: T.List[types.ImageMetadata],
-) -> T.List[T.List[types.ImageMetadata]]:
-    # group images by parent directory
-    sequences_by_parent: T.Dict[str, T.List[types.ImageMetadata]] = {}
-    for image_metadata in image_metadatas:
-        filename = image_metadata.filename.resolve()
-        sequences_by_parent.setdefault(str(filename.parent), []).append(image_metadata)
-
-    sequences = list(sequences_by_parent.values())
-    for sequence in sequences:
-        sequence.sort(
-            key=lambda metadata: metadata.sort_key(),
-        )
-
-    return sequences
+def _group_images_by(
+    image_metadatas: T.Iterable[types.ImageMetadata],
+    group_key_func: T.Callable[[types.ImageMetadata], T.Hashable],
+) -> dict[T.Hashable, list[types.ImageMetadata]]:
+    grouped: dict[T.Hashable, list[types.ImageMetadata]] = {}
+    for metadata in image_metadatas:
+        grouped.setdefault(group_key_func(metadata), []).append(metadata)
+    return grouped
 
 
 def _interpolate_subsecs_for_sorting(sequence: PointSequence) -> None:
     """
-    Update the timestamps make sure they are unique and sorted
+    Update the timestamps to make sure they are unique and sorted
     in the same order by interpolating subseconds
+
     Examples:
-
-
+        >>> def make_point(t):
+        ...     return geo.Point(lat=0, lon=0, time=t, alt=None, angle=None)
+        >>> points = [make_point(t) for t in [1, 1, 1, 1, 1, 2]]
+        >>> _interpolate_subsecs_for_sorting(points)
+        >>> [p.time for p in points]
+        [1.0, 1.2, 1.4, 1.6, 1.8, 2]
+
+        >>> points = [make_point(t) for t in [1.1]]
+        >>> _interpolate_subsecs_for_sorting(points)
+        >>> [p.time for p in points]
+        [1.1]
     """
 
     gidx = 0
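`_group_images_by` replaces the folder-only grouping of the removed `_group_sort_images_by_folder` with grouping by an arbitrary hashable key (used later in this diff to group by folder plus camera make, model, and resolution). The pattern is ordinary `dict.setdefault` bucketing; here is a standalone sketch with file paths in place of `ImageMetadata`, names illustrative:

```python
from typing import Callable, Dict, Hashable, Iterable, List, TypeVar

V = TypeVar("V")

def group_by(items: Iterable[V], key: Callable[[V], Hashable]) -> Dict[Hashable, List[V]]:
    grouped: Dict[Hashable, List[V]] = {}
    for item in items:
        # dicts preserve insertion order, so each bucket keeps the input order
        grouped.setdefault(key(item), []).append(item)
    return grouped

print(group_by(["a/1.jpg", "a/2.jpg", "b/3.jpg"], key=lambda p: p.split("/")[0]))
# {'a': ['a/1.jpg', 'a/2.jpg'], 'b': ['b/3.jpg']}
```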
@@ -214,61 +199,420 @@ def _interpolate_subsecs_for_sorting(sequence: PointSequence) -> None:
     )
 
 
-def
-
+def _is_video_stationary(
+    sequence: T.Sequence[geo.PointLike], max_radius_in_meters: float
+) -> bool:
+    if not sequence:
+        return 0.0 <= max_radius_in_meters
 
-
-
-
-
-
-
-
-
-
+    start = (sequence[0].lat, sequence[0].lon)
+    for p in sequence:
+        distance = geo.gps_distance(start, (p.lat, p.lon))
+        if distance > max_radius_in_meters:
+            return False
+
+    return True
+
+
+def _check_video_limits(
+    video_metadatas: T.Iterable[types.VideoMetadata],
+    max_sequence_filesize_in_bytes: int | None,
+    max_capture_speed_kmh: float,
+    max_radius_for_stationary_check: float,
+) -> tuple[list[types.VideoMetadata], list[types.ErrorMetadata]]:
+    output_video_metadatas: list[types.VideoMetadata] = []
+    error_metadatas: list[types.ErrorMetadata] = []
+
+    for video_metadata in video_metadatas:
+        try:
+            is_stationary = _is_video_stationary(
+                video_metadata.points,
+                max_radius_in_meters=max_radius_for_stationary_check,
+            )
+            if is_stationary:
+                raise exceptions.MapillaryStationaryVideoError("Stationary video")
+
+            if max_sequence_filesize_in_bytes is not None:
+                video_filesize = (
+                    utils.get_file_size(video_metadata.filename)
+                    if video_metadata.filesize is None
+                    else video_metadata.filesize
+                )
+                if video_filesize > max_sequence_filesize_in_bytes:
+                    raise exceptions.MapillaryFileTooLargeError(
+                        f"Video file size {humanize.naturalsize(video_filesize)} exceeds max allowed {humanize.naturalsize(max_sequence_filesize_in_bytes)}",
+                    )
+
+            contains_null_island = any(
+                p.lat == 0 and p.lon == 0 for p in video_metadata.points
+            )
+            if contains_null_island:
+                raise exceptions.MapillaryNullIslandError(
+                    "GPS coordinates in Null Island (0, 0)"
+                )
+
+            avg_speed_kmh = (
+                geo.avg_speed(video_metadata.points) * 3.6
+            )  # Convert m/s to km/h
+            too_fast = (
+                len(video_metadata.points) >= 2
+                and avg_speed_kmh > max_capture_speed_kmh
+            )
+            if too_fast:
+                raise exceptions.MapillaryCaptureSpeedTooFastError(
+                    f"Capture speed {avg_speed_kmh:.3f} km/h exceeds max allowed {max_capture_speed_kmh:.3f} km/h",
+                )
+        except exceptions.MapillaryDescriptionError as ex:
+            LOG.error(f"{_video_name(video_metadata)}: {ex}")
+            error_metadatas.append(
+                types.describe_error_metadata(
+                    exc=ex,
+                    filename=video_metadata.filename,
+                    filetype=video_metadata.filetype,
+                )
+            )
+        else:
+            output_video_metadatas.append(video_metadata)
+
+    return output_video_metadatas, error_metadatas
+
+
+def _video_name(video_metadata: types.VideoMetadata) -> str:
+    return video_metadata.filename.name
+
+
+def _check_sequences_by_limits(
+    input_sequences: T.Sequence[PointSequence],
+    max_sequence_filesize_in_bytes: int | None,
+    max_capture_speed_kmh: float,
+) -> tuple[list[PointSequence], list[types.ErrorMetadata]]:
+    output_sequences: list[PointSequence] = []
+    output_errors: list[types.ErrorMetadata] = []
+
+    for sequence in input_sequences:
+        try:
+            if max_sequence_filesize_in_bytes is not None:
+                sequence_filesize = sum(
+                    utils.get_file_size(image.filename)
+                    if image.filesize is None
+                    else image.filesize
+                    for image in sequence
+                )
+                if sequence_filesize > max_sequence_filesize_in_bytes:
+                    raise exceptions.MapillaryFileTooLargeError(
+                        f"Sequence file size {humanize.naturalsize(sequence_filesize)} exceeds max allowed {humanize.naturalsize(max_sequence_filesize_in_bytes)}",
+                    )
+
+            contains_null_island = any(
+                image.lat == 0 and image.lon == 0 for image in sequence
+            )
+            if contains_null_island:
+                raise exceptions.MapillaryNullIslandError(
+                    "GPS coordinates in Null Island (0, 0)"
+                )
+
+            avg_speed_kmh = geo.avg_speed(sequence) * 3.6  # Convert m/s to km/h
+            too_fast = len(sequence) >= 2 and avg_speed_kmh > max_capture_speed_kmh
+            if too_fast:
+                raise exceptions.MapillaryCaptureSpeedTooFastError(
+                    f"Capture speed {avg_speed_kmh:.3f} km/h exceeds max allowed {max_capture_speed_kmh:.3f} km/h",
+                )
+        except exceptions.MapillaryDescriptionError as ex:
+            LOG.error(f"{_sequence_name(sequence)}: {ex}")
+            for image in sequence:
+                output_errors.append(
+                    types.describe_error_metadata(
+                        exc=ex, filename=image.filename, filetype=types.FileType.IMAGE
+                    )
+                )
+
+        else:
+            output_sequences.append(sequence)
+
+    assert sum(len(s) for s in output_sequences) + len(output_errors) == sum(
+        len(s) for s in input_sequences
+    )
+
+    return output_sequences, output_errors
+
+
+def _sequence_name(sequence: T.Sequence[types.ImageMetadata]) -> str:
+    if not sequence:
+        return "N/A"
+    image = sequence[0]
+    return f"{image.filename.parent.name}/{image.filename.name}"
 
 
-def
-
+def _group_by_folder_and_camera(
+    image_metadatas: list[types.ImageMetadata],
+) -> list[list[types.ImageMetadata]]:
+    grouped = _group_images_by(
+        image_metadatas,
+        lambda metadata: (
+            str(metadata.filename.parent),
+            metadata.MAPDeviceMake,
+            metadata.MAPDeviceModel,
+            metadata.width,
+            metadata.height,
+        ),
+    )
+    for key in grouped:
+        LOG.debug(f"Grouped {len(grouped[key])} images by {key}")
+    output_sequences = list(grouped.values())
+
+    LOG.info(f"Created {len(output_sequences)} sequences by folders and cameras")
+
+    return output_sequences
+
+
+def _check_sequences_duplication(
+    input_sequences: T.Sequence[PointSequence],
+    duplicate_distance: float,
+    duplicate_angle: float,
+) -> tuple[list[PointSequence], list[types.ErrorMetadata]]:
+    output_sequences: list[PointSequence] = []
+    output_errors: list[types.ErrorMetadata] = []
+
+    for sequence in input_sequences:
+        output_sequence, errors = duplication_check(
+            sequence,
+            max_duplicate_distance=duplicate_distance,
+            max_duplicate_angle=duplicate_angle,
+        )
+        assert len(sequence) == len(output_sequence) + len(errors)
+        if output_sequence:
+            output_sequences.append(output_sequence)
+        output_errors.extend(errors)
+
+    # All input images should be accounted for either in output sequences or errors
+    assert sum(len(s) for s in output_sequences) + len(output_errors) == sum(
+        len(s) for s in input_sequences
+    )
+
+    if output_errors:
+        LOG.info(
+            f"Duplication check: {len(output_errors)} image duplicates removed (with {duplicate_distance=} and {duplicate_angle=})"
+        )
+
+    return output_sequences, output_errors
+
+
+class SplitState(T.TypedDict, total=False):
+    sequence_images: int
+    sequence_file_size: int
+    sequence_pixels: int
+    image: types.ImageMetadata
+
+
+def _should_split_by_max_sequence_images(
+    state: SplitState,
+    image: types.ImageMetadata,
+    max_sequence_images: int,
+    split: bool = False,
+) -> tuple[SplitState, bool]:
+    if not split:
+        new_sequence_images = state.get("sequence_images", 0) + 1
+        split = max_sequence_images < new_sequence_images
+        if split:
+            LOG.info(
+                f"Split sequence at {image.filename.name}: too many images ({new_sequence_images} > {max_sequence_images})"
+            )
+
+    if split:
+        new_sequence_images = 1
 
-
-
-
-
-
-
+    state["sequence_images"] = new_sequence_images
+
+    return state, split
+
+
+def _should_split_by_cutoff_time(
+    state: SplitState,
+    image: types.ImageMetadata,
+    cutoff_time: float,
+    split: bool = False,
+) -> tuple[SplitState, bool]:
+    if not split:
+        last_image = state.get("image")
+        if last_image is not None:
+            diff = image.time - last_image.time
+            split = cutoff_time < diff
+            if split:
+                LOG.info(
+                    f"Split sequence at {image.filename.name}: time gap too large ({diff:.6g} seconds > {cutoff_time:.6g} seconds)"
+                )
+
+    state["image"] = image
+
+    return state, split
+
+
+def _should_split_by_cutoff_distance(
+    state: SplitState,
+    image: types.ImageMetadata,
+    cutoff_distance: float,
+    split: bool = False,
+) -> tuple[SplitState, bool]:
+    if not split:
+        last_image = state.get("image")
+        if last_image is not None:
+            diff = geo.gps_distance(
+                (last_image.lat, last_image.lon), (image.lat, image.lon)
+            )
+            split = cutoff_distance < diff
+            if split:
+                LOG.info(
+                    f"Split sequence at {image.filename.name}: distance gap too large ({diff:.6g} meters > {cutoff_distance:.6g} meters)"
+                )
+
+    state["image"] = image
+
+    return state, split
+
+
+def _should_split_by_max_sequence_filesize(
+    state: SplitState,
+    image: types.ImageMetadata,
+    max_sequence_filesize_in_bytes: int,
+    split: bool = False,
+) -> tuple[SplitState, bool]:
+    if image.filesize is None:
+        filesize = os.path.getsize(image.filename)
     else:
-
+        filesize = image.filesize
+
+    if not split:
+        new_sequence_file_size = state.get("sequence_file_size", 0) + filesize
+        split = max_sequence_filesize_in_bytes < new_sequence_file_size
+        if split:
+            LOG.info(
+                f"Split sequence at {image.filename.name}: filesize too large ({new_sequence_file_size} > {max_sequence_filesize_in_bytes})"
+            )
 
+    if split:
+        new_sequence_file_size = filesize
 
-
-
-
-
-
-
-
-
-
-
-
+    state["sequence_file_size"] = new_sequence_file_size
+
+    return state, split
+
+
+def _should_split_by_max_sequence_pixels(
+    state: SplitState,
+    image: types.ImageMetadata,
+    max_sequence_pixels: int,
+    split: bool = False,
+) -> tuple[SplitState, bool]:
+    # Default values if width/height not available
+    width = 1024 if image.width is None else image.width
+    height = 1024 if image.height is None else image.height
+    pixels = width * height
+
+    if not split:
+        new_sequence_pixels = state.get("sequence_pixels", 0) + pixels
+        split = max_sequence_pixels < new_sequence_pixels
+        if split:
+            LOG.info(
+                f"Split sequence at {image.filename.name}: pixels too large ({new_sequence_pixels} > {max_sequence_pixels})"
+            )
+
+    if split:
+        new_sequence_pixels = pixels
+
+    state["sequence_pixels"] = new_sequence_pixels
+
+    return state, split
+
+
+def _split_sequences_by_limits(
+    input_sequences: T.Sequence[PointSequence],
+    max_sequence_filesize_in_bytes: int | None = None,
+    max_sequence_pixels: int | None = None,
+    max_sequence_images: int | None = None,
+    cutoff_time: float | None = None,
+    cutoff_distance: float | None = None,
+) -> list[PointSequence]:
+    should_splits = []
+
+    if max_sequence_images is not None:
+        should_splits.append(
+            functools.partial(
+                _should_split_by_max_sequence_images,
+                max_sequence_images=max_sequence_images,
+            )
         )
-
-
-
+
+    if cutoff_time is not None:
+        should_splits.append(
+            functools.partial(_should_split_by_cutoff_time, cutoff_time=cutoff_time)
         )
 
-
-
-
-
-
+    if cutoff_distance is not None:
+        should_splits.append(
+            functools.partial(
+                _should_split_by_cutoff_distance, cutoff_distance=cutoff_distance
+            )
+        )
+
+    if max_sequence_filesize_in_bytes is not None:
+        should_splits.append(
+            functools.partial(
+                _should_split_by_max_sequence_filesize,
+                max_sequence_filesize_in_bytes=max_sequence_filesize_in_bytes,
+            )
+        )
+
+    if max_sequence_pixels is not None:
+        should_splits.append(
+            functools.partial(
+                _should_split_by_max_sequence_pixels,
+                max_sequence_pixels=max_sequence_pixels,
+            )
+        )
+
+    def _should_split_agg(
+        state: SplitState, image: types.ImageMetadata
+    ) -> tuple[SplitState, bool]:
+        split = False
+
+        for should_split in should_splits:
+            state, split = should_split(state, image, split=split)
+
+        return state, split
+
+    output_sequences = []
+    for sequence in input_sequences:
+        output_sequences.extend(
+            split_sequence_by(
+                sequence, _should_split_agg, initial=T.cast(SplitState, {})
+            )
         )
 
-
-
-
+    assert sum(len(s) for s in output_sequences) == sum(len(s) for s in input_sequences)
+
+    if len(input_sequences) != len(output_sequences):
+        LOG.info(f"Split sequences: {len(input_sequences)} -> {len(output_sequences)}")
+
+    return output_sequences
+
+
+def process_sequence_properties(
+    metadatas: T.Sequence[types.MetadataOrError],
+    cutoff_distance: float = constants.CUTOFF_DISTANCE,
+    cutoff_time: float = constants.CUTOFF_TIME,
+    interpolate_directions: bool = False,
+    duplicate_distance: float = constants.DUPLICATE_DISTANCE,
+    duplicate_angle: float = constants.DUPLICATE_ANGLE,
+    max_capture_speed_kmh: float = constants.MAX_CAPTURE_SPEED_KMH,
+) -> list[types.MetadataOrError]:
+    LOG.info("==> Processing sequences...")
+
+    max_sequence_filesize_in_bytes = constants.MAX_SEQUENCE_FILESIZE
+    max_sequence_pixels = constants.MAX_SEQUENCE_PIXELS
+
+    error_metadatas: list[types.ErrorMetadata] = []
+    image_metadatas: list[types.ImageMetadata] = []
+    video_metadatas: list[types.VideoMetadata] = []
 
     for metadata in metadatas:
         if isinstance(metadata, types.ErrorMetadata):
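Each `_should_split_by_*` predicate above shares one `SplitState` dict and threads a `split` flag, so `_split_sequences_by_limits` can chain any subset of them with `functools.partial`; the flag is passed along so that later predicates still update their accumulators when an earlier one has already decided to split. A self-contained sketch of the same aggregation pattern over plain integers (thresholds illustrative), reusing the `split_sequence_by` added earlier in this diff:

```python
import functools
from typing import Dict, Tuple

State = Dict[str, int]

def should_split_by_count(
    state: State, item: int, max_count: int, split: bool = False
) -> Tuple[State, bool]:
    if not split:
        new_count = state.get("count", 0) + 1
        split = max_count < new_count
    if split:
        new_count = 1  # the current item opens the next subsequence
    state["count"] = new_count
    return state, split

def should_split_by_sum(
    state: State, item: int, max_sum: int, split: bool = False
) -> Tuple[State, bool]:
    if not split:
        new_sum = state.get("sum", 0) + item
        split = max_sum < new_sum
    if split:
        new_sum = item
    state["sum"] = new_sum
    return state, split

predicates = [
    functools.partial(should_split_by_count, max_count=10),
    functools.partial(should_split_by_sum, max_sum=10),
]

def should_split_agg(state: State, item: int) -> Tuple[State, bool]:
    split = False
    for predicate in predicates:
        state, split = predicate(state, item, split=split)
    return state, split

# split_sequence_by([4, 4, 4, 4], should_split_agg, initial={})
# The running sum exceeds 10 at the third element, so the sequence is cut
# there; the count limit never triggers in this run.
# -> [[4, 4], [4, 4]]
```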
@@ -278,68 +622,93 @@ def process_sequence_properties(
         elif isinstance(metadata, types.VideoMetadata):
             video_metadatas.append(metadata)
         else:
-            raise
-
-
-
-
-
-
+            raise ValueError(f"invalid metadata type: {metadata}")
+
+    if video_metadatas:
+        # Check limits for videos
+        video_metadatas, video_error_metadatas = _check_video_limits(
+            video_metadatas,
+            max_sequence_filesize_in_bytes=max_sequence_filesize_in_bytes,
+            max_capture_speed_kmh=max_capture_speed_kmh,
+            max_radius_for_stationary_check=10.0,
+        )
+        error_metadatas.extend(video_error_metadatas)
 
-
-
+    if image_metadatas:
+        sequences: list[PointSequence]
 
-
-
-        for sequence in sequences_by_folder:
-            cut = cut_sequence_by_time_distance(sequence, cutoff_distance, cutoff_time)
-            sequences_after_cut.extend(cut)
-        assert len(image_metadatas) == sum(len(s) for s in sequences_after_cut)
+        # Group by folder and camera
+        sequences = _group_by_folder_and_camera(image_metadatas)
 
-
-
+        # Make sure each sequence is sorted (in-place update)
+        for sequence in sequences:
+            sequence.sort(
+                key=lambda metadata: metadata.sort_key(),
+            )
 
-
+        # Interpolate subseconds for same timestamps (in-place update)
+        for sequence in sequences:
+            _interpolate_subsecs_for_sorting(sequence)
+
+        # Split sequences by max number of images, max filesize, max pixels, and cutoff time
+        # NOTE: Do not split by distance here because it affects the speed limit check
+        sequences = _split_sequences_by_limits(
+            sequences,
+            max_sequence_filesize_in_bytes=max_sequence_filesize_in_bytes,
+            max_sequence_pixels=max_sequence_pixels,
+            max_sequence_images=constants.MAX_SEQUENCE_LENGTH,
+            cutoff_time=cutoff_time,
+        )
 
-
-
-
-            sequence,
+        # Duplication check
+        sequences, errors = _check_sequences_duplication(
+            sequences,
             duplicate_distance=duplicate_distance,
             duplicate_angle=duplicate_angle,
         )
-
-
-
-
-
-
-
-
-
-        #
-
-
-
-
-
+        error_metadatas.extend(errors)
+
+        # Interpolate angles (in-place update)
+        for sequence in sequences:
+            if interpolate_directions:
+                for image in sequence:
+                    image.angle = None
+            geo.interpolate_directions_if_none(sequence)
+
+        # Check limits for sequences
+        sequences, errors = _check_sequences_by_limits(
+            sequences,
+            max_sequence_filesize_in_bytes=max_sequence_filesize_in_bytes,
+            max_capture_speed_kmh=max_capture_speed_kmh,
+        )
+        error_metadatas.extend(errors)
+
+        # Split sequences by cutoff distance
+        # NOTE: The speed limit check probably rejects most anomalies
+        sequences = _split_sequences_by_limits(
+            sequences, cutoff_distance=cutoff_distance
        )
 
-        #
-
-
+        # Assign sequence UUIDs (in-place update)
+        sequence_idx = 0
+        for sequence in sequences:
+            for image in sequence:
                 # using incremental id as shorter "uuid", so we can save some space for the desc file
-
-            image_metadatas.append(p)
+                image.MAPSequenceUUID = str(sequence_idx)
             sequence_idx += 1
 
+        image_metadatas = []
+        for sequence in sequences:
+            image_metadatas.extend(sequence)
+
+        assert sequence_idx == len(
+            set(metadata.MAPSequenceUUID for metadata in image_metadatas)
+        )
+
     results = error_metadatas + image_metadatas + video_metadatas
 
     assert len(metadatas) == len(results), (
-        f"
-    )
-    assert sequence_idx == len(
-        set(metadata.MAPSequenceUUID for metadata in image_metadatas)
+        f"Expected {len(metadatas)} results but got {len(results)}"
     )
 
     return results
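The rewritten `process_sequence_properties` is now a fixed per-group pipeline: group images by folder and camera, sort, interpolate subseconds, split by image count/filesize/pixels/time, drop duplicates, interpolate directions, enforce per-sequence limits, split by distance, and finally assign incremental sequence IDs. Every input metadata comes back either as-is, updated, or converted to an `ErrorMetadata`, so the result always has the same length as the input. A minimal smoke check of that invariant (assuming the package is installed; real inputs would come from the geotag/process step):

```python
from mapillary_tools.process_sequence_properties import process_sequence_properties

# With no image or video metadata, every stage is a no-op and the
# function returns an empty result of the same length as its input.
results = process_sequence_properties([])
assert results == []
```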