OTVision 0.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. OTVision/__init__.py +30 -0
  2. OTVision/application/__init__.py +0 -0
  3. OTVision/application/configure_logger.py +23 -0
  4. OTVision/application/detect/__init__.py +0 -0
  5. OTVision/application/detect/get_detect_cli_args.py +9 -0
  6. OTVision/application/detect/update_detect_config_with_cli_args.py +95 -0
  7. OTVision/application/get_config.py +25 -0
  8. OTVision/config.py +754 -0
  9. OTVision/convert/__init__.py +0 -0
  10. OTVision/convert/convert.py +318 -0
  11. OTVision/dataformat.py +70 -0
  12. OTVision/detect/__init__.py +0 -0
  13. OTVision/detect/builder.py +48 -0
  14. OTVision/detect/cli.py +166 -0
  15. OTVision/detect/detect.py +296 -0
  16. OTVision/detect/otdet.py +103 -0
  17. OTVision/detect/plugin_av/__init__.py +0 -0
  18. OTVision/detect/plugin_av/rotate_frame.py +37 -0
  19. OTVision/detect/yolo.py +277 -0
  20. OTVision/domain/__init__.py +0 -0
  21. OTVision/domain/cli.py +42 -0
  22. OTVision/helpers/__init__.py +0 -0
  23. OTVision/helpers/date.py +26 -0
  24. OTVision/helpers/files.py +538 -0
  25. OTVision/helpers/formats.py +139 -0
  26. OTVision/helpers/log.py +131 -0
  27. OTVision/helpers/machine.py +71 -0
  28. OTVision/helpers/video.py +54 -0
  29. OTVision/track/__init__.py +0 -0
  30. OTVision/track/iou.py +282 -0
  31. OTVision/track/iou_util.py +140 -0
  32. OTVision/track/preprocess.py +451 -0
  33. OTVision/track/track.py +422 -0
  34. OTVision/transform/__init__.py +0 -0
  35. OTVision/transform/get_homography.py +156 -0
  36. OTVision/transform/reference_points_picker.py +462 -0
  37. OTVision/transform/transform.py +352 -0
  38. OTVision/version.py +13 -0
  39. OTVision/view/__init__.py +0 -0
  40. OTVision/view/helpers/OTC.ico +0 -0
  41. OTVision/view/view.py +90 -0
  42. OTVision/view/view_convert.py +128 -0
  43. OTVision/view/view_detect.py +146 -0
  44. OTVision/view/view_helpers.py +417 -0
  45. OTVision/view/view_track.py +131 -0
  46. OTVision/view/view_transform.py +140 -0
  47. otvision-0.5.3.dist-info/METADATA +47 -0
  48. otvision-0.5.3.dist-info/RECORD +50 -0
  49. otvision-0.5.3.dist-info/WHEEL +4 -0
  50. otvision-0.5.3.dist-info/licenses/LICENSE +674 -0
OTVision/track/track.py
@@ -0,0 +1,422 @@
+ """
+ OTVision main module for tracking objects in successive frames of videos
+ """
+
+ # points and transform trajectory points from pixel into world coordinates.
+
+ import logging
+ import uuid
+
+ # Copyright (C) 2022 OpenTrafficCam Contributors
+ # <https://github.com/OpenTrafficCam
+ # <team@opentrafficcam.org>
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+ from collections import defaultdict
+ from pathlib import Path
+ from typing import Callable, Iterator
+
+ from tqdm import tqdm
+
+ from OTVision import dataformat
+ from OTVision.config import (
+     CONFIG,
+     DEFAULT_FILETYPE,
+     DETECT,
+     FILETYPES,
+     IOU,
+     OVERWRITE,
+     SIGMA_H,
+     SIGMA_IOU,
+     SIGMA_L,
+     T_MIN,
+     T_MISS_MAX,
+     TRACK,
+ )
+ from OTVision.dataformat import DATA, DETECTIONS, FINISHED, METADATA, TRACK_ID
+ from OTVision.helpers.files import denormalize_bbox, get_files, write_json
+ from OTVision.helpers.log import LOGGER_NAME
+ from OTVision.track.preprocess import (
+     FrameChunk,
+     FrameChunkParser,
+     FrameIndexer,
+     Preprocess,
+ )
+
+ from .iou import TrackedDetections, TrackingResult, id_generator, track_iou
+
+ log = logging.getLogger(LOGGER_NAME)
+
+ IdGenerator = Callable[[], str]
+
+
+ class TrackedChunk(TrackedDetections):
+     """Tracking results combined with respective metadata."""
+
+     def __init__(
+         self,
+         detections: TrackedDetections,
+         file_path: Path,
+         frame_group_id: int,
+         frame_offset: int,
+         metadata: dict,
+     ) -> None:
+         super().__init__(
+             detections._detections,
+             detections._detected_ids,
+             detections._active_track_ids,
+         )
+         self.file_path = file_path
+         self.frame_offset = frame_offset
+         self._frame_group_id = frame_group_id
+         self.metadata = metadata
+
+
+ class TrackingResultStore:
+     """TrackingResultStore manages TrackedChunks
+     and data required for the next tracking iteration:
+     - the remaining active tracks of the last iteration
+     - a global lookup table for each track's last detection frame
+     """
+
+     def __init__(self, tracking_run_id: str) -> None:
+         self.tracking_run_id = tracking_run_id
+         self._tracked_chunks: list[TrackedChunk] = list()
+         self.last_track_frame: dict[int, int] = dict()
+         self.active_tracks: list[dict] = []
+
+     def store_tracking_result(
+         self,
+         tracking_result: TrackingResult,
+         file_path: Path,
+         frame_group_id: int,
+         frame_offset: int,
+         metadata: dict,
+     ) -> None:
+         # store tracking results
+         self._tracked_chunks.append(
+             TrackedChunk(
+                 detections=tracking_result.tracked_detections,
+                 file_path=file_path,
+                 frame_group_id=frame_group_id,
+                 frame_offset=frame_offset,
+                 metadata=metadata,
+             )
+         )
+         # update global lookup table
+         self.last_track_frame.update(tracking_result.last_track_frame)
+
+         # replace last iteration's active tracks
+         self.active_tracks = tracking_result.active_tracks
+         print("remaining active tracks", len(self.active_tracks))
+
+         # collect remaining active ids and update tracked chunks
+         new_active_ids = {t[TRACK_ID] for t in self.active_tracks}
+         for result in self._tracked_chunks:
+             result.update_active_track_ids(new_active_ids)
+
+     def get_finished_results(self) -> list[TrackedChunk]:
+         """Return all TrackedChunks that have no remaining active track ids.
+         These are ready to be written to a result file.
+
+         Returns:
+             list[TrackedChunk]: finished TrackedChunks
+         """
+         finished = [chunk for chunk in self._tracked_chunks if chunk.is_finished()]
+
+         # remove finished TrackedChunks from manager
+         self._tracked_chunks = [
+             chunk for chunk in self._tracked_chunks if chunk not in finished
+         ]
+
+         return finished
+
+
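The store above drives an incremental write-out: tracked chunks are added per file, and only chunks whose tracks have all finished are flushed. A minimal sketch of that cycle (the variables tracking_result, file_path, etc. are hypothetical and mirror the calls made in main() further down):

result_store = TrackingResultStore(tracking_run_id="example-run")
# after tracking one otdet file:
result_store.store_tracking_result(
    tracking_result,
    file_path=file_path,
    frame_group_id=0,
    frame_offset=0,
    metadata={},
)
# chunks with no remaining active tracks can be written out immediately
for chunk in result_store.get_finished_results():
    ...  # write chunk, see mark_and_write_results() below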
+ def main(
+     paths: list[Path],
+     sigma_l: float = CONFIG[TRACK][IOU][SIGMA_L],
+     sigma_h: float = CONFIG[TRACK][IOU][SIGMA_H],
+     sigma_iou: float = CONFIG[TRACK][IOU][SIGMA_IOU],
+     t_min: int = CONFIG[TRACK][IOU][T_MIN],
+     t_miss_max: int = CONFIG[TRACK][IOU][T_MISS_MAX],
+     overwrite: bool = CONFIG[TRACK][OVERWRITE],
+     tracking_run_id_generator: IdGenerator = lambda: str(uuid.uuid4()),
+ ) -> None:
+     """Read detections from otdet file, perform tracking using iou tracker and
+     save tracks to ottrk file.
+
+     Args:
+         paths (list[Path]): List of paths to detection files.
+         sigma_l (float, optional): Lower confidence threshold. Detections with
+             confidences below sigma_l are not even considered for tracking.
+             Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_L"].
+         sigma_h (float, optional): Upper confidence threshold. Tracks are only
+             considered valid if they contain at least one detection with a
+             confidence above sigma_h.
+             Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_H"].
+         sigma_iou (float, optional): Intersection-over-Union threshold. Two
+             detections in subsequent frames are considered to belong to the same
+             track if their IOU value exceeds sigma_iou and this is the highest IOU
+             of all possible combinations of detections.
+             Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_IOU"].
+         t_min (int, optional): Minimum number of detections to count as a valid
+             track. All tracks with fewer detections will be dismissed.
+             Defaults to CONFIG["TRACK"]["IOU"]["T_MIN"].
+         t_miss_max (int, optional): Maximum number of consecutive missed
+             detections before a track is no longer continued.
+             Defaults to CONFIG["TRACK"]["IOU"]["T_MISS_MAX"].
+         overwrite (bool, optional): Whether or not to overwrite existing tracks
+             files. Defaults to CONFIG["TRACK"]["OVERWRITE"].
+         tracking_run_id_generator (IdGenerator): Generator used to create a unique
+             id for this tracking run.
+     """
+
+     filetypes = CONFIG[FILETYPES][DETECT]
+     detections_files = get_files(paths=paths, filetypes=filetypes)
+
+     start_msg = f"Start tracking of {len(detections_files)} detections files"
+     log.info(start_msg)
+     print(start_msg)
+
+     if not detections_files:
+         log.warning(f"No files of type '{filetypes}' found to track!")
+         return
+
+     tracking_run_id = tracking_run_id_generator()
+     preprocessor = Preprocess()
+     preprocessed = preprocessor.run(detections_files)
+     file_type = CONFIG[DEFAULT_FILETYPE][TRACK]
+
+     for frame_group_id, frame_group in tqdm(
+         enumerate(preprocessed),
+         desc="Tracked frame groups",
+         unit=" framegroup",
+     ):
+         print()
+         print(f"Process frame group {frame_group_id}")
+
+         # update metadata for all files in FrameGroup
+         frame_group.update_metadata(
+             tracker_metadata(sigma_l, sigma_h, sigma_iou, t_min, t_miss_max)
+         )
+
+         # metadata and tracking results to keep track of during iterations;
+         # within a FrameGroup, track/vehicle and frame ids should be unique
+         vehicle_id_generator = id_generator()
+         frame_offset = 0
+         track_result_store = TrackingResultStore(tracking_run_id)
+
+         # process each otdet file in frame group
+         for file_path in frame_group.files:
+             print(f"Process file {file_path} in frame group {frame_group_id}")
+
+             # read detection data
+             chunk = FrameChunkParser.parse(file_path, frame_offset)
+
+             if skip_existing_output_files(chunk, overwrite, file_type):
+                 continue
+
+             log.info(f"Track {str(chunk)}")
+
+             detections = chunk.to_dict()
+             detections_denormalized = denormalize_bbox(
+                 detections, metadata=frame_group._files_metadata
+             )
+
+             tracking_result = track(
+                 detections=detections_denormalized,
+                 sigma_l=sigma_l,
+                 sigma_h=sigma_h,
+                 sigma_iou=sigma_iou,
+                 t_min=t_min,
+                 t_miss_max=t_miss_max,
+                 previous_active_tracks=track_result_store.active_tracks,
+                 vehicle_id_generator=vehicle_id_generator,
+             )
+
+             track_result_store.store_tracking_result(
+                 tracking_result,
+                 file_path=file_path,
+                 frame_group_id=frame_group_id,
+                 frame_offset=frame_offset,
+                 metadata=frame_group.metadata_for(file_path),
+             )
+             log.debug(f"Successfully tracked {chunk}")
+             frame_offset = chunk.last_frame_id() + 1
+
+             for finished_chunk in track_result_store.get_finished_results():
+                 mark_and_write_results(finished_chunk, track_result_store, overwrite)
+
+         # write last files of frame group
+         # even though some tracks might still be active
+         for finished_chunk in track_result_store._tracked_chunks:
+             mark_and_write_results(finished_chunk, track_result_store, overwrite)
+
+         log.info("Successfully tracked and wrote ")
+
+     finished_msg = "Finished tracking"
+     log.info(finished_msg)
+     print(finished_msg)
+
+
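A minimal usage sketch of the entry point above. The paths and threshold values are illustrative only; the defaults come from CONFIG as documented in the docstring, and the module is assumed importable as OTVision.track.track per the file list:

from pathlib import Path

from OTVision.track.track import main

# hypothetical .otdet files produced by the detect module
main(
    paths=[Path("data/video_01.otdet"), Path("data/video_02.otdet")],
    sigma_l=0.3,    # ignore detections below this confidence
    sigma_h=0.5,    # a track needs at least one detection above this confidence
    sigma_iou=0.4,  # minimum IOU to link detections across frames
    t_min=5,        # dismiss tracks with fewer detections
    overwrite=True,
)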
+ def skip_existing_output_files(
+     chunk: FrameChunk, overwrite: bool, file_type: str
+ ) -> bool:
+     existing_output_files = chunk.get_existing_output_files(with_suffix=file_type)
+
+     if not overwrite and (len(existing_output_files) > 0):
+         log.warning(
+             (
+                 f"{existing_output_files} already exist(s). "
+                 "To overwrite, set overwrite to True"
+             )
+         )
+         return True
+
+     return False
+
+
+ def tracker_metadata(
+     sigma_l: float, sigma_h: float, sigma_iou: float, t_min: float, t_miss_max: float
+ ) -> dict:
+     return {
+         dataformat.NAME: "IOU",
+         dataformat.SIGMA_L: sigma_l,
+         dataformat.SIGMA_H: sigma_h,
+         dataformat.SIGMA_IOU: sigma_iou,
+         dataformat.T_MIN: t_min,
+         dataformat.T_MISS_MAX: t_miss_max,
+     }
+
+
+ def mark_and_write_results(
+     chunk: TrackedChunk,
+     result_store: TrackingResultStore,
+     overwrite: bool,
+ ) -> None:
+     # no active tracks remaining, so last track frame metadata
+     # should be correct for all contained tracks,
+     # thus set finished flags now
+     mark_last_detections_as_finished(chunk, result_store)
+
+     # write marked detections to track file and delete the data
+     file_path = chunk.file_path
+
+     # reindex frames before writing ottrk file
+     file_type = CONFIG[DEFAULT_FILETYPE][TRACK]
+     serializable_detections: list[dict] = FrameIndexer().reindex(
+         chunk._detections, frame_offset=chunk.frame_offset
+     )
+
+     output = build_output(
+         serializable_detections,
+         chunk.metadata,
+         result_store.tracking_run_id,
+         chunk._frame_group_id,
+     )
+     write_json(
+         dict_to_write=output,
+         file=Path(file_path),
+         filetype=file_type,
+         overwrite=overwrite,
+     )
+
+     log.info(f"Successfully tracked and wrote {file_path}")
+     del chunk._detections
+
+
+ def track(
+     detections: dict,  # TODO: Type hint nested dict during refactoring
+     sigma_l: float = CONFIG[TRACK][IOU][SIGMA_L],
+     sigma_h: float = CONFIG[TRACK][IOU][SIGMA_H],
+     sigma_iou: float = CONFIG[TRACK][IOU][SIGMA_IOU],
+     t_min: int = CONFIG[TRACK][IOU][T_MIN],
+     t_miss_max: int = CONFIG[TRACK][IOU][T_MISS_MAX],
+     previous_active_tracks: list = [],
+     vehicle_id_generator: Iterator[int] = id_generator(),
+ ) -> TrackingResult:  # TODO: Type hint nested dict during refactoring
+     """Perform tracking using track_iou with arguments and add metadata to tracks.
+
+     Args:
+         detections (dict): Dict of detections in .otdet format.
+         sigma_l (float, optional): Lower confidence threshold. Detections with
+             confidences below sigma_l are not even considered for tracking.
+             Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_L"].
+         sigma_h (float, optional): Upper confidence threshold. Tracks are only
+             considered valid if they contain at least one detection with a
+             confidence above sigma_h.
+             Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_H"].
+         sigma_iou (float, optional): Intersection-over-Union threshold. Two
+             detections in subsequent frames are considered to belong to the same
+             track if their IOU value exceeds sigma_iou and this is the highest IOU
+             of all possible combinations of detections.
+             Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_IOU"].
+         t_min (int, optional): Minimum number of detections to count as a valid
+             track. All tracks with fewer detections will be dismissed.
+             Defaults to CONFIG["TRACK"]["IOU"]["T_MIN"].
+         t_miss_max (int, optional): Maximum number of consecutive missed
+             detections before a track is no longer continued.
+             Defaults to CONFIG["TRACK"]["IOU"]["T_MISS_MAX"].
+         previous_active_tracks (list): A list of remaining active tracks
+             from previous iterations.
+         vehicle_id_generator (Iterator[int]): Provides ids for new tracks.
+
+     Returns:
+         TrackingResult: A result object holding tracked detections in ottrk format,
+             a list of active tracks (iou format?), and a lookup table for each
+             track's last detection frame.
+     """
+
+     result = track_iou(
+         detections=detections[DATA],
+         sigma_l=sigma_l,
+         sigma_h=sigma_h,
+         sigma_iou=sigma_iou,
+         t_min=t_min,
+         t_miss_max=t_miss_max,
+         previous_active_tracks=previous_active_tracks,
+         vehicle_id_generator=vehicle_id_generator,
+     )
+     log.info("Detections tracked")
+
+     return result
+
+
+ def mark_last_detections_as_finished(
+     chunk: TrackedChunk, result_store: TrackingResultStore
+ ) -> None:
+     # invert last occurrence frame dict
+     last_track_frame = result_store.last_track_frame
+
+     frame_ending_tracks = defaultdict(set)
+     for vehID in chunk._detected_ids:
+         frame_ending_tracks[last_track_frame[vehID]].add(vehID)
+
+     for frame_num, frame_det in chunk._detections.items():
+         for ending_track in frame_ending_tracks[int(frame_num)]:
+             frame_det[ending_track][FINISHED] = True
+             del last_track_frame[ending_track]
+
+
+ def build_output(
+     detections: list[dict],
+     metadata: dict,
+     tracking_run_id: str,
+     frame_group_id: int,
+ ) -> dict:
+     metadata[dataformat.TRACKING][dataformat.TRACKING_RUN_ID] = tracking_run_id
+     metadata[dataformat.TRACKING][dataformat.FRAME_GROUP] = frame_group_id
+     return {METADATA: metadata, DATA: {DETECTIONS: detections}}
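For orientation, the dict assembled by build_output() and passed to write_json() has this schematic shape (the key constants come from OTVision.dataformat; the detection fields themselves are omitted here):

{
    METADATA: {...},  # per-file metadata incl. tracker settings, tracking run id, frame group id
    DATA: {
        DETECTIONS: [...],  # reindexed, tracked detections with track ids and finished flags
    },
}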
OTVision/transform/get_homography.py
@@ -0,0 +1,156 @@
+ """
+ OTVision module for calculating a homography from reference points
+ """
+
+ # Copyright (C) 2022 OpenTrafficCam Contributors
+ # <https://github.com/OpenTrafficCam
+ # <team@opentrafficcam.org>
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+
+ import logging
+
+ import cv2
+ import numpy as np
+ import pandas as pd
+
+ from OTVision.helpers.log import LOGGER_NAME
+
+ log = logging.getLogger(LOGGER_NAME)
+
+
+ # TODO: Type hint nested dict during refactoring
+ def get_homography(
+     refpts: dict,
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, int, str, dict]:
+     """Calculate homography matrix using pixel and world coordinates of corresponding
+     reference points.
+
+     Args:
+         refpts (dict): Corresponding reference points in both pixel and utm coordinates
+
+     Returns:
+         ndarray: homography
+         ndarray: refpts_utm_upshifted_predecimal_pt1_1row
+         ndarray: upshift_utm
+         int: utm_zone
+         str: hemisphere
+         dict: precision of homography
+     """
+
+     refpts_df = pd.DataFrame.from_dict(refpts, orient="index")
+     refpts_px = refpts_df[["x_px", "y_px"]].to_numpy()
+     refpts_utm = refpts_df[["lon_utm", "lat_utm"]].to_numpy()
+
+     # Upshift both x and y world coordinates of reference points to next round 500m
+     # value (UTM is in meters)
+     refpts_utm_min = np.amin(refpts_utm, axis=0)
+     refpts_utm_max = np.amax(refpts_utm, axis=0)
+     refpts_utm_mean = np.divide(np.add(refpts_utm_min, refpts_utm_max), 2)
+     mean_predecimal = refpts_utm_mean.astype(int)
+     mean_predecimal_pt1 = np.divide(mean_predecimal, 1000).astype(int)
+     mean_predecimal_pt1_Plus_500 = np.add(mean_predecimal_pt1.astype(float), 0.5)
+     mean_Plus_500 = np.multiply(mean_predecimal_pt1_Plus_500, 1000)
+     upshift_utm = np.subtract(mean_Plus_500, refpts_utm_mean)
+     refpts_utm_upshifted = np.add(refpts_utm, upshift_utm)
+
+     # Truncate thousands digits from shifted reference points
+     refpts_utm_upshifted_postdecimal = np.mod(refpts_utm_upshifted, 1)
+     refpts_utm_upshifted_predecimal = refpts_utm_upshifted.astype(int)
+     refpts_utm_upshifted_predecimal_pt1 = np.divide(
+         refpts_utm_upshifted_predecimal, 1000
+     ).astype(int)
+     refpts_utm_upshifted_predecimal_pt1_1row = np.array(
+         [
+             [
+                 refpts_utm_upshifted_predecimal_pt1.item(0),
+                 refpts_utm_upshifted_predecimal_pt1.item(1),
+             ]
+         ]
+     )
+     refpts_utm_upshifted_predecimal_pt2 = np.mod(refpts_utm_upshifted_predecimal, 1000)
+     refpts_utm_upshifted_disassembled = np.add(
+         refpts_utm_upshifted_predecimal_pt2, refpts_utm_upshifted_postdecimal
+     )
+
+     # Calculate homography matrix with refpts in pixel coordinates and truncated &
+     # shifted refpts in world coordinates
+     homography, mask = cv2.findHomography(
+         refpts_px, refpts_utm_upshifted_disassembled, cv2.RANSAC, 3.0
+     )  # RANSAC: define outlier/inlier threshold??? # ERROR:
+     log.debug(homography)
+     log.debug(mask)
+
+     eval_dict = evaluate_homography(
+         refpts_px, refpts_utm_upshifted_disassembled, homography
+     )
+
+     # TODO: Prevent different utm zones or hemispheres
+     utm_zone = refpts_df["zone_utm"].mode()[0]
+     hemisphere = refpts_df["hemisphere"].mode()[0]
+
+     return (
+         homography,
+         refpts_utm_upshifted_predecimal_pt1_1row,
+         upshift_utm,
+         utm_zone,
+         hemisphere,
+         eval_dict,
+     )
+
+
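A usage sketch for get_homography(). The reference-point values are made up, but the column names (x_px, y_px, lon_utm, lat_utm, zone_utm, hemisphere) match those read from the DataFrame in the function body; at least four point correspondences are needed for cv2.findHomography:

refpts = {
    "1": {"x_px": 120.0, "y_px": 85.0, "lon_utm": 368211.4, "lat_utm": 5806634.9,
          "zone_utm": 33, "hemisphere": "N"},
    "2": {"x_px": 640.0, "y_px": 90.0, "lon_utm": 368242.1, "lat_utm": 5806639.2,
          "zone_utm": 33, "hemisphere": "N"},
    "3": {"x_px": 660.0, "y_px": 400.0, "lon_utm": 368244.7, "lat_utm": 5806601.8,
          "zone_utm": 33, "hemisphere": "N"},
    "4": {"x_px": 100.0, "y_px": 410.0, "lon_utm": 368208.9, "lat_utm": 5806598.5,
          "zone_utm": 33, "hemisphere": "N"},
}
result = get_homography(refpts)
homography, predecimal_pt1_1row, upshift_utm, utm_zone, hemisphere, eval_dict = result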
+ def evaluate_homography(
+     refpts_pixel: np.ndarray,
+     refpts_world_upshifted_disassembled: np.ndarray,
+     homography_matrix: np.ndarray,
+ ) -> dict:  # TODO: Type hint nested dict during refactoring
+     """Calculates transformation error of homography.
+
+     Args:
+         refpts_pixel (ndarray): Reference points in pixel coordinates
+         refpts_world_upshifted_disassembled (ndarray): Internal variable
+         homography_matrix (ndarray): Homography matrix
+
+     Returns:
+         dict: Evaluation of transformation error
+     """
+     # Evaluate accuracy of homography matrix using reference points in world coords
+     refpts_pixel_tmp = np.array([refpts_pixel], dtype="float32")
+     refpts_world_upshifted_disassembled_transf_3d = cv2.perspectiveTransform(
+         refpts_pixel_tmp, homography_matrix
+     )
+     refpts_world_upshifted_disassembled_transf = np.squeeze(
+         refpts_world_upshifted_disassembled_transf_3d
+     )
+     eval_df = pd.DataFrame(
+         {
+             "x_ref": refpts_world_upshifted_disassembled[:, 0],
+             "y_ref": refpts_world_upshifted_disassembled[:, 1],
+             "x_transf": refpts_world_upshifted_disassembled_transf[:, 0],
+             "y_transf": refpts_world_upshifted_disassembled_transf[:, 1],
+         }
+     )
+     eval_df["x_delta"] = eval_df["x_transf"] - eval_df["x_ref"]
+     eval_df["y_delta"] = eval_df["y_transf"] - eval_df["y_ref"]
+     # Compute error vector lengths using the Pythagorean theorem
+     eval_df["delta"] = np.linalg.norm(eval_df[["x_delta", "y_delta"]].values, axis=1)
+     eval_df["delta_abs"] = eval_df["delta"].abs()
+     log.debug("Mean transformation error [m]: " + str(eval_df["delta_abs"].mean()))
+     log.debug("Maximum transformation error [m]: " + str(eval_df["delta_abs"].max()))
+     # sourcery skip: merge-dict-assign
+     eval_dict = {}
+     eval_dict["mean_transformation_error_m"] = eval_df["delta_abs"].mean()
+     eval_dict["Maximum_transformation_error_m"] = eval_df["delta_abs"].max()
+     return eval_dict
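The returned homography maps pixel coordinates into the shifted, thousands-truncated UTM frame used internally; undoing that shift recovers absolute UTM coordinates. Continuing the sketch above (variable names from the earlier unpacking; the reconstruction step is an assumption based on how the shift is applied in get_homography, not code from this package):

import cv2
import numpy as np

# cv2.perspectiveTransform expects a float32 array of shape (1, N, 2)
points_px = np.array([[[320.0, 240.0], [350.0, 250.0]]], dtype="float32")
points_utm_shifted = cv2.perspectiveTransform(points_px, homography)
# restore the truncated thousands digits and remove the upshift
points_utm = (
    np.squeeze(points_utm_shifted) + predecimal_pt1_1row * 1000 - upshift_utm
)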