OTVision 0.6.1-py3-none-any.whl → 0.6.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. OTVision/__init__.py +0 -10
  2. OTVision/application/detect/current_object_detector.py +1 -2
  3. OTVision/application/detect/detected_frame_factory.py +4 -3
  4. OTVision/application/detect/detected_frame_producer.py +1 -2
  5. OTVision/detect/builder.py +1 -1
  6. OTVision/detect/detected_frame_buffer.py +1 -1
  7. OTVision/detect/otdet.py +3 -2
  8. OTVision/detect/yolo.py +2 -2
  9. OTVision/domain/detect_producer_consumer.py +1 -1
  10. OTVision/domain/detection.py +128 -7
  11. OTVision/domain/frame.py +146 -1
  12. OTVision/domain/object_detection.py +1 -2
  13. OTVision/helpers/files.py +10 -2
  14. OTVision/helpers/input_types.py +15 -0
  15. OTVision/track/exporter/__init__.py +0 -0
  16. OTVision/track/exporter/filebased_exporter.py +24 -0
  17. OTVision/track/model/__init__.py +0 -0
  18. OTVision/track/model/filebased/__init__.py +0 -0
  19. OTVision/track/model/filebased/frame_chunk.py +203 -0
  20. OTVision/track/model/filebased/frame_group.py +95 -0
  21. OTVision/track/model/track_exporter.py +119 -0
  22. OTVision/track/model/tracking_interfaces.py +303 -0
  23. OTVision/track/parser/__init__.py +0 -0
  24. OTVision/track/parser/chunk_parser_plugins.py +99 -0
  25. OTVision/track/parser/frame_group_parser_plugins.py +127 -0
  26. OTVision/track/track.py +54 -332
  27. OTVision/track/tracker/__init__.py +0 -0
  28. OTVision/track/tracker/filebased_tracking.py +192 -0
  29. OTVision/track/tracker/tracker_plugin_iou.py +224 -0
  30. OTVision/version.py +1 -1
  31. OTVision/view/view_track.py +1 -1
  32. {otvision-0.6.1.dist-info → otvision-0.6.3.dist-info}/METADATA +8 -6
  33. {otvision-0.6.1.dist-info → otvision-0.6.3.dist-info}/RECORD +35 -23
  34. OTVision/track/iou.py +0 -282
  35. OTVision/track/iou_util.py +0 -140
  36. OTVision/track/preprocess.py +0 -453
  37. {otvision-0.6.1.dist-info → otvision-0.6.3.dist-info}/WHEEL +0 -0
  38. {otvision-0.6.1.dist-info → otvision-0.6.3.dist-info}/licenses/LICENSE +0 -0
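The listing above shows the tracking code being reorganized: the monolithic OTVision/track/iou.py, iou_util.py, and preprocess.py are removed and replaced by dedicated exporter, model, parser, and tracker subpackages. Based on the rewritten main() in the OTVision/track/track.py diff below, the new components are wired together roughly as in the following condensed sketch (not a drop-in replacement for main(); the CONFIG values shown are the defaults main() itself falls back to):

```python
# Condensed sketch of the new pipeline wiring, following the rewritten main()
# in the OTVision/track/track.py diff below.
from OTVision.config import (
    CONFIG,
    IOU,
    SIGMA_H,
    SIGMA_IOU,
    SIGMA_L,
    T_MIN,
    T_MISS_MAX,
    TRACK,
)
from OTVision.track.exporter.filebased_exporter import FinishedChunkTrackExporter
from OTVision.track.parser.chunk_parser_plugins import JsonChunkParser
from OTVision.track.parser.frame_group_parser_plugins import (
    TimeThresholdFrameGroupParser,
)
from OTVision.track.track import track_id_generator, tracker_metadata
from OTVision.track.tracker.filebased_tracking import (
    GroupedFilesTracker,
    UnfinishedChunksBuffer,
)
from OTVision.track.tracker.tracker_plugin_iou import IouParameters, IouTracker

# IOU thresholds default to the values main() reads from CONFIG.
sigma_l = CONFIG[TRACK][IOU][SIGMA_L]
sigma_h = CONFIG[TRACK][IOU][SIGMA_H]
sigma_iou = CONFIG[TRACK][IOU][SIGMA_IOU]
t_min = CONFIG[TRACK][IOU][T_MIN]
t_miss_max = CONFIG[TRACK][IOU][T_MISS_MAX]

# Frame-by-frame IOU tracking plugin.
iou_tracker = IouTracker(
    parameters=IouParameters(sigma_l, sigma_h, sigma_iou, t_min, t_miss_max)
)

# Groups otdet files into frame groups, parses each file into a FrameChunk
# and tracks it; track ids restart for every frame group.
file_tracker = GroupedFilesTracker(
    tracker=iou_tracker,
    chunk_parser=JsonChunkParser(),
    frame_group_parser=TimeThresholdFrameGroupParser(
        tracker_data=tracker_metadata(sigma_l, sigma_h, sigma_iou, t_min, t_miss_max)
    ),
    id_generator_factory=lambda _: track_id_generator(),
    overwrite=True,
)

# Chunks are buffered until all of their tracks are finished, then exported.
buffer = UnfinishedChunksBuffer(tracker=file_tracker, keep_discarded=True)
exporter = FinishedChunkTrackExporter()
# exporter.export(tracking_run_id, buffer.group_and_track(otdet_files), overwrite)
```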
OTVision/track/track.py CHANGED
@@ -1,29 +1,5 @@
-"""
-OTVision main module for tracking objects in successive frames of videos
-"""
-
-# points and transform tracksectory points from pixel into world coordinates.
-
 import logging
 import uuid
-
-# Copyright (C) 2022 OpenTrafficCam Contributors
-# <https://github.com/OpenTrafficCam
-# <team@opentrafficcam.org>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more detectionsails.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-from collections import defaultdict
 from pathlib import Path
 from typing import Callable, Iterator

@@ -32,7 +8,6 @@ from tqdm import tqdm
 from OTVision import dataformat
 from OTVision.config import (
     CONFIG,
-    DEFAULT_FILETYPE,
     DETECT,
     FILETYPES,
     IOU,
@@ -44,103 +19,43 @@ from OTVision.config import (
     T_MISS_MAX,
     TRACK,
 )
-from OTVision.dataformat import DATA, DETECTIONS, FINISHED, METADATA, TRACK_ID
-from OTVision.helpers.files import denormalize_bbox, get_files, write_json
+from OTVision.helpers.files import get_files
+from OTVision.helpers.input_types import check_types
 from OTVision.helpers.log import LOGGER_NAME
-from OTVision.track.preprocess import (
-    FrameChunk,
-    FrameChunkParser,
-    FrameIndexer,
-    Preprocess,
+from OTVision.track.exporter.filebased_exporter import FinishedChunkTrackExporter
+from OTVision.track.parser.chunk_parser_plugins import JsonChunkParser
+from OTVision.track.parser.frame_group_parser_plugins import (
+    TimeThresholdFrameGroupParser,
 )
+from OTVision.track.tracker.filebased_tracking import (
+    GroupedFilesTracker,
+    UnfinishedChunksBuffer,
+)
+from OTVision.track.tracker.tracker_plugin_iou import IouParameters, IouTracker

-from .iou import TrackedDetections, TrackingResult, id_generator, track_iou
-
-log = logging.getLogger(LOGGER_NAME)
-
-IdGenerator = Callable[[], str]
-
-
-class TrackedChunk(TrackedDetections):
-    """Tracking results combined with respective metadata."""
-
-    def __init__(
-        self,
-        detections: TrackedDetections,
-        file_path: Path,
-        frame_group_id: int,
-        frame_offset: int,
-        metadata: dict,
-    ) -> None:
-        super().__init__(
-            detections._detections,
-            detections._detected_ids,
-            detections._active_track_ids,
-        )
-        self.file_path = file_path
-        self.frame_offset = frame_offset
-        self._frame_group_id = frame_group_id
-        self.metadata = metadata
-
-
-class TrackingResultStore:
-    """TrackingResultStore manages TrackedChunks
-    and data required for the next tracking iteration:
-    - the remaining active tracks of the last iteration
-    - a global lookup table for each tracks last detection frame
-    """
-
-    def __init__(self, tracking_run_id: str) -> None:
-        self.tracking_run_id = tracking_run_id
-        self._tracked_chunks: list[TrackedChunk] = list()
-        self.last_track_frame: dict[int, int] = dict()
-        self.active_tracks: list[dict] = []
-
-    def store_tracking_result(
-        self,
-        tracking_result: TrackingResult,
-        file_path: Path,
-        frame_group_id: int,
-        frame_offset: int,
-        metadata: dict,
-    ) -> None:
-        # store tracking results
-        self._tracked_chunks.append(
-            TrackedChunk(
-                detections=tracking_result.tracked_detections,
-                file_path=file_path,
-                frame_group_id=frame_group_id,
-                frame_offset=frame_offset,
-                metadata=metadata,
-            )
-        )
-        # update global lookup table
-        self.last_track_frame.update(tracking_result.last_track_frame)
-
-        # replace last iterations active tracks
-        self.active_tracks = tracking_result.active_tracks
-        print("remaining active tracks", len(self.active_tracks))

-        # collect remaining active ids and update tracked chunks
-        new_active_ids = {t[TRACK_ID] for t in self.active_tracks}
-        for result in self._tracked_chunks:
-            result.update_active_track_ids(new_active_ids)
+def track_id_generator() -> Iterator[int]:
+    ID: int = 0
+    while True:
+        ID += 1
+        yield ID

-    def get_finished_results(self) -> list[TrackedChunk]:
-        """Return all TrackedChunks that have no remaining active track ids.
-        These are ready to be written to a result file.

-        Returns:
-            list[TrackedChunk]: finished TrackChunks
-        """
-        finished = [chunk for chunk in self._tracked_chunks if chunk.is_finished()]
+log = logging.getLogger(LOGGER_NAME)
+STR_ID_GENERATOR = Callable[[], str]

-        # remove finished TrackedChunks from manager
-        self._tracked_chunks = [
-            chunk for chunk in self._tracked_chunks if chunk not in finished
-        ]

-        return finished
+def tracker_metadata(
+    sigma_l: float, sigma_h: float, sigma_iou: float, t_min: float, t_miss_max: float
+) -> dict:
+    return {
+        dataformat.NAME: "IOU",
+        dataformat.SIGMA_L: sigma_l,
+        dataformat.SIGMA_H: sigma_h,
+        dataformat.SIGMA_IOU: sigma_iou,
+        dataformat.T_MIN: t_min,
+        dataformat.T_MISS_MAX: t_miss_max,
+    }


 def main(
@@ -151,7 +66,7 @@ def main(
     t_min: int = CONFIG[TRACK][IOU][T_MIN],
     t_miss_max: int = CONFIG[TRACK][IOU][T_MISS_MAX],
     overwrite: bool = CONFIG[TRACK][OVERWRITE],
-    tracking_run_id_generator: IdGenerator = lambda: str(uuid.uuid4()),
+    tracking_run_id_generator: STR_ID_GENERATOR = lambda: str(uuid.uuid4()),
 ) -> None:
     """Read detections from otdet file, perform tracking using iou tracker and
     save tracks to ottrk file.
@@ -183,6 +98,8 @@ def main(
            for this tracking run
     """

+    check_types(sigma_l, sigma_h, sigma_iou, t_min, t_miss_max)
+
     filetypes = CONFIG[FILETYPES][DETECT]
     detections_files = get_files(paths=paths, filetypes=filetypes)

@@ -194,229 +111,34 @@ def main(
         log.warning(f"No files of type '{filetypes}' found to track!")
         return

-    tracking_run_id = tracking_run_id_generator()
-    preprocessor = Preprocess()
-    preprocessed = preprocessor.run(detections_files)
-    file_type = CONFIG[DEFAULT_FILETYPE][TRACK]
-
-    for frame_group_id, frame_group in tqdm(
-        enumerate(preprocessed),
-        desc="Tracked frame groups",
-        unit=" framegroup",
-    ):
-        print()
-        print(f"Process frame group {frame_group_id}")
-
-        # update metadata for all files in FrameGroup
-        frame_group.update_metadata(
-            tracker_metadata(sigma_l, sigma_h, sigma_iou, t_min, t_miss_max)
-        )
-
-        # metadata and tracking results to keep track during iterations
-        # within a FrameGroups track/vehicle and frame ids should be unique
-        vehicle_id_generator = id_generator()
-        frame_offset = 0
-        track_result_store = TrackingResultStore(tracking_run_id)
-
-        # process each otdet file in frame group
-        for file_path in frame_group.files:
-            print(f"Process file {file_path} in frame group {frame_group_id}")
-
-            # read detection data
-            chunk = FrameChunkParser.parse(file_path, frame_offset)
-
-            if skip_existing_output_files(chunk, overwrite, file_type):
-                continue
-
-            log.info(f"Track {str(chunk)}")
-
-            detections = chunk.to_dict()
-            detections_denormalized = denormalize_bbox(
-                detections, metadata=frame_group._files_metadata
-            )
-
-            tracking_result = track(
-                detections=detections_denormalized,
-                sigma_l=sigma_l,
-                sigma_h=sigma_h,
-                sigma_iou=sigma_iou,
-                t_min=t_min,
-                t_miss_max=t_miss_max,
-                previous_active_tracks=track_result_store.active_tracks,
-                vehicle_id_generator=vehicle_id_generator,
-            )
-
-            track_result_store.store_tracking_result(
-                tracking_result,
-                file_path=file_path,
-                frame_group_id=frame_group_id,
-                frame_offset=frame_offset,
-                metadata=frame_group.metadata_for(file_path),
-            )
-            log.debug(f"Successfully tracked {chunk}")
-            frame_offset = chunk.last_frame_id() + 1
-
-            for finished_chunk in track_result_store.get_finished_results():
-                mark_and_write_results(finished_chunk, track_result_store, overwrite)
-
-        # write last files of frame group
-        # even though some tracks mights still be active
-        for finished_chunk in track_result_store._tracked_chunks:
-            mark_and_write_results(finished_chunk, track_result_store, overwrite)
-
-        log.info("Successfully tracked and wrote ")
-
-    finished_msg = "Finished tracking"
-    log.info(finished_msg)
-    print(finished_msg)
-
-
-def skip_existing_output_files(
-    chunk: FrameChunk, overwrite: bool, file_type: str
-) -> bool:
-    existing_output_files = chunk.get_existing_output_files(with_suffix=file_type)
-
-    if not overwrite and (len(existing_output_files) > 0):
-        log.warning(
-            (
-                f"{existing_output_files} already exist(s)."
-                "To overwrite, set overwrite to True"
-            )
-        )
-        return True
-
-    return False
-
-
-def tracker_metadata(
-    sigma_l: float, sigma_h: float, sigma_iou: float, t_min: float, t_miss_max: float
-) -> dict:
-    return {
-        dataformat.NAME: "IOU",
-        dataformat.SIGMA_L: sigma_l,
-        dataformat.SIGMA_H: sigma_h,
-        dataformat.SIGMA_IOU: sigma_iou,
-        dataformat.T_MIN: t_min,
-        dataformat.T_MISS_MAX: t_miss_max,
-    }
-
-
-def mark_and_write_results(
-    chunk: TrackedChunk,
-    result_store: TrackingResultStore,
-    overwrite: bool,
-) -> None:
-    # no active tracks remaining, so last track frame metadata
-    # should be correct for all contained tracks,
-    # thus set finished flags now
-    mark_last_detections_as_finished(chunk, result_store)
-
-    # write marked detections to track file and delete the data
-    file_path = chunk.file_path
-
-    # reindex frames before writing ottrk file
-    file_type = CONFIG[DEFAULT_FILETYPE][TRACK]
-    serializable_detections: list[dict] = FrameIndexer().reindex(
-        chunk._detections, frame_offset=chunk.frame_offset
+    iou_tracker: IouTracker = IouTracker(
+        parameters=IouParameters(sigma_l, sigma_h, sigma_iou, t_min, t_miss_max)
     )

-    output = build_output(
-        serializable_detections,
-        chunk.metadata,
-        result_store.tracking_run_id,
-        chunk._frame_group_id,
-    )
-    write_json(
-        dict_to_write=output,
-        file=Path(file_path),
-        filetype=file_type,
-        overwrite=overwrite,
+    chunk_parser = JsonChunkParser()
+    group_parser = TimeThresholdFrameGroupParser(
+        tracker_data=tracker_metadata(sigma_l, sigma_h, sigma_iou, t_min, t_miss_max)
     )

-    log.info(f"Successfully tracked and wrote {file_path}")
-    del chunk._detections
-
-
-def track(
-    detections: dict,  # TODO: Type hint nested dict during refactoring
-    sigma_l: float = CONFIG[TRACK][IOU][SIGMA_L],
-    sigma_h: float = CONFIG[TRACK][IOU][SIGMA_H],
-    sigma_iou: float = CONFIG[TRACK][IOU][SIGMA_IOU],
-    t_min: int = CONFIG[TRACK][IOU][T_MIN],
-    t_miss_max: int = CONFIG[TRACK][IOU][T_MISS_MAX],
-    previous_active_tracks: list = [],
-    vehicle_id_generator: Iterator[int] = id_generator(),
-) -> TrackingResult:  # TODO: Type hint nested dict during refactoring
-    """Perform tracking using track_iou with arguments and add metadata to tracks.
-
-    Args:
-        detections (dict): Dict of detections in .otdet format.
-        sigma_l (float, optional): Lower confidence threshold. Detections with
-            confidences below sigma_l are not even considered for tracking.
-            Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_L"].
-        sigma_h (float, optional): Upper confidence threshold. Tracks are only
-            considered as valid if they contain at least one detection with a confidence
-            above sigma_h.
-            Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_H"].
-        sigma_iou (float, optional): Intersection-Over-Union threshold. Two detections
-            in subsequent frames are considered to belong to the same track if their IOU
-            value exceeds sigma_iou and this is the highest IOU of all possible
-            combination of detections.
-            Defaults to CONFIG["TRACK"]["IOU"]["SIGMA_IOU"].
-        t_min (int, optional): Minimum number of detections to count as a valid track.
-            All tracks with less detections will be dissmissed.
-            Defaults to CONFIG["TRACK"]["IOU"]["T_MIN"].
-        t_miss_max (int, optional): Maximum number of missed detections before
-            continuing a track. If more detections are missing, the track will not be
-            continued.
-            Defaults to CONFIG["TRACK"]["IOU"]["T_MISS_MAX"].
-        previous_active_tracks (list): a list of remaining active tracks
-            from previous iterations.
-        vehicle_id_generator (Iterator[int]): provides ids for new tracks
-
-    Returns:
-        TrackingResult: A result object holding tracked detecktions in ottrk format
-            and list of active tracks (iou format?)
-            and a lookup table for each tracks last detection frame.
-    """
-
-    result = track_iou(
-        detections=detections[DATA],
-        sigma_l=sigma_l,
-        sigma_h=sigma_h,
-        sigma_iou=sigma_iou,
-        t_min=t_min,
-        t_miss_max=t_miss_max,
-        previous_active_tracks=previous_active_tracks,
-        vehicle_id_generator=vehicle_id_generator,
+    file_tracker = GroupedFilesTracker(
+        tracker=iou_tracker,
+        chunk_parser=chunk_parser,
+        frame_group_parser=group_parser,
+        id_generator_factory=lambda _: track_id_generator(),
+        overwrite=True,
     )
-    log.info("Detections tracked")
-
-    return result
-

-def mark_last_detections_as_finished(
-    chunk: TrackedChunk, result_store: TrackingResultStore
-) -> None:
-    # invert last occurrence frame dict
-    last_track_frame = result_store.last_track_frame
-
-    frame_ending_tracks = defaultdict(set)
-    for vehID in chunk._detected_ids:
-        frame_ending_tracks[last_track_frame[vehID]].add(vehID)
+    buffer = UnfinishedChunksBuffer(
+        tracker=file_tracker,
+        keep_discarded=True,
+    )

-    for frame_num, frame_det in chunk._detections.items():
-        for ending_track in frame_ending_tracks[int(frame_num)]:
-            frame_det[ending_track][FINISHED] = True
-            del last_track_frame[ending_track]
+    exporter = FinishedChunkTrackExporter()

+    tracking_run_id = tracking_run_id_generator()
+    finished_chunk_stream = buffer.group_and_track(detections_files)

-def build_output(
-    detections: list[dict],
-    metadata: dict,
-    tracking_run_id: str,
-    frame_group_id: int,
-) -> dict:
-    metadata[dataformat.TRACKING][dataformat.TRACKING_RUN_ID] = tracking_run_id
-    metadata[dataformat.TRACKING][dataformat.FRAME_GROUP] = frame_group_id
-    return {METADATA: metadata, DATA: {DETECTIONS: detections}}
+    finished_chunk_progress = tqdm(
+        finished_chunk_stream, desc="export FrameChunk", total=len(detections_files)
+    )
+    exporter.export(tracking_run_id, iter(finished_chunk_progress), overwrite)
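The docstring of the removed track() function above documents the association rule that the IOU tracker applies: a detection in the next frame is assigned to a track when the IoU of the two boxes exceeds sigma_iou and is the highest IoU among all candidate pairings. A minimal, self-contained illustration of that rule follows; it is not OTVision's IouTracker implementation, and the (x_min, y_min, x_max, y_max) box format is an assumption made only for this sketch:

```python
# Minimal illustration of the IoU matching rule described in the removed
# track() docstring above; NOT OTVision's actual IouTracker implementation.
# Boxes are assumed to be (x_min, y_min, x_max, y_max) tuples in pixels.
Box = tuple[float, float, float, float]


def iou(a: Box, b: Box) -> float:
    """Intersection over union of two axis-aligned boxes."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    intersection = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - intersection
    return intersection / union if union > 0 else 0.0


def best_match(track_box: Box, candidates: list[Box], sigma_iou: float) -> int | None:
    """Index of the candidate with the highest IoU above sigma_iou, else None."""
    best_index: int | None = None
    best_value = sigma_iou
    for index, candidate in enumerate(candidates):
        value = iou(track_box, candidate)
        if value > best_value:
            best_index, best_value = index, value
    return best_index
```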
OTVision/track/tracker/filebased_tracking.py ADDED
@@ -0,0 +1,192 @@
+import logging
+from pathlib import Path
+from typing import Callable, Iterator
+
+from more_itertools import peekable
+from tqdm import tqdm
+
+from OTVision.config import CONFIG, DEFAULT_FILETYPE, OVERWRITE, TRACK
+from OTVision.domain.detection import TrackId
+from OTVision.domain.frame import DetectedFrame, FrameNo, IsLastFrame, TrackedFrame
+from OTVision.helpers.log import LOGGER_NAME
+from OTVision.track.model.filebased.frame_chunk import (
+    ChunkParser,
+    FinishedChunk,
+    FrameChunk,
+    TrackedChunk,
+)
+from OTVision.track.model.filebased.frame_group import FrameGroup, FrameGroupParser
+from OTVision.track.model.tracking_interfaces import (
+    ID_GENERATOR,
+    Tracker,
+    UnfinishedTracksBuffer,
+)
+
+log = logging.getLogger(LOGGER_NAME)
+
+
+class ChunkBasedTracker(Tracker):
+
+    def __init__(self, tracker: Tracker, chunkParser: ChunkParser) -> None:
+        super().__init__()
+        self._chunk_parser = chunkParser
+        self._tracker = tracker
+
+    def track_frame(
+        self,
+        frames: DetectedFrame,
+        id_generator: ID_GENERATOR,
+    ) -> TrackedFrame:
+        return self._tracker.track_frame(frames, id_generator)
+
+    def track_chunk(
+        self,
+        chunk: FrameChunk,
+        is_last_chunk: bool,
+        id_generator: ID_GENERATOR,
+    ) -> TrackedChunk:
+        frames_progress = tqdm(
+            chunk.frames, desc="track Frame", total=len(chunk.frames), leave=False
+        )
+
+        tracked_frames = self.track(iter(frames_progress), id_generator)
+        return TrackedChunk(
+            file=chunk.file,
+            frames=list(tracked_frames),
+            metadata=chunk.metadata,
+            is_last_chunk=is_last_chunk,
+            frame_group_id=chunk.frame_group_id,
+        )
+
+    def track_file(
+        self,
+        file: Path,
+        frame_group: FrameGroup,
+        is_last_file: bool,
+        id_generator: ID_GENERATOR,
+        frame_offset: int = 0,
+    ) -> TrackedChunk:
+        chunk = self._chunk_parser.parse(file, frame_group, frame_offset)
+        return self.track_chunk(chunk, is_last_file, id_generator)
+
+
+ID_GENERATOR_FACTORY = Callable[[FrameGroup], ID_GENERATOR]
+
+
+class GroupedFilesTracker(ChunkBasedTracker):
+
+    def __init__(
+        self,
+        tracker: Tracker,
+        chunk_parser: ChunkParser,
+        frame_group_parser: FrameGroupParser,
+        id_generator_factory: ID_GENERATOR_FACTORY,
+        overwrite: bool = CONFIG[TRACK][OVERWRITE],
+        file_type: str = CONFIG[DEFAULT_FILETYPE][TRACK],
+    ) -> None:
+        super().__init__(tracker, chunk_parser)
+        self._group_parser = frame_group_parser
+        self._id_generator_of = id_generator_factory
+        self._overwrite = overwrite
+        self._file_type = file_type
+
+    def track_group(self, group: FrameGroup) -> Iterator[TrackedChunk]:
+        if self.check_skip_due_to_existing_output_files(group):
+            log.warning(f"Skip FrameGroup {group.id}")
+            yield from []  # TODO how to create empty generator stream?
+
+        frame_offset = 0  # frame no starts a 0 for each frame group
+        id_generator = self._id_generator_of(group)  # new id generator per group
+        file_stream = peekable(
+            tqdm(
+                group.files,
+                desc="track FrameChunk",
+                total=len(group.files),
+                leave=False,
+            )
+        )
+
+        for file in file_stream:
+            is_last = file_stream.peek(default=None) is None
+
+            chunk = self._chunk_parser.parse(file, group, frame_offset)
+            frame_offset = chunk.frames[-1].no + 1  # assuming frames are sorted by no
+
+            tracked_chunk = self.track_chunk(chunk, is_last, id_generator)
+            yield tracked_chunk
+
+    def group_and_track_files(self, files: list[Path]) -> Iterator[TrackedChunk]:
+        processed = self._group_parser.process_all(files)
+
+        processed_progress = tqdm(
+            processed, desc="track FrameGroup", total=len(processed), leave=False
+        )
+        for group in processed_progress:
+            yield from self.track_group(group)
+
+    def check_skip_due_to_existing_output_files(self, group: FrameGroup) -> bool:
+        if not self._overwrite and group.check_any_output_file_exists(self._file_type):
+            existing_files = group.get_existing_output_files(
+                with_suffix=self._file_type
+            )
+            log.warning(
+                (
+                    f"{existing_files} already exist(s)."
+                    "To overwrite, set overwrite to True"
+                )
+            )
+            return True
+
+        return False
+
+
+class UnfinishedChunksBuffer(UnfinishedTracksBuffer[TrackedChunk, FinishedChunk]):
+
+    def __init__(
+        self,
+        tracker: GroupedFilesTracker,
+        keep_discarded: bool = False,
+    ) -> None:
+        super().__init__(keep_discarded)
+        self.tracker = tracker
+
+    def group_and_track(self, files: list[Path]) -> Iterator[FinishedChunk]:
+        processed = self.tracker._group_parser.process_all(files)
+
+        processed_progress = tqdm(
+            processed, desc="track FrameGroup", total=len(processed), leave=False
+        )
+        for group in processed_progress:
+            yield from self.track_group(group)
+
+    def track_group(self, group: FrameGroup) -> Iterator[FinishedChunk]:
+        tracked_chunk_stream = self.tracker.track_group(group)
+        return self.track_and_finish(tracked_chunk_stream)
+
+    def _get_last_track_frames(self, container: TrackedChunk) -> dict[TrackId, FrameNo]:
+        return container.last_track_frame
+
+    def _get_unfinished_tracks(self, container: TrackedChunk) -> set[TrackId]:
+        return container.unfinished_tracks
+
+    def _get_observed_tracks(self, container: TrackedChunk) -> set[TrackId]:
+        return container.observed_tracks
+
+    def _get_newly_finished_tracks(self, container: TrackedChunk) -> set[TrackId]:
+        return container.finished_tracks
+
+    def _get_newly_discarded_tracks(self, container: TrackedChunk) -> set[TrackId]:
+        return container.discarded_tracks
+
+    def _get_last_frame_of_container(self, container: TrackedChunk) -> FrameNo:
+        return max(frame.no for frame in container.frames)
+        # todo faster implementation if sorted or save as metadata?
+
+    def _finish(
+        self,
+        container: TrackedChunk,
+        is_last: IsLastFrame,
+        discarded_tracks: set[TrackId],
+        keep_discarded: bool,
+    ) -> FinishedChunk:
+        return container.finish(is_last, discarded_tracks, keep_discarded)
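One detail worth noting in this new module: GroupedFilesTracker takes an ID_GENERATOR_FACTORY, a callable that receives the FrameGroup and returns a fresh id generator, so track ids start over for every frame group (the rewritten main() above passes lambda _: track_id_generator()). A minimal factory along those lines, assuming integer track ids as in track.py, might look like this:

```python
from typing import Iterator

from OTVision.track.model.filebased.frame_group import FrameGroup


def fresh_int_ids(group: FrameGroup) -> Iterator[int]:
    """Return a new integer id generator for each frame group."""
    # Mirrors track_id_generator() in track.py; ids restart per group.
    current = 0
    while True:
        current += 1
        yield current


# Passed to GroupedFilesTracker(id_generator_factory=fresh_int_ids, ...)
```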