OTVision 0.6.1__py3-none-any.whl → 0.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,119 @@
1
+ import logging
2
+ from abc import ABC, abstractmethod
3
+ from pathlib import Path
4
+ from typing import Generic, Iterator, TypeVar
5
+
6
+ from tqdm import tqdm
7
+
8
+ from OTVision.config import CONFIG, DEFAULT_FILETYPE, TRACK
9
+ from OTVision.dataformat import (
10
+ DATA,
11
+ DETECTIONS,
12
+ FRAME,
13
+ FRAME_GROUP,
14
+ INPUT_FILE_PATH,
15
+ METADATA,
16
+ TRACK_ID,
17
+ TRACKING,
18
+ TRACKING_RUN_ID,
19
+ )
20
+ from OTVision.helpers.files import write_json
21
+ from OTVision.helpers.log import LOGGER_NAME
22
+
23
# Module-level logger; LOGGER_NAME is the shared OTVision logger name.
log = logging.getLogger(LOGGER_NAME)


# Finished container type handled by an exporter,
# e.g. FinishedFrame or FinishedChunk.
F = TypeVar("F")  # Finished container: e.g. FinishedFrame or FinishedChunk
27
+
28
+
29
class FinishedTracksExporter(ABC, Generic[F]):
    """Exports finished track containers (F) to track result files.

    Abstract accessors supply the container's detections, metadata,
    result path and frame group id; `export` drives reindexing,
    output assembly and serialization via `write_json`.
    """

    def __init__(self, file_type: str = CONFIG[DEFAULT_FILETYPE][TRACK]):
        # File type (extension) of the written track result files.
        self.file_type = file_type

    @abstractmethod
    def get_detection_dicts(self, container: F) -> list[dict]:
        """Detection dictionaries contained in the given container."""
        pass

    @abstractmethod
    def get_result_path(self, container: F) -> Path:
        """Path of the result file to write for the given container."""
        pass

    @abstractmethod
    def get_metadata(self, container: F) -> dict:
        """Metadata dictionary of the given container."""
        pass

    @abstractmethod
    def get_frame_group_id(self, container: F) -> int:
        """Id of the frame group the given container belongs to."""
        pass

    def export(
        self, tracking_run_id: str, stream: Iterator[F], overwrite: bool
    ) -> None:
        """Write one result file per container of the given (lazy) stream.

        Args:
            tracking_run_id (str): id of the tracking run to record in metadata.
            stream (Iterator[F]): (lazy) stream of finished containers.
            overwrite (bool): whether existing result files may be overwritten.
        """
        for container in stream:
            self.export_frames(container, tracking_run_id, overwrite)

    def export_frames(
        self, container: F, tracking_run_id: str, overwrite: bool
    ) -> None:
        """Reindex, assemble and write the detections of a single container."""
        file_path = self.get_result_path(container)

        det_dicts = self.reindex(self.get_detection_dicts(container))

        output = self.build_output(
            det_dicts,
            self.get_metadata(container),
            tracking_run_id,
            self.get_frame_group_id(container),
        )

        write_json(
            dict_to_write=output,
            file=Path(file_path),
            filetype=self.file_type,
            overwrite=overwrite,
        )

        log.info(f"Successfully tracked and wrote {file_path}")

    @staticmethod
    def reindex(det_dicts: list[dict]) -> list[dict]:
        """Shift frame numbers so the earliest observed frame becomes 1.

        Returns the detections sorted by (input file, frame, track id).

        Args:
            det_dicts (list[dict]): detection dictionaries of a single
                source file.

        Returns:
            list[dict]: reindexed, sorted detection dictionaries
                (empty list for empty input).

        Raises:
            ValueError: if detections stem from more than one source file.
        """
        # Guard first: min() below raises ValueError on an empty sequence.
        # (Previously this check happened only after min() had already run.)
        if not det_dicts:
            return []

        min_frame_no = min(det[FRAME] for det in det_dicts)

        det_dicts_progress = tqdm(
            det_dicts,
            desc="reindex TrackedDetections",
            total=len(det_dicts),
            leave=False,
        )
        reindexed_dets = [
            {**det, **{FRAME: det[FRAME] - min_frame_no + 1}}
            for det in det_dicts_progress
        ]

        if len({detection[INPUT_FILE_PATH] for detection in reindexed_dets}) > 1:
            raise ValueError("Expect detections from only a single source file")

        reindexed_dets.sort(
            key=lambda detection: (
                detection[INPUT_FILE_PATH],
                detection[FRAME],
                detection[TRACK_ID],
            )
        )

        return reindexed_dets

    @staticmethod
    def build_output(
        detections: list[dict],
        metadata: dict,
        tracking_run_id: str,
        frame_group_id: int,
    ) -> dict:
        """Assemble the serializable result dictionary.

        Note: mutates the given metadata dict in place (TRACKING section).
        """
        metadata[TRACKING][TRACKING_RUN_ID] = tracking_run_id
        metadata[TRACKING][FRAME_GROUP] = frame_group_id
        return {METADATA: metadata, DATA: {DETECTIONS: detections}}
@@ -0,0 +1,309 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Generic, Iterator, TypeVar
3
+
4
+ from OTVision.track.model.frame import (
5
+ FinishedFrame,
6
+ Frame,
7
+ FrameNo,
8
+ IsLastFrame,
9
+ TrackedFrame,
10
+ TrackId,
11
+ )
12
+
13
# Source type of a Frame (e.g., Path, URL, str, etc.).
S = TypeVar("S")  # Source type (e.g., Path, URL, str, etc.)
# -> would look nicer in python 3.12

# Alias for a provider of fresh (unique) track ids.
ID_GENERATOR = Iterator[TrackId]
17
+
18
+
19
class Tracker(ABC, Generic[S]):
    """Interface for enriching a Frame stream with tracking information.

    Consumes Frames lazily and produces a lazy stream (generator) of
    TrackedFrames. Implementations provide the template method
    `track_frame` for processing a single frame.

    Args:
        Generic (S): generic type of Frame source (e.g. file path, or stream url)
    """

    def track(
        self, frames: Iterator[Frame[S]], id_generator: ID_GENERATOR
    ) -> Iterator[TrackedFrame[S]]:
        """Lazily map every incoming Frame to a TrackedFrame.

        Args:
            frames (Iterator[Frame[S]]): (lazy) stream of Frames
                with untracked Detections.
            id_generator (ID_GENERATOR): provider of new (unique) track ids.

        Yields:
            Iterator[TrackedFrame[S]]: (lazy) stream of TrackedFrames with
                TrackedDetections
        """
        yield from (self.track_frame(frame, id_generator) for frame in frames)

    @abstractmethod
    def track_frame(
        self,
        frame: Frame[S],
        id_generator: ID_GENERATOR,
    ) -> TrackedFrame[S]:
        """Add tracking information to a single Frame,
        turning it into a TrackedFrame with TrackedDetections.

        Args:
            frame (Frame[S]): the Frame (with source S) to be tracked.
            id_generator (ID_GENERATOR): provider of new (unique) track ids.

        Returns:
            TrackedFrame[S]: TrackedFrame with TrackedDetections
        """
        pass
67
+
68
+
69
# Generic container types used by the buffering machinery in this module.
C = TypeVar("C")  # Detection container: e.g. TrackedFrame or TrackedChunk
F = TypeVar("F")  # Finished container: e.g. FinishedFrame or FinishedChunk
71
+
72
+
73
class UnfinishedTracksBuffer(ABC, Generic[C, F]):
    """UnfinishedTracksBuffer provides functionality
    to add finished information to tracked detections.

    It processes containers (C) of TrackedDetections, buffers them
    and stores track ids that are reported as finished.
    Only when all tracks of a container (C) were marked as finished,
    it is converted into a finished container (F) and yielded.

    Args:
        Generic (C): generic type of TrackedDetection container
            (e.g. TrackedFrame or TrackedChunk)
        Generic (F): generic type of FinishedDetection container
            (e.g. FinishedFrame or FinishedChunk)
        keep_discarded (bool): whether detections marked as discarded should
            be kept or filtered when finishing them. Defaults to False.
    """

    def __init__(self, keep_discarded: bool = False) -> None:
        # Whether discarded detections survive finishing (passed to _finish).
        self._keep_discarded = keep_discarded
        # Buffered containers paired with their still-pending track ids.
        self._unfinished_containers: list[tuple[C, set[TrackId]]] = list()
        # Last frame no each track was observed in, across all containers.
        self._merged_last_track_frame: dict[TrackId, FrameNo] = dict()
        # Track ids reported as discarded so far.
        self._discarded_tracks: set[TrackId] = set()

    @abstractmethod
    def _get_last_track_frames(self, container: C) -> dict[TrackId, FrameNo]:
        """Mapping from TrackId to frame no of last detection occurrence.
        Mapping for all tracks in newly tracked container.

        Args:
            container (C): newly tracked TrackedDetection container

        Returns:
            dict[TrackId, int]: last frame no by TrackId
        """
        pass

    @abstractmethod
    def _get_unfinished_tracks(self, container: C) -> set[TrackId]:
        """TrackIds of given container, that are marked as unfinished.

        Args:
            container (C): newly tracked TrackedDetection container

        Returns:
            set[TrackId]: TrackIds of container marked as unfinished
        """
        pass

    @abstractmethod
    def _get_observed_tracks(self, container: C) -> set[TrackId]:
        """TrackIds observed in the given (newly tracked) container.

        NOTE(review): not called by this base class itself — presumably
        part of the subclass contract; confirm against other callers.

        Args:
            container (C): newly tracked TrackedDetection container

        Returns:
            set[TrackId]: observed TrackIds of container
        """
        pass

    @abstractmethod
    def _get_newly_finished_tracks(self, container: C) -> set[TrackId]:
        """TrackIds marked as finished in the given (newly tracked) container.

        Args:
            container (C): newly tracked TrackedDetection container

        Returns:
            set[TrackId]: finished TrackIds in container
        """
        pass

    @abstractmethod
    def _get_newly_discarded_tracks(self, container: C) -> set[TrackId]:
        """TrackIds marked as discarded in the given (newly tracked) container.

        Args:
            container (C): newly tracked TrackedDetection container

        Returns:
            set[TrackId]: discarded TrackIds in container
        """
        pass

    @abstractmethod
    def _get_last_frame_of_container(self, container: C) -> FrameNo:
        """The last FrameNo of the given container.

        Args:
            container (C): newly tracked TrackedDetection container

        Returns:
            FrameNo: last FrameNo of the given container
        """
        pass

    @abstractmethod
    def _finish(
        self,
        container: C,
        is_last: IsLastFrame,
        discarded_tracks: set[TrackId],
        keep_discarded: bool,
    ) -> F:
        """Transform the given container to a finished container
        by adding is_finished information to all contained TrackedDetections
        turning them into FinishedDetections.

        Args:
            container (C): container of TrackedDetections
            is_last (IsLastFrame): check whether a track ends in a certain frame
            discarded_tracks (set[TrackId]): track ids marked as discarded
            keep_discarded (bool): whether detections marked as discarded are kept.

        Returns:
            F: a finished container with transformed detections of given container
        """
        pass

    def track_and_finish(self, containers: Iterator[C]) -> Iterator[F]:
        """Buffer incoming containers and yield them once all of their
        tracks were reported finished (or discarded)."""
        # TODO template method to obtain containers?

        for container in containers:

            # if track is observed in current iteration, update its last observed frame
            new_last_track_frames = self._get_last_track_frames(container)
            self._merged_last_track_frame.update(new_last_track_frames)

            newly_unfinished_tracks = self._get_unfinished_tracks(container)
            self._unfinished_containers.append((container, newly_unfinished_tracks))

            # update unfinished track ids of previously tracked containers
            # if containers have no pending tracks, make ready for finishing
            newly_finished_tracks = self._get_newly_finished_tracks(container)
            newly_discarded_tracks = self._get_newly_discarded_tracks(container)
            self._discarded_tracks.update(newly_discarded_tracks)

            ready_containers: list[C] = []
            for c, track_ids in self._unfinished_containers:
                # Mutates the buffered pending-track sets in place.
                track_ids.difference_update(newly_finished_tracks)
                track_ids.difference_update(newly_discarded_tracks)

                if not track_ids:
                    ready_containers.append(c)

            # Drop the containers that became ready from the buffer.
            self._unfinished_containers = [
                (c, u)
                for c, u in self._unfinished_containers
                if c not in ready_containers
            ]

            finished_containers: list[F] = self._finish_containers(ready_containers)
            yield from finished_containers

        # finish remaining containers with pending tracks
        remaining_containers = [c for c, _ in self._unfinished_containers]
        self._unfinished_containers = list()

        finished_containers = self._finish_containers(remaining_containers)
        # Stream exhausted: reset cross-iteration state.
        self._merged_last_track_frame = dict()
        yield from finished_containers

    def _finish_containers(self, containers: list[C]) -> list[F]:
        """Finish the given containers and prune state of tracks that
        cannot appear in later containers anymore."""
        if len(containers) == 0:
            return []

        # A track ends in the frame recorded as its last observed frame.
        def is_last(frame_no: FrameNo, track_id: TrackId) -> bool:
            return frame_no == self._merged_last_track_frame[track_id]

        keep = self._keep_discarded
        discarded = self._discarded_tracks

        finished_containers: list[F] = [
            self._finish(c, is_last, discarded, keep) for c in containers
        ]

        # todo check if there are edge cases where track ids in merged_last_track_frame
        # have frame no below containers last frame,
        # but might appear in following containers
        last_frame_of_container = max(
            self._get_last_frame_of_container(c) for c in containers
        )
        # Tracks whose last observed frame lies within the finished range
        # are assumed complete and can be forgotten.
        ids_to_delete = [
            track_id
            for track_id, frame_no in self._merged_last_track_frame.items()
            if frame_no <= last_frame_of_container
        ]

        self._merged_last_track_frame = {
            track_id: frame_no
            for track_id, frame_no in self._merged_last_track_frame.items()
            if track_id not in ids_to_delete
        }
        self._discarded_tracks.difference_update(ids_to_delete)
        # self._finished_tracks.difference_update(ids_to_delete)

        return finished_containers
269
+
270
+
271
class UnfinishedFramesBuffer(UnfinishedTracksBuffer[TrackedFrame[S], FinishedFrame[S]]):
    """UnfinishedTracksBuffer implementation operating on single frames:
    TrackedFrame containers are buffered and yielded as FinishedFrames."""

    def __init__(self, tracker: Tracker[S], keep_discarded: bool = False):
        super().__init__(keep_discarded)
        self._tracker = tracker

    def track(
        self, frames: Iterator[Frame[S]], id_generator: ID_GENERATOR
    ) -> Iterator[FinishedFrame[S]]:
        """Track the given frames and yield each one once all of its
        tracks were reported finished."""
        return self.track_and_finish(self._tracker.track(frames, id_generator))

    def _get_last_track_frames(self, container: TrackedFrame[S]) -> dict[TrackId, int]:
        # Each observed track was last seen in exactly this frame.
        return dict.fromkeys(container.observed_tracks, container.no)

    def _get_unfinished_tracks(self, container: TrackedFrame[S]) -> set[TrackId]:
        return container.unfinished_tracks

    def _get_observed_tracks(self, container: TrackedFrame[S]) -> set[TrackId]:
        return container.observed_tracks

    def _get_newly_finished_tracks(self, container: TrackedFrame[S]) -> set[TrackId]:
        return container.finished_tracks

    def _get_newly_discarded_tracks(self, container: TrackedFrame[S]) -> set[TrackId]:
        return container.discarded_tracks

    def _get_last_frame_of_container(self, container: TrackedFrame[S]) -> FrameNo:
        return container.no

    def _finish(
        self,
        container: TrackedFrame[S],
        is_last: IsLastFrame,
        discarded_tracks: set[TrackId],
        keep_discarded: bool,
    ) -> FinishedFrame[S]:
        # Delegate to the frame itself; it knows how to finish its detections.
        return container.finish(is_last, discarded_tracks, keep_discarded)
File without changes
@@ -0,0 +1,99 @@
1
+ from datetime import datetime
2
+ from pathlib import Path
3
+ from typing import Any
4
+
5
+ from tqdm import tqdm
6
+
7
+ from OTVision.dataformat import (
8
+ CLASS,
9
+ CONFIDENCE,
10
+ DATA,
11
+ DATE_FORMAT,
12
+ DETECTIONS,
13
+ OCCURRENCE,
14
+ H,
15
+ W,
16
+ X,
17
+ Y,
18
+ )
19
+ from OTVision.helpers.date import (
20
+ parse_date_string_to_utc_datime,
21
+ parse_timestamp_string_to_utc_datetime,
22
+ )
23
+ from OTVision.helpers.files import denormalize_bbox, read_json
24
+ from OTVision.track.model.detection import Detection
25
+ from OTVision.track.model.filebased.frame_chunk import ChunkParser, FrameChunk
26
+ from OTVision.track.model.filebased.frame_group import FrameGroup
27
+ from OTVision.track.model.frame import Frame
28
+
29
+
30
class JsonChunkParser(ChunkParser):
    """Parses detection files (json) into FrameChunks."""

    def parse(
        self, file: Path, frame_group: FrameGroup, frame_offset: int = 0
    ) -> FrameChunk:
        """Read the given detection file and build a FrameChunk,
        denormalizing bounding boxes and sorting frames chronologically."""
        content = read_json(file)
        metadata: dict = frame_group.metadata_by_file[file]

        denormalized = denormalize_bbox(
            content, file, metadata={file.as_posix(): metadata}
        )
        frame_data: dict[int, dict[str, Any]] = denormalized[DATA]

        frames = self.convert(file, frame_offset, frame_data)
        frames.sort(key=lambda frame: (frame.occurrence, frame.no))

        return FrameChunk(file, metadata, frames, frame_group.id)

    def convert(
        self, file: Path, frame_offset: int, input: dict[int, dict[str, Any]]
    ) -> list[Frame[Path]]:
        """Convert raw per-frame dicts into Frame objects,
        applying the given frame number offset."""
        detection_parser = DetectionParser()

        input_progress = tqdm(
            input.items(), desc="parse Frames", total=len(input), leave=False
        )
        return [
            Frame(
                int(frame_no) + frame_offset,
                occurrence=parse_datetime(data[OCCURRENCE]),
                source=file,
                detections=detection_parser.convert(data[DETECTIONS]),
                image=None,
            )
            for frame_no, data in input_progress
        ]
70
+
71
+
72
class DetectionParser:
    """Converts raw detection dictionaries into Detection objects."""

    def convert(self, detection_data: list[dict[str, str]]) -> list[Detection]:
        """Build one Detection per input dict,
        coercing confidence and bounding box values to float."""
        return [
            Detection(
                entry[CLASS],
                float(entry[CONFIDENCE]),
                float(entry[X]),
                float(entry[Y]),
                float(entry[W]),
                float(entry[H]),
            )
            for entry in detection_data
        ]
86
+
87
+
88
def parse_datetime(date: str | float) -> datetime:
    """Parse a date string or timestamp to a datetime with UTC as timezone.

    Args:
        date (str | float): the date to parse

    Returns:
        datetime: the parsed datetime object with UTC set as timezone
    """
    # A dash distinguishes a formatted date string from a raw timestamp.
    is_date_string = isinstance(date, str) and "-" in date
    if is_date_string:
        return parse_date_string_to_utc_datime(date, DATE_FORMAT)
    return parse_timestamp_string_to_utc_datetime(date)
@@ -0,0 +1,127 @@
1
+ import re
2
+ from datetime import datetime, timedelta
3
+ from pathlib import Path
4
+
5
+ from OTVision import version
6
+ from OTVision.dataformat import (
7
+ EXPECTED_DURATION,
8
+ FILENAME,
9
+ FIRST_TRACKED_VIDEO_START,
10
+ LAST_TRACKED_VIDEO_END,
11
+ LENGTH,
12
+ OTTRACK_VERSION,
13
+ OTVISION_VERSION,
14
+ RECORDED_START_DATE,
15
+ TRACKER,
16
+ TRACKING,
17
+ VIDEO,
18
+ )
19
+ from OTVision.helpers.files import (
20
+ FULL_FILE_NAME_PATTERN,
21
+ HOSTNAME,
22
+ InproperFormattedFilename,
23
+ read_json_bz2_metadata,
24
+ )
25
+ from OTVision.track.model.filebased.frame_group import FrameGroup, FrameGroupParser
26
+ from OTVision.track.parser.chunk_parser_plugins import parse_datetime
27
+
28
+ MISSING_START_DATE = datetime(1900, 1, 1)
29
+ MISSING_EXPECTED_DURATION = timedelta(minutes=15)
30
+
31
+
32
class TimeThresholdFrameGroupParser(FrameGroupParser):
    """Parses detection file metadata into FrameGroups and merges groups
    of the same hostname whose recordings follow each other within a
    configurable time gap."""

    def __init__(
        self, tracker_data: dict, time_without_frames: timedelta = timedelta(minutes=1)
    ):
        # Maximum gap between two recordings that still allows merging.
        self._time_without_frames = time_without_frames
        # Tracker information stamped into each group's metadata.
        self._tracker_data: dict = tracker_data
        # Running counter backing new_id().
        self._id_count = 0

    def new_id(self) -> int:
        """Return the next unique FrameGroup id (1-based, increasing)."""
        self._id_count += 1
        return self._id_count

    def parse(self, file: Path) -> FrameGroup:
        """Read the metadata of the given file and build a FrameGroup."""
        metadata = read_json_bz2_metadata(file)
        return self.convert(file, metadata)

    def convert(self, file: Path, metadata: dict) -> FrameGroup:
        """Build a single-file FrameGroup from the given metadata."""
        start_date: datetime = self.extract_start_date_from(metadata)
        duration: timedelta = self.extract_expected_duration_from(metadata)
        end_date: datetime = start_date + duration
        hostname = self.get_hostname(metadata)

        return FrameGroup(
            id=self.new_id(),
            start_date=start_date,
            end_date=end_date,
            files=[file],
            metadata_by_file={file: metadata},
            hostname=hostname,
        )

    def get_hostname(self, file_metadata: dict) -> str:
        """Extract the recording hostname from the video file name.

        Raises:
            InproperFormattedFilename: if the video file name does not
                match FULL_FILE_NAME_PATTERN.
        """
        video_name = Path(file_metadata[VIDEO][FILENAME]).name
        match = re.search(
            FULL_FILE_NAME_PATTERN,
            video_name,
        )
        if match:
            return match.group(HOSTNAME)

        raise InproperFormattedFilename(f"Could not parse {video_name}.")

    def extract_start_date_from(self, metadata: dict) -> datetime:
        """Recorded start date from metadata, or MISSING_START_DATE."""
        # Membership test on the dict itself; no need for .keys().
        if RECORDED_START_DATE in metadata[VIDEO]:
            recorded_start_date = metadata[VIDEO][RECORDED_START_DATE]
            return parse_datetime(recorded_start_date)
        return MISSING_START_DATE

    def extract_expected_duration_from(self, metadata: dict) -> timedelta:
        """Expected recording duration from metadata, falling back to the
        actual video length if no truthy expected duration is present.

        NOTE(review): MISSING_EXPECTED_DURATION is defined at module level
        but never used here — confirm the video-length fallback is intended
        in all cases.
        """
        if EXPECTED_DURATION in metadata[VIDEO]:
            if expected_duration := metadata[VIDEO][EXPECTED_DURATION]:
                return timedelta(seconds=int(expected_duration))
        return self.parse_video_length(metadata)

    def parse_video_length(self, metadata: dict) -> timedelta:
        """Video length ("%H:%M:%S") as timedelta.

        Note: strptime limits parseable lengths to below 24 hours.
        """
        video_length = metadata[VIDEO][LENGTH]
        time = datetime.strptime(video_length, "%H:%M:%S")
        return timedelta(hours=time.hour, minutes=time.minute, seconds=time.second)

    def update_metadata(self, frame_group: FrameGroup) -> dict[Path, dict]:
        """Stamp version and tracking information into each file's metadata.

        Note: the inner metadata dicts are shared with the frame group and
        mutated in place; only the outer mapping is copied.
        """
        metadata_by_file = dict(frame_group.metadata_by_file)
        for filepath in frame_group.files:
            metadata = metadata_by_file[filepath]
            metadata[OTTRACK_VERSION] = version.ottrack_version()
            metadata[TRACKING] = {
                OTVISION_VERSION: version.otvision_version(),
                FIRST_TRACKED_VIDEO_START: frame_group.start_date.timestamp(),
                LAST_TRACKED_VIDEO_END: frame_group.end_date.timestamp(),
                TRACKER: self._tracker_data,
            }

        return metadata_by_file

    def merge(self, frame_groups: list[FrameGroup]) -> list[FrameGroup]:
        """Merge chronologically adjacent FrameGroups of the same hostname.

        Two consecutive groups (sorted by start date) are merged when the
        gap between the earlier group's end and the later group's start is
        non-negative and at most self._time_without_frames.

        Args:
            frame_groups (list[FrameGroup]): groups to merge.

        Returns:
            list[FrameGroup]: merged groups (empty list for empty input).
        """
        if len(frame_groups) == 0:
            return []

        merged_groups = []
        sorted_groups = sorted(frame_groups, key=lambda group: group.start_date)
        last_group = sorted_groups[0]
        for current_group in sorted_groups[1:]:
            gap = current_group.start_date - last_group.end_date
            # Single merge condition replaces the previous duplicated
            # append/advance branches (behavior unchanged).
            if (
                last_group.hostname == current_group.hostname
                and timedelta(seconds=0) <= gap <= self._time_without_frames
            ):
                last_group = last_group.merge(current_group)
            else:
                merged_groups.append(last_group)
                last_group = current_group
        merged_groups.append(last_group)
        return merged_groups