supervisely-6.73.438-py3-none-any.whl → supervisely-6.73.513-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (203)
  1. supervisely/__init__.py +137 -1
  2. supervisely/_utils.py +81 -0
  3. supervisely/annotation/annotation.py +8 -2
  4. supervisely/annotation/json_geometries_map.py +14 -11
  5. supervisely/annotation/label.py +80 -3
  6. supervisely/api/annotation_api.py +14 -11
  7. supervisely/api/api.py +59 -38
  8. supervisely/api/app_api.py +11 -2
  9. supervisely/api/dataset_api.py +74 -12
  10. supervisely/api/entities_collection_api.py +10 -0
  11. supervisely/api/entity_annotation/figure_api.py +52 -4
  12. supervisely/api/entity_annotation/object_api.py +3 -3
  13. supervisely/api/entity_annotation/tag_api.py +63 -12
  14. supervisely/api/guides_api.py +210 -0
  15. supervisely/api/image_api.py +72 -1
  16. supervisely/api/labeling_job_api.py +83 -1
  17. supervisely/api/labeling_queue_api.py +33 -7
  18. supervisely/api/module_api.py +9 -0
  19. supervisely/api/project_api.py +71 -26
  20. supervisely/api/storage_api.py +3 -1
  21. supervisely/api/task_api.py +13 -2
  22. supervisely/api/team_api.py +4 -3
  23. supervisely/api/video/video_annotation_api.py +119 -3
  24. supervisely/api/video/video_api.py +65 -14
  25. supervisely/api/video/video_figure_api.py +24 -11
  26. supervisely/app/__init__.py +1 -1
  27. supervisely/app/content.py +23 -7
  28. supervisely/app/development/development.py +18 -2
  29. supervisely/app/fastapi/__init__.py +1 -0
  30. supervisely/app/fastapi/custom_static_files.py +1 -1
  31. supervisely/app/fastapi/multi_user.py +105 -0
  32. supervisely/app/fastapi/subapp.py +88 -42
  33. supervisely/app/fastapi/websocket.py +77 -9
  34. supervisely/app/singleton.py +21 -0
  35. supervisely/app/v1/app_service.py +18 -2
  36. supervisely/app/v1/constants.py +7 -1
  37. supervisely/app/widgets/__init__.py +6 -0
  38. supervisely/app/widgets/activity_feed/__init__.py +0 -0
  39. supervisely/app/widgets/activity_feed/activity_feed.py +239 -0
  40. supervisely/app/widgets/activity_feed/style.css +78 -0
  41. supervisely/app/widgets/activity_feed/template.html +22 -0
  42. supervisely/app/widgets/card/card.py +20 -0
  43. supervisely/app/widgets/classes_list_selector/classes_list_selector.py +121 -9
  44. supervisely/app/widgets/classes_list_selector/template.html +60 -93
  45. supervisely/app/widgets/classes_mapping/classes_mapping.py +13 -12
  46. supervisely/app/widgets/classes_table/classes_table.py +1 -0
  47. supervisely/app/widgets/deploy_model/deploy_model.py +56 -35
  48. supervisely/app/widgets/dialog/dialog.py +12 -0
  49. supervisely/app/widgets/dialog/template.html +2 -1
  50. supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +1 -1
  51. supervisely/app/widgets/experiment_selector/experiment_selector.py +8 -0
  52. supervisely/app/widgets/fast_table/fast_table.py +184 -60
  53. supervisely/app/widgets/fast_table/template.html +1 -1
  54. supervisely/app/widgets/heatmap/__init__.py +0 -0
  55. supervisely/app/widgets/heatmap/heatmap.py +564 -0
  56. supervisely/app/widgets/heatmap/script.js +533 -0
  57. supervisely/app/widgets/heatmap/style.css +233 -0
  58. supervisely/app/widgets/heatmap/template.html +21 -0
  59. supervisely/app/widgets/modal/__init__.py +0 -0
  60. supervisely/app/widgets/modal/modal.py +198 -0
  61. supervisely/app/widgets/modal/template.html +10 -0
  62. supervisely/app/widgets/object_class_view/object_class_view.py +3 -0
  63. supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
  64. supervisely/app/widgets/radio_tabs/template.html +1 -0
  65. supervisely/app/widgets/select/select.py +6 -3
  66. supervisely/app/widgets/select_class/__init__.py +0 -0
  67. supervisely/app/widgets/select_class/select_class.py +363 -0
  68. supervisely/app/widgets/select_class/template.html +50 -0
  69. supervisely/app/widgets/select_cuda/select_cuda.py +22 -0
  70. supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +65 -7
  71. supervisely/app/widgets/select_tag/__init__.py +0 -0
  72. supervisely/app/widgets/select_tag/select_tag.py +352 -0
  73. supervisely/app/widgets/select_tag/template.html +64 -0
  74. supervisely/app/widgets/select_team/select_team.py +37 -4
  75. supervisely/app/widgets/select_team/template.html +4 -5
  76. supervisely/app/widgets/select_user/__init__.py +0 -0
  77. supervisely/app/widgets/select_user/select_user.py +270 -0
  78. supervisely/app/widgets/select_user/template.html +13 -0
  79. supervisely/app/widgets/select_workspace/select_workspace.py +59 -10
  80. supervisely/app/widgets/select_workspace/template.html +9 -12
  81. supervisely/app/widgets/table/table.py +68 -13
  82. supervisely/app/widgets/tree_select/tree_select.py +2 -0
  83. supervisely/aug/aug.py +6 -2
  84. supervisely/convert/base_converter.py +1 -0
  85. supervisely/convert/converter.py +2 -2
  86. supervisely/convert/image/csv/csv_converter.py +24 -15
  87. supervisely/convert/image/image_converter.py +3 -1
  88. supervisely/convert/image/image_helper.py +48 -4
  89. supervisely/convert/image/label_studio/label_studio_converter.py +2 -0
  90. supervisely/convert/image/medical2d/medical2d_helper.py +2 -24
  91. supervisely/convert/image/multispectral/multispectral_converter.py +6 -0
  92. supervisely/convert/image/pascal_voc/pascal_voc_converter.py +8 -5
  93. supervisely/convert/image/pascal_voc/pascal_voc_helper.py +7 -0
  94. supervisely/convert/pointcloud/kitti_3d/kitti_3d_converter.py +33 -3
  95. supervisely/convert/pointcloud/kitti_3d/kitti_3d_helper.py +12 -5
  96. supervisely/convert/pointcloud/las/las_converter.py +13 -1
  97. supervisely/convert/pointcloud/las/las_helper.py +110 -11
  98. supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +27 -16
  99. supervisely/convert/pointcloud/pointcloud_converter.py +91 -3
  100. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +58 -22
  101. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +21 -47
  102. supervisely/convert/video/__init__.py +1 -0
  103. supervisely/convert/video/multi_view/__init__.py +0 -0
  104. supervisely/convert/video/multi_view/multi_view.py +543 -0
  105. supervisely/convert/video/sly/sly_video_converter.py +359 -3
  106. supervisely/convert/video/video_converter.py +24 -4
  107. supervisely/convert/volume/dicom/dicom_converter.py +13 -5
  108. supervisely/convert/volume/dicom/dicom_helper.py +30 -18
  109. supervisely/geometry/constants.py +1 -0
  110. supervisely/geometry/geometry.py +4 -0
  111. supervisely/geometry/helpers.py +5 -1
  112. supervisely/geometry/oriented_bbox.py +676 -0
  113. supervisely/geometry/polyline_3d.py +110 -0
  114. supervisely/geometry/rectangle.py +2 -1
  115. supervisely/io/env.py +76 -1
  116. supervisely/io/fs.py +21 -0
  117. supervisely/nn/benchmark/base_evaluator.py +104 -11
  118. supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -8
  119. supervisely/nn/benchmark/object_detection/evaluator.py +20 -4
  120. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +10 -5
  121. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +34 -16
  122. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +1 -1
  123. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +1 -1
  124. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +1 -1
  125. supervisely/nn/benchmark/visualization/evaluation_result.py +66 -4
  126. supervisely/nn/inference/cache.py +43 -18
  127. supervisely/nn/inference/gui/serving_gui_template.py +5 -2
  128. supervisely/nn/inference/inference.py +916 -222
  129. supervisely/nn/inference/inference_request.py +55 -10
  130. supervisely/nn/inference/predict_app/gui/classes_selector.py +83 -12
  131. supervisely/nn/inference/predict_app/gui/gui.py +676 -488
  132. supervisely/nn/inference/predict_app/gui/input_selector.py +205 -26
  133. supervisely/nn/inference/predict_app/gui/model_selector.py +2 -4
  134. supervisely/nn/inference/predict_app/gui/output_selector.py +46 -6
  135. supervisely/nn/inference/predict_app/gui/settings_selector.py +756 -59
  136. supervisely/nn/inference/predict_app/gui/tags_selector.py +1 -1
  137. supervisely/nn/inference/predict_app/gui/utils.py +236 -119
  138. supervisely/nn/inference/predict_app/predict_app.py +2 -2
  139. supervisely/nn/inference/session.py +43 -35
  140. supervisely/nn/inference/tracking/bbox_tracking.py +118 -35
  141. supervisely/nn/inference/tracking/point_tracking.py +5 -1
  142. supervisely/nn/inference/tracking/tracker_interface.py +10 -1
  143. supervisely/nn/inference/uploader.py +139 -12
  144. supervisely/nn/live_training/__init__.py +7 -0
  145. supervisely/nn/live_training/api_server.py +111 -0
  146. supervisely/nn/live_training/artifacts_utils.py +243 -0
  147. supervisely/nn/live_training/checkpoint_utils.py +229 -0
  148. supervisely/nn/live_training/dynamic_sampler.py +44 -0
  149. supervisely/nn/live_training/helpers.py +14 -0
  150. supervisely/nn/live_training/incremental_dataset.py +146 -0
  151. supervisely/nn/live_training/live_training.py +497 -0
  152. supervisely/nn/live_training/loss_plateau_detector.py +111 -0
  153. supervisely/nn/live_training/request_queue.py +52 -0
  154. supervisely/nn/model/model_api.py +9 -0
  155. supervisely/nn/model/prediction.py +2 -1
  156. supervisely/nn/model/prediction_session.py +26 -14
  157. supervisely/nn/prediction_dto.py +19 -1
  158. supervisely/nn/tracker/base_tracker.py +11 -1
  159. supervisely/nn/tracker/botsort/botsort_config.yaml +0 -1
  160. supervisely/nn/tracker/botsort/tracker/mc_bot_sort.py +7 -4
  161. supervisely/nn/tracker/botsort_tracker.py +94 -65
  162. supervisely/nn/tracker/utils.py +4 -5
  163. supervisely/nn/tracker/visualize.py +93 -93
  164. supervisely/nn/training/gui/classes_selector.py +16 -1
  165. supervisely/nn/training/gui/train_val_splits_selector.py +52 -31
  166. supervisely/nn/training/train_app.py +46 -31
  167. supervisely/project/data_version.py +115 -51
  168. supervisely/project/download.py +1 -1
  169. supervisely/project/pointcloud_episode_project.py +37 -8
  170. supervisely/project/pointcloud_project.py +30 -2
  171. supervisely/project/project.py +14 -2
  172. supervisely/project/project_meta.py +27 -1
  173. supervisely/project/project_settings.py +32 -18
  174. supervisely/project/versioning/__init__.py +1 -0
  175. supervisely/project/versioning/common.py +20 -0
  176. supervisely/project/versioning/schema_fields.py +35 -0
  177. supervisely/project/versioning/video_schema.py +221 -0
  178. supervisely/project/versioning/volume_schema.py +87 -0
  179. supervisely/project/video_project.py +717 -15
  180. supervisely/project/volume_project.py +623 -5
  181. supervisely/template/experiment/experiment.html.jinja +4 -4
  182. supervisely/template/experiment/experiment_generator.py +14 -21
  183. supervisely/template/live_training/__init__.py +0 -0
  184. supervisely/template/live_training/header.html.jinja +96 -0
  185. supervisely/template/live_training/live_training.html.jinja +51 -0
  186. supervisely/template/live_training/live_training_generator.py +464 -0
  187. supervisely/template/live_training/sly-style.css +402 -0
  188. supervisely/template/live_training/template.html.jinja +18 -0
  189. supervisely/versions.json +28 -26
  190. supervisely/video/sampling.py +39 -20
  191. supervisely/video/video.py +41 -12
  192. supervisely/video_annotation/video_figure.py +38 -4
  193. supervisely/video_annotation/video_object.py +29 -4
  194. supervisely/volume/stl_converter.py +2 -0
  195. supervisely/worker_api/agent_rpc.py +24 -1
  196. supervisely/worker_api/rpc_servicer.py +31 -7
  197. {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/METADATA +58 -40
  198. {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/RECORD +203 -155
  199. {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/WHEEL +1 -1
  200. supervisely_lib/__init__.py +6 -1
  201. {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/entry_points.txt +0 -0
  202. {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info/licenses}/LICENSE +0 -0
  203. {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/top_level.txt +0 -0
supervisely/nn/tracker/visualize.py

@@ -1,21 +1,21 @@
-from typing import Union, Dict, List, Tuple, Iterator, Optional
-import numpy as np
+import shutil
+import tempfile
+from collections import defaultdict
+from pathlib import Path
+from typing import Iterator, List, Optional, Tuple, Union
+
 import cv2
 import ffmpeg
-from pathlib import Path
-from collections import defaultdict
-from dataclasses import dataclass
-import tempfile
-import shutil
+import numpy as np
 
-import supervisely as sly
-from supervisely import logger
+from supervisely import VideoAnnotation, logger
+from supervisely.geometry.geometry import Geometry
 from supervisely.nn.model.prediction import Prediction
-from supervisely import VideoAnnotation
 from supervisely.nn.tracker.utils import predictions_to_video_annotation
 
 
 class TrackingVisualizer:
+
     def __init__(
         self,
         show_labels: bool = True,
@@ -29,7 +29,7 @@ class TrackingVisualizer:
         codec: str = "mp4",
         output_fps: float = 30.0,
         colorize_tracks: bool = True,
-
+        trajectory_thickness: int = 2,
     ):
         """
         Initialize the visualizer with configuration.
@@ -58,6 +58,7 @@ class TrackingVisualizer:
         self.text_scale = text_scale
         self.text_thickness = text_thickness
         self.trajectory_length = trajectory_length
+        self.trajectory_thickness = trajectory_thickness
         self.colorize_tracks = colorize_tracks
 
         # Output settings
@@ -71,7 +72,7 @@ class TrackingVisualizer:
         self.track_colors = {}
         self.color_palette = self._generate_color_palette()
         self._temp_dir = None
-
+
     def _generate_color_palette(self, num_colors: int = 100) -> List[Tuple[int, int, int]]:
         """
         Generate bright, distinct color palette for track visualization.
@@ -88,11 +89,11 @@ class TrackingVisualizer:
             bgr_color = cv2.cvtColor(hsv_color, cv2.COLOR_HSV2BGR)[0][0]
             colors.append(tuple(map(int, bgr_color)))
         return colors
-
+
     def _get_track_color(self, track_id: int) -> Tuple[int, int, int]:
         """Get consistent color for track ID from palette."""
         return self.color_palette[track_id % len(self.color_palette)]
-
+
     def _get_video_info(self, video_path: Path) -> Tuple[int, int, float, int]:
         """
         Get video metadata using ffmpeg.
@@ -104,13 +105,13 @@ class TrackingVisualizer:
             probe = ffmpeg.probe(str(video_path))
             video_stream = next((stream for stream in probe['streams']
                                  if stream['codec_type'] == 'video'), None)
-
+
             if video_stream is None:
                 raise ValueError(f"No video stream found in: {video_path}")
-
+
             width = int(video_stream['width'])
             height = int(video_stream['height'])
-
+
             # Extract FPS
             fps_str = video_stream.get('r_frame_rate', '30/1')
             if '/' in fps_str:
@@ -118,19 +119,19 @@ class TrackingVisualizer:
                 fps = num / den if den != 0 else 30.0
             else:
                 fps = float(fps_str)
-
+
             # Get total frames
             total_frames = int(video_stream.get('nb_frames', 0))
             if total_frames == 0:
                 # Fallback: estimate from duration and fps
                 duration = float(video_stream.get('duration', 0))
                 total_frames = int(duration * fps) if duration > 0 else 0
-
+
             return width, height, fps, total_frames
-
+
         except Exception as e:
            raise ValueError(f"Could not read video metadata {video_path}: {str(e)}")
-
+
     def _create_frame_iterator(self, source: Union[str, Path]) -> Iterator[Tuple[int, np.ndarray]]:
         """
         Create iterator that yields (frame_index, frame) tuples.
@@ -142,67 +143,70 @@ class TrackingVisualizer:
             Tuple of (frame_index, frame_array)
         """
         source = Path(source)
-
+
         if source.is_file():
             yield from self._iterate_video_frames(source)
         elif source.is_dir():
             yield from self._iterate_directory_frames(source)
         else:
             raise ValueError(f"Source must be a video file or directory, got: {source}")
-
+
     def _iterate_video_frames(self, video_path: Path) -> Iterator[Tuple[int, np.ndarray]]:
         """Iterate through video frames using ffmpeg."""
         width, height, fps, total_frames = self._get_video_info(video_path)
-
+
         # Store video info for later use
         self.source_fps = fps
         self.frame_size = (width, height)
-
+
         process = (
             ffmpeg
             .input(str(video_path))
-            .output('pipe:', format='rawvideo', pix_fmt='bgr24')
-            .run_async(pipe_stdout=True, pipe_stderr=True)
+            .output('pipe:', format='rawvideo', pix_fmt='bgr24', loglevel='quiet')
+            .run_async(pipe_stdout=True, pipe_stderr=False)
         )
-
+
         try:
             frame_size_bytes = width * height * 3
             frame_idx = 0
-
+
             while True:
                 frame_data = process.stdout.read(frame_size_bytes)
                 if len(frame_data) != frame_size_bytes:
                     break
-
+
                 frame = np.frombuffer(frame_data, np.uint8).reshape([height, width, 3])
                 yield frame_idx, frame
                 frame_idx += 1
-
+
+        except ffmpeg.Error as e:
+            logger.error(f"ffmpeg error: {e.stderr.decode() if e.stderr else str(e)}", exc_info=True)
+
         finally:
             process.stdout.close()
             if process.stderr:
                 process.stderr.close()
             process.wait()
-
+
     def _iterate_directory_frames(self, frames_dir: Path) -> Iterator[Tuple[int, np.ndarray]]:
         """Iterate through image frames in directory."""
         if not frames_dir.is_dir():
             raise ValueError(f"Directory does not exist: {frames_dir}")
-
+
         # Support common image extensions
         extensions = ['.jpg', '.jpeg', '.png', '.bmp', '.tiff']
         image_files = []
         for ext in extensions:
             image_files.extend(frames_dir.glob(f'*{ext}'))
             image_files.extend(frames_dir.glob(f'*{ext.upper()}'))
-
+
         image_files = sorted(image_files)
         if not image_files:
             raise ValueError(f"No image files found in directory: {frames_dir}")
-
+
         # Set fps from config for image sequences
         self.source_fps = self.output_fps
-
+
         for frame_idx, img_path in enumerate(image_files):
             frame = cv2.imread(str(img_path))
             if frame is not None:
@@ -212,7 +216,7 @@ class TrackingVisualizer:
                 yield frame_idx, frame
             else:
                 logger.warning(f"Could not read image: {img_path}")
-
+
     def _extract_tracks_from_annotation(self) -> None:
         """
         Extract tracking data from Supervisely VideoAnnotation.
@@ -221,29 +225,22 @@ class TrackingVisualizer:
         """
         self.tracks_by_frame = defaultdict(list)
         self.track_colors = {}
-
+
         # Map object keys to track info
         objects = {}
         for i, obj in enumerate(self.annotation.objects):
            objects[obj.key] = (i, obj.obj_class.name)
-
+
         # Extract tracks from frames
         for frame in self.annotation.frames:
             frame_idx = frame.index
             for figure in frame.figures:
-                if figure.geometry.geometry_name() != 'rectangle':
-                    continue
-
                 object_key = figure.parent_object.key
                 if object_key not in objects:
                     continue
-
+
                 track_id, class_name = objects[object_key]
-
-                # Extract bbox coordinates
-                rect = figure.geometry
-                bbox = (rect.left, rect.top, rect.right, rect.bottom)
-
+
                 if track_id not in self.track_colors:
                     if self.colorize_tracks:
                         # auto-color override everything
@@ -260,26 +257,30 @@ class TrackingVisualizer:
 
                     self.track_colors[track_id] = color
 
-
-                self.tracks_by_frame[frame_idx].append((track_id, bbox, class_name))
-
+                self.tracks_by_frame[frame_idx].append((track_id, figure.geometry, class_name))
+
         logger.info(f"Extracted tracks from {len(self.tracks_by_frame)} frames")
-
-    def _draw_detection(self, img: np.ndarray, track_id: int, bbox: Tuple[int, int, int, int],
-                        class_name: str) -> Optional[Tuple[int, int]]:
+
+    def _draw_detection(
+        self, img: np.ndarray, track_id: int, geometry: Geometry, class_name: str
+    ) -> Optional[Tuple[int, int]]:
         """
         Draw single detection with track ID and class label.
         Returns the center point of the bbox for trajectory drawing.
         """
+        rect = geometry.to_bbox()
+        bbox = (rect.left, rect.top, rect.right, rect.bottom)
+
         x1, y1, x2, y2 = map(int, bbox)
 
         if x2 <= x1 or y2 <= y1:
             return None
-
+
         color = self.track_colors[track_id]
 
         # Draw bounding box
-        cv2.rectangle(img, (x1, y1), (x2, y2), color, self.box_thickness)
+        geometry.draw_contour(img, color=color, thickness=self.box_thickness)
+        # cv2.rectangle(img, (x1, y1), (x2, y2), color, self.box_thickness)
 
         # Draw label if enabled
         if self.show_labels:
@@ -301,7 +302,6 @@ class TrackingVisualizer:
         # Return center point for trajectory
         return (x1 + x2) // 2, (y1 + y2) // 2
 
-
     def _draw_trajectories(self, img: np.ndarray) -> None:
         """Draw trajectory lines for all tracks, filtering out big jumps."""
         if not self.show_trajectories:
@@ -309,24 +309,24 @@ class TrackingVisualizer:
 
         max_jump = 200
 
-        for track_id, centers in self.track_centers.items():
-            if len(centers) < 2:
+        for centers_with_colors in self.track_centers.values():
+
+            if len(centers_with_colors) < 2:
                 continue
 
-            color = self.track_colors[track_id]
-            points = centers[-self.trajectory_length:]
+            points, colors = zip(*centers_with_colors[-self.trajectory_length :])
 
             for i in range(1, len(points)):
+                color = colors[i]
                 p1, p2 = points[i - 1], points[i]
                 if p1 is None or p2 is None:
                     continue
-
+
                 if np.hypot(p2[0] - p1[0], p2[1] - p1[1]) > max_jump:
                     continue
-                cv2.line(img, p1, p2, color, 2)
+                cv2.line(img, p1, p2, color, self.trajectory_thickness)
                 cv2.circle(img, p1, 3, color, -1)
 
-
     def _process_single_frame(self, frame: np.ndarray, frame_idx: int) -> np.ndarray:
         """
         Process single frame: add annotations and return processed frame.
@@ -342,29 +342,30 @@ class TrackingVisualizer:
         active_ids = set()
         # Draw detections for current frame
         if frame_idx in self.tracks_by_frame:
-            for track_id, bbox, class_name in self.tracks_by_frame[frame_idx]:
-                center = self._draw_detection(img, track_id, bbox, class_name)
-                self.track_centers[track_id].append(center)
+            for track_id, geometry, class_name in self.tracks_by_frame[frame_idx]:
+                center = self._draw_detection(img, track_id, geometry, class_name)
+                color = self.track_colors[track_id]
+                self.track_centers[track_id].append((center, color))
                 if len(self.track_centers[track_id]) > self.trajectory_length:
                     self.track_centers[track_id].pop(0)
                 active_ids.add(track_id)
-
+
         for tid in self.track_centers.keys():
             if tid not in active_ids:
-                self.track_centers[tid].append(None)
+                self.track_centers[tid].append((None, None))
                 if len(self.track_centers[tid]) > self.trajectory_length:
                     self.track_centers[tid].pop(0)
-
+
         # Draw trajectories
         self._draw_trajectories(img)
-
+
         # Add frame number if requested
         if self.show_frame_number:
             cv2.putText(img, f"Frame: {frame_idx + 1}", (10, 30),
                         cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
-
+
         return img
-
+
     def _save_processed_frame(self, frame: np.ndarray, frame_idx: int) -> str:
         """
         Save processed frame to temporary directory.
@@ -379,7 +380,7 @@ class TrackingVisualizer:
         frame_path = self._temp_dir / f"frame_{frame_idx:08d}.jpg"
         cv2.imwrite(str(frame_path), frame, [cv2.IMWRITE_JPEG_QUALITY, 95])
         return str(frame_path)
-
+
     def _create_video_from_frames(self, output_path: Union[str, Path]) -> None:
         """
         Create final video from processed frames using ffmpeg.
@@ -389,10 +390,10 @@ class TrackingVisualizer:
         """
         output_path = Path(output_path)
         output_path.parent.mkdir(parents=True, exist_ok=True)
-
+
         # Create video from frame sequence
         input_pattern = str(self._temp_dir / "frame_%08d.jpg")
-
+
         try:
             (
                 ffmpeg
@@ -402,17 +403,17 @@ class TrackingVisualizer:
                 .run(capture_stdout=True, capture_stderr=True)
             )
             logger.info(f"Video saved to {output_path}")
-
+
         except ffmpeg.Error as e:
             error_msg = e.stderr.decode() if e.stderr else "Unknown ffmpeg error"
             raise ValueError(f"Failed to create video: {error_msg}")
-
+
     def _cleanup_temp_directory(self) -> None:
         """Clean up temporary directory and all its contents."""
         if self._temp_dir and self._temp_dir.exists():
             shutil.rmtree(self._temp_dir)
             self._temp_dir = None
-
+
     def visualize_video_annotation(self, annotation: VideoAnnotation,
                                    source: Union[str, Path],
                                    output_path: Union[str, Path]) -> None:
@@ -430,43 +431,43 @@ class TrackingVisualizer:
         """
         if not isinstance(annotation, VideoAnnotation):
             raise TypeError(f"Annotation must be VideoAnnotation, got {type(annotation)}")
-
+
         # Store annotation
         self.annotation = annotation
-
+
         # Create temporary directory for processed frames
         self._temp_dir = Path(tempfile.mkdtemp(prefix="video_viz_"))
-
+
         try:
             # Extract tracking data
             self._extract_tracks_from_annotation()
-
+
             if not self.tracks_by_frame:
                 logger.warning("No tracking data found in annotation")
-
+
             # Reset trajectory tracking
             self.track_centers = defaultdict(list)
-
+
             # Process frames one by one
             frame_count = 0
             for frame_idx, frame in self._create_frame_iterator(source):
                 # Process frame
                 processed_frame = self._process_single_frame(frame, frame_idx)
-
+
                 # Save processed frame
                 self._save_processed_frame(processed_frame, frame_idx)
-
+
                 frame_count += 1
-
+
                 # Progress logging
                 if frame_count % 100 == 0:
                     logger.info(f"Processed {frame_count} frames")
-
+
             logger.info(f"Finished processing {frame_count} frames")
-
+
             # Create final video from saved frames
             self._create_video_from_frames(output_path)
-
+
         finally:
             # Always cleanup temporary files
             self._cleanup_temp_directory()
@@ -474,7 +475,7 @@ class TrackingVisualizer:
     def __del__(self):
         """Cleanup temporary directory on object destruction."""
         self._cleanup_temp_directory()
-
+
 
 def visualize(
     predictions: Union[VideoAnnotation, List[Prediction]],
@@ -516,4 +517,3 @@ def visualize(
         visualizer.visualize_video_annotation(predictions, source, output_path)
     else:
         raise TypeError(f"Predictions must be VideoAnnotation or list of Prediction, got {type(predictions)}")
-
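For context, a minimal usage sketch of the updated visualizer based on the hunks above. The paths are placeholders and video_ann is assumed to be a VideoAnnotation produced elsewhere (e.g. by a tracker); only names visible in this diff are used.

from supervisely.nn.tracker.visualize import TrackingVisualizer

# video_ann: a supervisely.VideoAnnotation obtained elsewhere (placeholder)
visualizer = TrackingVisualizer(
    show_labels=True,
    colorize_tracks=True,
    output_fps=30.0,
    trajectory_thickness=2,  # new option in this version; previously hard-coded to 2
)
visualizer.visualize_video_annotation(
    annotation=video_ann,
    source="input/video.mp4",         # a video file or a directory of frames
    output_path="output/tracks.mp4",
)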
supervisely/nn/training/gui/classes_selector.py

@@ -51,7 +51,19 @@ class ClassesSelector:
             text=f"<i class='zmdi zmdi-chart-donut' style='color: #7f858e'></i> <a href='{qa_stats_link}' target='_blank'> <b> QA & Stats </b></a>"
         )
 
-        self.classes_table = ClassesTable(project_id=project_id)
+        models = model_selector.models
+        task_types = [model["meta"]["task_type"] for model in models]
+        task_types = list(set(task_types))
+        allowed_types = []
+        for task_type in task_types:
+            if task_type.endswith("detection"):
+                allowed_types.append(Rectangle)
+            elif task_type.endswith("segmentation"):
+                allowed_types.extend([Bitmap, Polygon])
+            elif task_type == TaskType.POSE_ESTIMATION:
+                allowed_types.append(GraphNodes)
+
+        self.classes_table = ClassesTable(project_id=project_id, allowed_types=allowed_types)
         if len(classes) > 0:
             self.classes_table.select_classes(classes)
         else:
@@ -107,6 +119,9 @@ class ClassesSelector:
             TaskType.INSTANCE_SEGMENTATION: {Bitmap},
             TaskType.SEMANTIC_SEGMENTATION: {Bitmap},
             TaskType.POSE_ESTIMATION: {GraphNodes},
+            TaskType.PROMPTABLE_SEGMENTATION: {Bitmap},
+            TaskType.INTERACTIVE_SEGMENTATION: {Bitmap},
+            TaskType.PROMPT_BASED_OBJECT_DETECTION: {Rectangle},
         }
 
         if task_type not in allowed_shapes:
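The first hunk above derives the geometry types allowed in the classes table from the task types of the available models. A standalone sketch of that rule (the TaskType and geometry import paths are assumed from the SDK layout):

from supervisely.geometry.bitmap import Bitmap
from supervisely.geometry.graph import GraphNodes
from supervisely.geometry.polygon import Polygon
from supervisely.geometry.rectangle import Rectangle
from supervisely.nn.task_type import TaskType


def allowed_geometry_types(task_types):
    # Map model task types to the label geometries a user may select.
    allowed = []
    for task_type in set(task_types):
        if task_type.endswith("detection"):
            allowed.append(Rectangle)
        elif task_type.endswith("segmentation"):
            allowed.extend([Bitmap, Polygon])
        elif task_type == TaskType.POSE_ESTIMATION:
            allowed.append(GraphNodes)
    return allowed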
supervisely/nn/training/gui/train_val_splits_selector.py

@@ -180,7 +180,13 @@ class TrainValSplitsSelector:
             return False
 
         # Check if datasets are not empty
-        filters = [{ ApiField.FIELD: ApiField.ID, ApiField.OPERATOR: "in", ApiField.VALUE: train_dataset_id + val_dataset_id}]
+        filters = [
+            {
+                ApiField.FIELD: ApiField.ID,
+                ApiField.OPERATOR: "in",
+                ApiField.VALUE: train_dataset_id + val_dataset_id,
+            }
+        ]
         selected_datasets = self.api.dataset.get_list(self.project_id, filters, recursive=True)
         datasets_count = {}
         for dataset in selected_datasets:
@@ -334,6 +340,7 @@ class TrainValSplitsSelector:
 
     def _detect_splits(self, collections_split: bool, datasets_split: bool) -> bool:
         """Detect splits based on the selected method"""
+        self._parse_collections()
         splits_found = False
         if collections_split:
             splits_found = self._detect_collections()
@@ -341,47 +348,59 @@ class TrainValSplitsSelector:
             splits_found = self._detect_datasets()
         return splits_found
 
+    def _parse_collections(self) -> None:
+        """Parse collections with train and val prefixes and set them to train_val_splits variables"""
+        all_collections = self.api.entities_collection.get_list(self.project_id)
+        existing_train_collections = [
+            collection for collection in all_collections if collection.name.startswith("train_")
+        ]
+        existing_val_collections = [
+            collection for collection in all_collections if collection.name.startswith("val_")
+        ]
+
+        self._all_train_collections = existing_train_collections
+        self._all_val_collections = existing_val_collections
+        self._latest_train_collection = self._get_latest_collection(existing_train_collections, "train")
+        self._latest_val_collection = self._get_latest_collection(existing_val_collections, "val")
+
+    def _get_latest_collection(
+        self, collections: List[EntitiesCollectionInfo], expected_prefix: str
+    ) -> EntitiesCollectionInfo:
+        curr_collection = None
+        curr_idx = 0
+        for collection in collections:
+            parts = collection.name.split("_")
+            if len(parts) == 2:
+                prefix = parts[0].lower()
+                if prefix == expected_prefix:
+                    if parts[1].isdigit():
+                        collection_idx = int(parts[1])
+                        if collection_idx > curr_idx:
+                            curr_idx = collection_idx
+                            curr_collection = collection
+        return curr_collection
+
+
     def _detect_collections(self) -> bool:
         """Find collections with train and val prefixes and set them to train_val_splits"""
-        def _get_latest_collection(collections: List[EntitiesCollectionInfo]) -> EntitiesCollectionInfo:
-            curr_collection = None
-            curr_idx = 0
-            for collection in collections:
-                collection_idx = int(collection.name.rsplit('_', 1)[-1])
-                if collection_idx > curr_idx:
-                    curr_idx = collection_idx
-                    curr_collection = collection
-            return curr_collection
 
-        all_collections = self.api.entities_collection.get_list(self.project_id)
-        train_collections = []
-        val_collections = []
         collections_found = False
-        for collection in all_collections:
-            if collection.name.lower().startswith("train_"):
-                train_collections.append(collection)
-            elif collection.name.lower().startswith("val_"):
-                val_collections.append(collection)
-
-        train_collection = _get_latest_collection(train_collections)
-        val_collection = _get_latest_collection(val_collections)
-        if train_collection is not None and val_collection is not None:
-            self.train_val_splits.set_collections_splits([train_collection.id], [val_collection.id])
+        if self._latest_train_collection is not None and self._latest_val_collection is not None:
+            self.train_val_splits.set_collections_splits(
+                [self._latest_train_collection.id], [self._latest_val_collection.id]
+            )
             self.validator_text = Text("Train and val collections are detected", status="info")
             self.validator_text.show()
             collections_found = True
-            self._all_train_collections = train_collections
-            self._all_val_collections = val_collections
-            self._latest_train_collection = train_collection
-            self._latest_val_collection = val_collection
         else:
             self.validator_text = Text("")
             self.validator_text.hide()
             collections_found = False
         return collections_found
-
+
     def _detect_datasets(self) -> bool:
         """Find datasets with train and val prefixes and set them to train_val_splits"""
+
         def _extend_with_nested(root_ds):
             nested = self.api.dataset.get_nested(self.project_id, root_ds.id)
             nested_ids = [ds.id for ds in nested]
@@ -407,7 +426,9 @@ class TrainValSplitsSelector:
         val_count = len(train_val_dataset_ids["val"])
 
         if train_count > 0 and val_count > 0:
-            self.train_val_splits.set_datasets_splits(train_val_dataset_ids["train"], train_val_dataset_ids["val"])
+            self.train_val_splits.set_datasets_splits(
+                train_val_dataset_ids["train"], train_val_dataset_ids["val"]
+            )
             datasets_found = True
 
         if train_count > 0 and val_count > 0:
@@ -415,7 +436,7 @@ class TrainValSplitsSelector:
             message = "train and val datasets are detected"
         else:
             message = "Multiple train and val datasets are detected. Check manually if selection is correct"
-
+
         self.validator_text = Text(message, status="info")
         self.validator_text.show()
         datasets_found = True
@@ -423,4 +444,4 @@ class TrainValSplitsSelector:
         self.validator_text = Text("")
         self.validator_text.hide()
         datasets_found = False
-        return datasets_found
+        return datasets_found
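The new _parse_collections / _get_latest_collection helpers rely on a "train_<N>" / "val_<N>" naming convention for entity collections and pick the collection with the highest numeric suffix as the latest split. A self-contained sketch of that selection rule (plain strings stand in for EntitiesCollectionInfo objects):

from typing import List, Optional


def latest_collection(names: List[str], expected_prefix: str) -> Optional[str]:
    # Keep the name with the largest integer suffix, e.g. "train_3" beats "train_1".
    best_name, best_idx = None, 0
    for name in names:
        parts = name.split("_")
        if len(parts) == 2 and parts[0].lower() == expected_prefix and parts[1].isdigit():
            idx = int(parts[1])
            if idx > best_idx:
                best_idx, best_name = idx, name
    return best_name


print(latest_collection(["train_1", "train_3", "val_2"], "train"))  # -> train_3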