supervisely-6.73.456-py3-none-any.whl → supervisely-6.73.458-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. supervisely/__init__.py +24 -1
  2. supervisely/api/image_api.py +4 -0
  3. supervisely/api/video/video_annotation_api.py +4 -2
  4. supervisely/api/video/video_api.py +41 -1
  5. supervisely/app/v1/app_service.py +18 -2
  6. supervisely/app/v1/constants.py +7 -1
  7. supervisely/app/widgets/card/card.py +20 -0
  8. supervisely/app/widgets/deploy_model/deploy_model.py +56 -35
  9. supervisely/app/widgets/experiment_selector/experiment_selector.py +8 -0
  10. supervisely/app/widgets/fast_table/fast_table.py +45 -11
  11. supervisely/app/widgets/fast_table/template.html +1 -1
  12. supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
  13. supervisely/app/widgets/radio_tabs/template.html +1 -0
  14. supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +63 -7
  15. supervisely/app/widgets/tree_select/tree_select.py +2 -0
  16. supervisely/nn/inference/cache.py +2 -2
  17. supervisely/nn/inference/inference.py +364 -73
  18. supervisely/nn/inference/inference_request.py +3 -2
  19. supervisely/nn/inference/predict_app/gui/classes_selector.py +81 -12
  20. supervisely/nn/inference/predict_app/gui/gui.py +676 -488
  21. supervisely/nn/inference/predict_app/gui/input_selector.py +178 -25
  22. supervisely/nn/inference/predict_app/gui/model_selector.py +2 -4
  23. supervisely/nn/inference/predict_app/gui/output_selector.py +46 -6
  24. supervisely/nn/inference/predict_app/gui/settings_selector.py +756 -59
  25. supervisely/nn/inference/predict_app/gui/tags_selector.py +1 -1
  26. supervisely/nn/inference/predict_app/gui/utils.py +236 -119
  27. supervisely/nn/inference/predict_app/predict_app.py +2 -2
  28. supervisely/nn/model/model_api.py +9 -0
  29. supervisely/nn/tracker/base_tracker.py +11 -1
  30. supervisely/nn/tracker/botsort/botsort_config.yaml +0 -1
  31. supervisely/nn/tracker/botsort_tracker.py +14 -7
  32. supervisely/nn/tracker/visualize.py +70 -72
  33. supervisely/video/video.py +15 -1
  34. supervisely/worker_api/agent_rpc.py +24 -1
  35. supervisely/worker_api/rpc_servicer.py +31 -7
  36. {supervisely-6.73.456.dist-info → supervisely-6.73.458.dist-info}/METADATA +3 -2
  37. {supervisely-6.73.456.dist-info → supervisely-6.73.458.dist-info}/RECORD +41 -41
  38. {supervisely-6.73.456.dist-info → supervisely-6.73.458.dist-info}/LICENSE +0 -0
  39. {supervisely-6.73.456.dist-info → supervisely-6.73.458.dist-info}/WHEEL +0 -0
  40. {supervisely-6.73.456.dist-info → supervisely-6.73.458.dist-info}/entry_points.txt +0 -0
  41. {supervisely-6.73.456.dist-info → supervisely-6.73.458.dist-info}/top_level.txt +0 -0
supervisely/nn/tracker/visualize.py +70 -72
@@ -1,21 +1,22 @@
- from typing import Union, Dict, List, Tuple, Iterator, Optional
- import numpy as np
- import cv2
- import ffmpeg
- from pathlib import Path
+ import shutil
+ import tempfile
  from collections import defaultdict
  from dataclasses import dataclass
- import tempfile
- import shutil
+ from pathlib import Path
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
+
+ import cv2
+ import ffmpeg
+ import numpy as np

  import supervisely as sly
- from supervisely import logger
+ from supervisely import VideoAnnotation, logger
  from supervisely.nn.model.prediction import Prediction
- from supervisely import VideoAnnotation
  from supervisely.nn.tracker.utils import predictions_to_video_annotation


  class TrackingVisualizer:
+
  def __init__(
  self,
  show_labels: bool = True,
@@ -29,7 +30,7 @@ class TrackingVisualizer:
  codec: str = "mp4",
  output_fps: float = 30.0,
  colorize_tracks: bool = True,
-
+ trajectory_thickness: int = 2,
  ):
  """
  Initialize the visualizer with configuration.
@@ -58,6 +59,7 @@ class TrackingVisualizer:
  self.text_scale = text_scale
  self.text_thickness = text_thickness
  self.trajectory_length = trajectory_length
+ self.trajectory_thickness = trajectory_thickness
  self.colorize_tracks = colorize_tracks

  # Output settings
@@ -71,7 +73,7 @@ class TrackingVisualizer:
  self.track_colors = {}
  self.color_palette = self._generate_color_palette()
  self._temp_dir = None
-
+
  def _generate_color_palette(self, num_colors: int = 100) -> List[Tuple[int, int, int]]:
  """
  Generate bright, distinct color palette for track visualization.
@@ -88,11 +90,11 @@ class TrackingVisualizer:
  bgr_color = cv2.cvtColor(hsv_color, cv2.COLOR_HSV2BGR)[0][0]
  colors.append(tuple(map(int, bgr_color)))
  return colors
-
+
  def _get_track_color(self, track_id: int) -> Tuple[int, int, int]:
  """Get consistent color for track ID from palette."""
  return self.color_palette[track_id % len(self.color_palette)]
-
+
  def _get_video_info(self, video_path: Path) -> Tuple[int, int, float, int]:
  """
  Get video metadata using ffmpeg.
@@ -104,13 +106,13 @@ class TrackingVisualizer:
  probe = ffmpeg.probe(str(video_path))
  video_stream = next((stream for stream in probe['streams']
  if stream['codec_type'] == 'video'), None)
-
+
  if video_stream is None:
  raise ValueError(f"No video stream found in: {video_path}")
-
+
  width = int(video_stream['width'])
  height = int(video_stream['height'])
-
+
  # Extract FPS
  fps_str = video_stream.get('r_frame_rate', '30/1')
  if '/' in fps_str:
@@ -118,19 +120,19 @@ class TrackingVisualizer:
  fps = num / den if den != 0 else 30.0
  else:
  fps = float(fps_str)
-
+
  # Get total frames
  total_frames = int(video_stream.get('nb_frames', 0))
  if total_frames == 0:
  # Fallback: estimate from duration and fps
  duration = float(video_stream.get('duration', 0))
  total_frames = int(duration * fps) if duration > 0 else 0
-
+
  return width, height, fps, total_frames
-
+
  except Exception as e:
  raise ValueError(f"Could not read video metadata {video_path}: {str(e)}")
-
+
  def _create_frame_iterator(self, source: Union[str, Path]) -> Iterator[Tuple[int, np.ndarray]]:
  """
  Create iterator that yields (frame_index, frame) tuples.
@@ -142,38 +144,38 @@ class TrackingVisualizer:
  Tuple of (frame_index, frame_array)
  """
  source = Path(source)
-
+
  if source.is_file():
  yield from self._iterate_video_frames(source)
  elif source.is_dir():
  yield from self._iterate_directory_frames(source)
  else:
  raise ValueError(f"Source must be a video file or directory, got: {source}")
-
+
  def _iterate_video_frames(self, video_path: Path) -> Iterator[Tuple[int, np.ndarray]]:
  """Iterate through video frames using ffmpeg."""
  width, height, fps, total_frames = self._get_video_info(video_path)
-
+
  # Store video info for later use
  self.source_fps = fps
  self.frame_size = (width, height)
-
+
  process = (
  ffmpeg
  .input(str(video_path))
  .output('pipe:', format='rawvideo', pix_fmt='bgr24', loglevel='quiet')
  .run_async(pipe_stdout=True, pipe_stderr=False)
  )
-
+
  try:
  frame_size_bytes = width * height * 3
  frame_idx = 0
-
+
  while True:
  frame_data = process.stdout.read(frame_size_bytes)
  if len(frame_data) != frame_size_bytes:
  break
-
+
  frame = np.frombuffer(frame_data, np.uint8).reshape([height, width, 3])
  yield frame_idx, frame
  frame_idx += 1
@@ -186,26 +188,26 @@ class TrackingVisualizer:
  if process.stderr:
  process.stderr.close()
  process.wait()
-
+
  def _iterate_directory_frames(self, frames_dir: Path) -> Iterator[Tuple[int, np.ndarray]]:
  """Iterate through image frames in directory."""
  if not frames_dir.is_dir():
  raise ValueError(f"Directory does not exist: {frames_dir}")
-
+
  # Support common image extensions
  extensions = ['.jpg', '.jpeg', '.png', '.bmp', '.tiff']
  image_files = []
  for ext in extensions:
  image_files.extend(frames_dir.glob(f'*{ext}'))
  image_files.extend(frames_dir.glob(f'*{ext.upper()}'))
-
+
  image_files = sorted(image_files)
  if not image_files:
  raise ValueError(f"No image files found in directory: {frames_dir}")
-
+
  # Set fps from config for image sequences
  self.source_fps = self.output_fps
-
+
  for frame_idx, img_path in enumerate(image_files):
  frame = cv2.imread(str(img_path))
  if frame is not None:
@@ -215,7 +217,7 @@ class TrackingVisualizer:
  yield frame_idx, frame
  else:
  logger.warning(f"Could not read image: {img_path}")
-
+
  def _extract_tracks_from_annotation(self) -> None:
  """
  Extract tracking data from Supervisely VideoAnnotation.
@@ -224,29 +226,29 @@ class TrackingVisualizer:
  """
  self.tracks_by_frame = defaultdict(list)
  self.track_colors = {}
-
+
  # Map object keys to track info
  objects = {}
  for i, obj in enumerate(self.annotation.objects):
  objects[obj.key] = (i, obj.obj_class.name)
-
+
  # Extract tracks from frames
  for frame in self.annotation.frames:
  frame_idx = frame.index
  for figure in frame.figures:
  if figure.geometry.geometry_name() != 'rectangle':
  continue
-
+
  object_key = figure.parent_object.key
  if object_key not in objects:
  continue
-
+
  track_id, class_name = objects[object_key]
-
+
  # Extract bbox coordinates
  rect = figure.geometry
  bbox = (rect.left, rect.top, rect.right, rect.bottom)
-
+
  if track_id not in self.track_colors:
  if self.colorize_tracks:
  # auto-color override everything
@@ -263,11 +265,10 @@ class TrackingVisualizer:

  self.track_colors[track_id] = color

-
  self.tracks_by_frame[frame_idx].append((track_id, bbox, class_name))
-
+
  logger.info(f"Extracted tracks from {len(self.tracks_by_frame)} frames")
-
+
  def _draw_detection(self, img: np.ndarray, track_id: int, bbox: Tuple[int, int, int, int],
  class_name: str) -> Optional[Tuple[int, int]]:
  """
@@ -278,7 +279,7 @@ class TrackingVisualizer:

  if x2 <= x1 or y2 <= y1:
  return None
-
+
  color = self.track_colors[track_id]

  # Draw bounding box
@@ -304,7 +305,6 @@ class TrackingVisualizer:
  # Return center point for trajectory
  return (x1 + x2) // 2, (y1 + y2) // 2

-
  def _draw_trajectories(self, img: np.ndarray) -> None:
  """Draw trajectory lines for all tracks, filtering out big jumps."""
  if not self.show_trajectories:
@@ -323,13 +323,12 @@ class TrackingVisualizer:
  p1, p2 = points[i - 1], points[i]
  if p1 is None or p2 is None:
  continue
-
+
  if np.hypot(p2[0] - p1[0], p2[1] - p1[1]) > max_jump:
  continue
- cv2.line(img, p1, p2, color, 2)
+ cv2.line(img, p1, p2, color, self.trajectory_thickness)
  cv2.circle(img, p1, 3, color, -1)

-
  def _process_single_frame(self, frame: np.ndarray, frame_idx: int) -> np.ndarray:
  """
  Process single frame: add annotations and return processed frame.
@@ -351,23 +350,23 @@ class TrackingVisualizer:
  if len(self.track_centers[track_id]) > self.trajectory_length:
  self.track_centers[track_id].pop(0)
  active_ids.add(track_id)
-
+
  for tid in self.track_centers.keys():
  if tid not in active_ids:
  self.track_centers[tid].append(None)
  if len(self.track_centers[tid]) > self.trajectory_length:
  self.track_centers[tid].pop(0)
-
+
  # Draw trajectories
  self._draw_trajectories(img)
-
+
  # Add frame number if requested
  if self.show_frame_number:
  cv2.putText(img, f"Frame: {frame_idx + 1}", (10, 30),
  cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
-
+
  return img
-
+
  def _save_processed_frame(self, frame: np.ndarray, frame_idx: int) -> str:
  """
  Save processed frame to temporary directory.
@@ -382,7 +381,7 @@ class TrackingVisualizer:
  frame_path = self._temp_dir / f"frame_{frame_idx:08d}.jpg"
  cv2.imwrite(str(frame_path), frame, [cv2.IMWRITE_JPEG_QUALITY, 95])
  return str(frame_path)
-
+
  def _create_video_from_frames(self, output_path: Union[str, Path]) -> None:
  """
  Create final video from processed frames using ffmpeg.
@@ -392,10 +391,10 @@ class TrackingVisualizer:
  """
  output_path = Path(output_path)
  output_path.parent.mkdir(parents=True, exist_ok=True)
-
+
  # Create video from frame sequence
  input_pattern = str(self._temp_dir / "frame_%08d.jpg")
-
+
  try:
  (
  ffmpeg
@@ -405,17 +404,17 @@ class TrackingVisualizer:
  .run(capture_stdout=True, capture_stderr=True)
  )
  logger.info(f"Video saved to {output_path}")
-
+
  except ffmpeg.Error as e:
  error_msg = e.stderr.decode() if e.stderr else "Unknown ffmpeg error"
  raise ValueError(f"Failed to create video: {error_msg}")
-
+
  def _cleanup_temp_directory(self) -> None:
  """Clean up temporary directory and all its contents."""
  if self._temp_dir and self._temp_dir.exists():
  shutil.rmtree(self._temp_dir)
  self._temp_dir = None
-
+
  def visualize_video_annotation(self, annotation: VideoAnnotation,
  source: Union[str, Path],
  output_path: Union[str, Path]) -> None:
@@ -433,43 +432,43 @@ class TrackingVisualizer:
  """
  if not isinstance(annotation, VideoAnnotation):
  raise TypeError(f"Annotation must be VideoAnnotation, got {type(annotation)}")
-
+
  # Store annotation
  self.annotation = annotation
-
+
  # Create temporary directory for processed frames
  self._temp_dir = Path(tempfile.mkdtemp(prefix="video_viz_"))
-
+
  try:
  # Extract tracking data
  self._extract_tracks_from_annotation()
-
+
  if not self.tracks_by_frame:
  logger.warning("No tracking data found in annotation")
-
+
  # Reset trajectory tracking
  self.track_centers = defaultdict(list)
-
+
  # Process frames one by one
  frame_count = 0
  for frame_idx, frame in self._create_frame_iterator(source):
  # Process frame
  processed_frame = self._process_single_frame(frame, frame_idx)
-
+
  # Save processed frame
  self._save_processed_frame(processed_frame, frame_idx)
-
+
  frame_count += 1
-
+
  # Progress logging
  if frame_count % 100 == 0:
  logger.info(f"Processed {frame_count} frames")
-
+
  logger.info(f"Finished processing {frame_count} frames")
-
+
  # Create final video from saved frames
  self._create_video_from_frames(output_path)
-
+
  finally:
  # Always cleanup temporary files
  self._cleanup_temp_directory()
@@ -477,7 +476,7 @@ class TrackingVisualizer:
  def __del__(self):
  """Cleanup temporary directory on object destruction."""
  self._cleanup_temp_directory()
-
+

  def visualize(
  predictions: Union[VideoAnnotation, List[Prediction]],
@@ -519,4 +518,3 @@ def visualize(
  visualizer.visualize_video_annotation(predictions, source, output_path)
  else:
  raise TypeError(f"Predictions must be VideoAnnotation or list of Prediction, got {type(predictions)}")
-
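The visualize.py changes above regroup the imports and make the previously hard-coded trajectory line width (2 px) configurable via the new trajectory_thickness argument. A minimal usage sketch of the updated class, assuming an existing VideoAnnotation object and placeholder input/output paths (only arguments visible in this diff are shown):

from supervisely.nn.tracker.visualize import TrackingVisualizer

# Sketch only: `video_annotation` is assumed to be a supervisely VideoAnnotation
# (e.g. produced by a tracker); the file paths below are placeholders.
visualizer = TrackingVisualizer(
    show_labels=True,
    output_fps=30.0,
    colorize_tracks=True,
    trajectory_thickness=4,  # new in this release; was fixed at 2 before
)
visualizer.visualize_video_annotation(
    annotation=video_annotation,
    source="input/video.mp4",        # a video file or a directory of frames
    output_path="output/tracks.mp4",
)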
supervisely/video/video.py +15 -1
@@ -4,7 +4,7 @@
  from __future__ import annotations

  import os
- from typing import Dict, Generator, List, Optional, Tuple
+ from typing import Dict, Generator, Iterable, List, Optional, Tuple

  import cv2
  import numpy as np
@@ -625,3 +625,17 @@ class VideoFrameReader:
  return self.vr.get_avg_fps()
  else:
  return int(self.cap.get(cv2.CAP_PROP_FPS))
+
+
+ def create_from_frames(frames: Iterable[np.ndarray], output_path: str, fps: int = 30) -> None:
+ video_writer = None
+ for frame in frames:
+ if video_writer is None:
+ height, width, _ = frame.shape
+ fourcc = cv2.VideoWriter.fourcc(*"mp4v")
+ video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+ if frame.dtype != np.uint8:
+ frame = (frame * 255).astype(np.uint8) if frame.max() <= 1.0 else frame.astype(np.uint8)
+
+ video_writer.write(frame)
+ video_writer.release()
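The new create_from_frames helper writes an MP4 from any iterable of BGR frames (uint8, or floats in 0–1 which it rescales) using OpenCV. A minimal sketch, assuming it lands as a module-level function in supervisely.video.video (indentation is not visible in this rendering) and feeding it synthetic frames:

import numpy as np
from supervisely.video.video import create_from_frames

# Sketch only: 60 synthetic 640x480 gray frames written to a placeholder path.
frames = (np.full((480, 640, 3), i % 256, dtype=np.uint8) for i in range(60))
create_from_frames(frames, output_path="demo.mp4", fps=30)

Note that the writer is created lazily from the first frame's shape, so an empty iterable would leave video_writer as None and the final release() call would fail.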
supervisely/worker_api/agent_rpc.py +24 -1
@@ -1,10 +1,12 @@
  # coding: utf-8
+ # isort: skip_file

  import cv2
  import numpy as np

  from .chunking import load_to_memory_chunked_image, load_to_memory_chunked
- from ..worker_proto import worker_api_pb2 as api_proto
+
+ # from ..worker_proto import worker_api_pb2 as api_proto # Import moved to methods where needed


  class SimpleCache:
@@ -22,6 +24,13 @@ class SimpleCache:


  def download_image_from_remote(agent_api, image_hash, src_node_token, logger):
+ try:
+ from ..worker_proto import worker_api_pb2 as api_proto
+ except Exception as e:
+ from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+ raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
  resp = agent_api.get_stream_with_data(
  'DownloadImages',
  api_proto.ChunkImage,
@@ -34,6 +43,13 @@ def download_image_from_remote(agent_api, image_hash, src_node_token, logger):


  def download_data_from_remote(agent_api, req_id, logger):
+ try:
+ from ..worker_proto import worker_api_pb2 as api_proto
+ except Exception as e:
+ from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+ raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
  resp = agent_api.get_stream_with_data('GetGeneralEventData', api_proto.Chunk, api_proto.Empty(),
  addit_headers={'x-request-id': req_id})
  b_data = load_to_memory_chunked(resp)
@@ -47,6 +63,13 @@ def batched(seq, batch_size):


  def send_from_memory_generator(out_bytes, chunk_size):
+ try:
+ from ..worker_proto import worker_api_pb2 as api_proto
+ except Exception as e:
+ from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+ raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
  for bytes_chunk in batched(out_bytes, chunk_size):
  yield api_proto.Chunk(buffer=bytes_chunk, total_size=len(out_bytes))

supervisely/worker_api/rpc_servicer.py +31 -7
@@ -1,4 +1,5 @@
  # coding: utf-8
+ # isort: skip_file

  import os
  import concurrent.futures
@@ -11,18 +12,27 @@ import threading
  from supervisely.annotation.annotation import Annotation
  from supervisely.function_wrapper import function_wrapper, function_wrapper_nofail
  from supervisely.imaging.image import drop_image_alpha_channel
- from supervisely.nn.legacy.hosted.inference_modes import InferenceModeFactory, InfModeFullImage, \
- MODE, NAME, get_effective_inference_mode_config
+ from supervisely.nn.legacy.hosted.inference_modes import (
+ InferenceModeFactory,
+ InfModeFullImage,
+ MODE,
+ NAME,
+ get_effective_inference_mode_config,
+ )
  from supervisely.project.project_meta import ProjectMeta
  from supervisely.worker_api.agent_api import AgentAPI
- from supervisely.worker_api.agent_rpc import decode_image, download_image_from_remote, download_data_from_remote, \
- send_from_memory_generator
+ from supervisely.worker_api.agent_rpc import (
+ decode_image,
+ download_image_from_remote,
+ download_data_from_remote,
+ send_from_memory_generator,
+ )
  from supervisely.worker_api.interfaces import SingleImageInferenceInterface
- from supervisely.worker_proto import worker_api_pb2 as api_proto
+
+ # from supervisely.worker_proto import worker_api_pb2 as api_proto # Import moved to methods where needed
  from supervisely.task.progress import report_agent_rpc_ready
  from supervisely.api.api import Api

-
  REQUEST_TYPE = 'request_type'
  GET_OUT_META = 'get_out_meta'
  INFERENCE = 'inference'
@@ -123,6 +133,13 @@ class AgentRPCServicerBase:
  self.thread_pool.submit(function_wrapper_nofail, self._send_data, res_msg, req_id) # skip errors

  def _send_data(self, out_msg, req_id):
+ try:
+ from supervisely.worker_proto import worker_api_pb2 as api_proto
+ except Exception as e:
+ from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+ raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
  self.logger.trace('Will send output data.', extra={REQUEST_ID: req_id})
  out_bytes = json.dumps(out_msg).encode('utf-8')

@@ -173,6 +190,13 @@ class AgentRPCServicerBase:
  self._load_data_if_required(event_obj)

  def run_inf_loop(self):
+ try:
+ from supervisely.worker_proto import worker_api_pb2 as api_proto
+ except Exception as e:
+ from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+ raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
  def seq_inf_wrapped():
  function_wrapper(self._sequential_final_processing) # exit if raised

@@ -252,4 +276,4 @@ class InactiveRPCServicer(AgentRPCServicer):
  self.logger.info('Created InactiveRPCServicer for internal usage', extra=conn_config)

  def run_inf_loop(self):
- raise RuntimeError("Method is not accessible")
+ raise RuntimeError("Method is not accessible")
{supervisely-6.73.456.dist-info → supervisely-6.73.458.dist-info}/METADATA +3 -2
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: supervisely
- Version: 6.73.456
+ Version: 6.73.458
  Summary: Supervisely Python SDK.
  Home-page: https://github.com/supervisely/supervisely
  Author: Supervisely
@@ -25,7 +25,6 @@ Requires-Dist: numpy<2.0.0,>=1.19
  Requires-Dist: opencv-python<5.0.0.0,>=4.6.0.66
  Requires-Dist: PTable<1.0.0,>=0.9.2
  Requires-Dist: pillow<=10.4.0,>=5.4.1
- Requires-Dist: protobuf<=3.20.3,>=3.19.5
  Requires-Dist: python-json-logger<3.0.0,>=0.1.11
  Requires-Dist: requests<3.0.0,>=2.27.1
  Requires-Dist: requests-toolbelt>=0.9.1
@@ -71,6 +70,8 @@ Requires-Dist: zstd
  Requires-Dist: aiofiles
  Requires-Dist: httpx[http2]==0.27.2
  Requires-Dist: debugpy
+ Provides-Extra: agent
+ Requires-Dist: protobuf<=3.20.3,>=3.19.5; extra == "agent"
  Provides-Extra: apps
  Requires-Dist: uvicorn[standard]<1.0.0,>=0.18.2; extra == "apps"
  Requires-Dist: fastapi<1.0.0,>=0.79.0; extra == "apps"
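In practical terms, protobuf is no longer pulled in by a plain `pip install supervisely`; it now comes only through the new optional extra declared above (presumably installed as `pip install "supervisely[agent]"`). This pairs with the worker_api changes earlier in the diff, which defer the worker_api_pb2 import into the functions that need it and raise an ImportError built from PROTOBUF_REQUIRED_ERROR when the protobuf stack is missing.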