supervisely-6.73.356-py3-none-any.whl → supervisely-6.73.358-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. supervisely/_utils.py +12 -0
  2. supervisely/api/annotation_api.py +3 -0
  3. supervisely/api/api.py +2 -2
  4. supervisely/api/app_api.py +27 -2
  5. supervisely/api/entity_annotation/tag_api.py +0 -1
  6. supervisely/api/labeling_job_api.py +4 -1
  7. supervisely/api/nn/__init__.py +0 -0
  8. supervisely/api/nn/deploy_api.py +821 -0
  9. supervisely/api/nn/neural_network_api.py +248 -0
  10. supervisely/api/task_api.py +26 -467
  11. supervisely/app/fastapi/subapp.py +1 -0
  12. supervisely/nn/__init__.py +2 -1
  13. supervisely/nn/artifacts/artifacts.py +5 -5
  14. supervisely/nn/benchmark/object_detection/metric_provider.py +3 -0
  15. supervisely/nn/experiments.py +28 -5
  16. supervisely/nn/inference/cache.py +178 -114
  17. supervisely/nn/inference/gui/gui.py +18 -35
  18. supervisely/nn/inference/gui/serving_gui.py +3 -1
  19. supervisely/nn/inference/inference.py +1421 -1265
  20. supervisely/nn/inference/inference_request.py +412 -0
  21. supervisely/nn/inference/object_detection_3d/object_detection_3d.py +31 -24
  22. supervisely/nn/inference/session.py +2 -2
  23. supervisely/nn/inference/tracking/base_tracking.py +45 -79
  24. supervisely/nn/inference/tracking/bbox_tracking.py +220 -155
  25. supervisely/nn/inference/tracking/mask_tracking.py +274 -250
  26. supervisely/nn/inference/tracking/tracker_interface.py +23 -0
  27. supervisely/nn/inference/uploader.py +164 -0
  28. supervisely/nn/model/__init__.py +0 -0
  29. supervisely/nn/model/model_api.py +259 -0
  30. supervisely/nn/model/prediction.py +311 -0
  31. supervisely/nn/model/prediction_session.py +632 -0
  32. supervisely/nn/tracking/__init__.py +1 -0
  33. supervisely/nn/tracking/boxmot.py +114 -0
  34. supervisely/nn/tracking/tracking.py +24 -0
  35. supervisely/nn/training/train_app.py +61 -19
  36. supervisely/nn/utils.py +43 -3
  37. supervisely/task/progress.py +12 -2
  38. supervisely/video/video.py +107 -1
  39. supervisely/volume_annotation/volume_figure.py +8 -2
  40. {supervisely-6.73.356.dist-info → supervisely-6.73.358.dist-info}/METADATA +2 -1
  41. {supervisely-6.73.356.dist-info → supervisely-6.73.358.dist-info}/RECORD +45 -34
  42. supervisely/api/neural_network_api.py +0 -202
  43. {supervisely-6.73.356.dist-info → supervisely-6.73.358.dist-info}/LICENSE +0 -0
  44. {supervisely-6.73.356.dist-info → supervisely-6.73.358.dist-info}/WHEEL +0 -0
  45. {supervisely-6.73.356.dist-info → supervisely-6.73.358.dist-info}/entry_points.txt +0 -0
  46. {supervisely-6.73.356.dist-info → supervisely-6.73.358.dist-info}/top_level.txt +0 -0
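The per-file diff reproduced below is for supervisely/nn/inference/tracking/bbox_tracking.py (item 24). Its recurring theme: per-request bookkeeping moves out of ad-hoc dictionaries and a hand-rolled Queue/Thread upload loop into the new InferenceRequest objects (item 20, +412 lines) and the Uploader helper (item 27, +164 lines), scheduled through an inference-requests manager. Neither new module is shown in this diff; the toy sketch below only mirrors the call surface that bbox_tracking.py exercises (set_stage, done, add_results, is_stopped, progress.current/total, stage, uuid, final_result) and is illustrative, not the actual implementation.

# Illustrative stand-in only; the real class lives in supervisely/nn/inference/inference_request.py.
import threading
import uuid as uuid_lib
from dataclasses import dataclass
from typing import Any, List


@dataclass
class _Progress:
    current: int = 0
    total: int = 1


class ToyInferenceRequest:
    class Stage:
        INFERENCE = "inference"

    def __init__(self) -> None:
        self.uuid = uuid_lib.uuid4().hex
        self.progress = _Progress()
        self.stage = None
        self.final_result: Any = None
        self._results: List[Any] = []
        self._stop = threading.Event()
        self._lock = threading.Lock()

    def set_stage(self, stage: str, current: int, total: int) -> None:
        # Reset the counters for a new stage of this request.
        self.stage = stage
        self.progress.current = current
        self.progress.total = total

    def done(self, count: int = 1) -> None:
        # Advance progress; the tracking loops call this once per processed item.
        with self._lock:
            self.progress.current += count

    def add_results(self, items: List[Any]) -> None:
        # Buffer uploaded figures so pollers can read partial results.
        with self._lock:
            self._results.extend(items)

    def is_stopped(self) -> bool:
        return self._stop.is_set()

    def stop(self) -> None:
        self._stop.set()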
@@ -1,8 +1,6 @@
  import time
  import uuid
  from pathlib import Path
- from queue import Queue
- from threading import Event, Thread
  from typing import Any, BinaryIO, Dict, List, Optional

  import numpy as np
@@ -17,14 +15,16 @@ from supervisely.api.video.video_figure_api import FigureInfo
  from supervisely.geometry.helpers import deserialize_geometry
  from supervisely.geometry.rectangle import Rectangle
  from supervisely.imaging import image as sly_image
+ from supervisely.nn.inference.inference import Uploader
+ from supervisely.nn.inference.inference_request import InferenceRequest
  from supervisely.nn.inference.tracking.base_tracking import BaseTracking
  from supervisely.nn.inference.tracking.tracker_interface import (
      TrackerInterface,
      TrackerInterfaceV2,
  )
+ from supervisely.nn.inference.uploader import Uploader
  from supervisely.nn.prediction_dto import Prediction, PredictionBBox
  from supervisely.sly_logger import logger
- from supervisely.task.progress import Progress


  class BBoxTracking(BaseTracking):
@@ -33,14 +33,19 @@ class BBoxTracking(BaseTracking):
          geometry_json = data["data"]
          return deserialize_geometry(geometry_type_str, geometry_json)

-     def _track(self, api: Api, context: dict, notify_annotation_tool: bool):
+     def _track(
+         self,
+         api: Api,
+         context: dict,
+         inference_request: InferenceRequest,
+     ):
          video_interface = TrackerInterface(
              context=context,
              api=api,
              load_all_frames=False,
              frame_loader=self.cache.download_frame,
              frames_loader=self.cache.download_frames,
-             should_notify=notify_annotation_tool,
+             should_notify=True,
          )

          range_of_frames = [
@@ -62,36 +67,41 @@ class BBoxTracking(BaseTracking):
                  video_id=video_interface.video_id,
              )

-         api.logger.info("Start tracking.")
+         def _upload_f(items: List):
+             video_interface.add_object_geometries_on_frames(*list(zip(*items)), notify=False)
+             inference_request.done(len(items))
+
+         def _notify_f(items: List):
+             frame_range = [
+                 min(frame_index for (_, _, frame_index) in items),
+                 max(frame_index for (_, _, frame_index) in items),
+             ]
+             pos_inc = inference_request.progress.current - video_interface.global_pos
+
+             video_interface._notify(
+                 pos_increment=pos_inc,
+                 fstart=frame_range[0],
+                 fend=frame_range[1],
+                 task=inference_request.stage,
+             )

-         def _upload_loop(q: Queue, stop_event: Event, video_interface: TrackerInterface):
+         def _exception_handler(exception: Exception):
              try:
-                 while True:
-                     items = []
-                     while not q.empty():
-                         items.append(q.get_nowait())
-                     if len(items) > 0:
-                         video_interface.add_object_geometries_on_frames(*list(zip(*items)))
-                         continue
-                     if stop_event.is_set():
-                         video_interface._notify(True, task="stop tracking")
-                         return
-                     time.sleep(1)
-             except Exception as e:
-                 api.logger.error("Error in upload loop: %s", str(e), exc_info=True)
-                 video_interface._notify(True, task="stop tracking")
-                 video_interface.global_stop_indicatior = True
-                 raise
-
-         upload_queue = Queue()
-         stop_upload_event = Event()
-         Thread(
-             target=_upload_loop,
-             args=[upload_queue, stop_upload_event, video_interface],
-             daemon=True,
-         ).start()
-
-         try:
+                 raise exception
+             except Exception:
+                 api.logger.error(f"Error: {str(exception)}", exc_info=True)
+             video_interface._notify(True, task="Stop tracking due to an error")
+             raise exception
+
+         api.logger.info("Start tracking.")
+         total_progress = video_interface.frames_count * len(video_interface.figure_ids)
+         inference_request.set_stage(InferenceRequest.Stage.INFERENCE, 0, total_progress)
+         with Uploader(
+             upload_f=_upload_f,
+             notify_f=_notify_f,
+             exception_handler=_exception_handler,
+             logger=api.logger,
+         ) as uploader:
              for fig_id, obj_id in zip(
                  video_interface.geometries.keys(),
                  video_interface.object_ids,
@@ -100,7 +110,6 @@ class BBoxTracking(BaseTracking):
                  for _ in video_interface.frames_loader_generator():
                      geom = video_interface.geometries[fig_id]
                      if not isinstance(geom, Rectangle):
-                         stop_upload_event.set()
                          raise TypeError(f"Tracking does not work with {geom.geometry_name()}.")

                      imgs = video_interface.frames
@@ -121,21 +130,38 @@ class BBoxTracking(BaseTracking):
                          settings=self.custom_inference_settings_dict,
                      )
                      sly_geometry = self._to_sly_geometry(geometry)
-                     upload_queue.put(
-                         (sly_geometry, obj_id, video_interface._cur_frames_indexes[-1])
-                     )

-                     if video_interface.global_stop_indicatior:
-                         stop_upload_event.set()
-                         return
+                     uploader.put([(sly_geometry, obj_id, video_interface._cur_frames_indexes[-1])])

-                 api.logger.info(f"Figure #{fig_id} tracked.")
-         except Exception:
-             stop_upload_event.set()
-             raise
-         stop_upload_event.set()
+                     if inference_request.is_stopped() or video_interface.global_stop_indicatior:
+                         api.logger.info(
+                             "Inference request stopped.",
+                             extra={"inference_request_uuid": inference_request.uuid},
+                         )
+                         video_interface._notify(True, task="Stop tracking")
+                         return
+                     if uploader.has_exception():
+                         exception = uploader.exception
+                         if not isinstance(exception, Exception):
+                             raise RuntimeError(
+                                 f"Uploader exception is not an instance of Exception: {str(exception)}"
+                             )
+                         raise uploader.exception
+
+                 api.logger.info(
+                     f"Figure #{fig_id} tracked.",
+                     extra={
+                         "figure_id": fig_id,
+                         "object_id": obj_id,
+                         "inference_request_uuid": inference_request.uuid,
+                     },
+                 )
+             api.logger.info(
+                 "Finished tracking.", extra={"inference_request_uuid": inference_request.uuid}
+             )
+             video_interface._notify(True, task="Finished tracking")

-     def _track_api(self, api: Api, context: dict, request_uuid: str = None):
+     def _track_api(self, api: Api, context: dict, inference_request: InferenceRequest):
          track_t = time.monotonic()
          # unused fields:
          context["trackId"] = "auto"
@@ -179,6 +205,8 @@ class BBoxTracking(BaseTracking):
          frames_n = video_interface.frames_count
          box_n = len(input_bboxes)
          geom_t = time.monotonic()
+
+         inference_request.set_stage(InferenceRequest.Stage.INFERENCE, 0, frames_n * box_n)
          api.logger.info(
              "Start tracking.",
              extra={
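This hunk wires _track_api into the same progress accounting: the stage total is frames_n * box_n and, as a later hunk shows, inference_request.done() is called once per (box, frame) pair, so the reported fraction is simply processed pairs over total pairs. A quick worked example with the toy request object sketched near the top of this diff (the numbers are made up):

req = ToyInferenceRequest()
frames_n, box_n = 50, 4
req.set_stage(ToyInferenceRequest.Stage.INFERENCE, 0, frames_n * box_n)  # total = 200

req.done(frames_n)  # first box finished: 50 frames
req.done(10)        # second box: 10 of 50 frames done
print(req.progress.current, "/", req.progress.total)  # 60 / 200, i.e. 30% complete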
@@ -186,7 +214,7 @@ class BBoxTracking(BaseTracking):
                  "frame_range": range_of_frames,
                  "geometries_count": box_n,
                  "frames_count": frames_n,
-                 "request_uuid": request_uuid,
+                 "request_uuid": inference_request.uuid,
              },
          )
          for box_i, input_geom in enumerate(input_bboxes, 1):
@@ -218,6 +246,7 @@ class BBoxTracking(BaseTracking):
                  predictions_for_object.append(
                      {"type": sly_geometry.geometry_name(), "data": sly_geometry.to_json()}
                  )
+                 inference_request.done()
                  api.logger.debug(
                      "Frame processed. Geometry: [%d / %d]. Frame: [%d / %d]",
                      box_i,
@@ -228,7 +257,7 @@ class BBoxTracking(BaseTracking):
                          "geometry_index": box_i,
                          "frame_index": frame_i,
                          "processing_time": time.monotonic() - frame_t,
-                         "request_uuid": request_uuid,
+                         "inference_request_uuid": inference_request.uuid,
                      },
                  )
                  frame_t = time.monotonic()
@@ -241,7 +270,7 @@ class BBoxTracking(BaseTracking):
                  extra={
                      "geometry_index": box_i,
                      "processing_time": time.monotonic() - geom_t,
-                     "request_uuid": request_uuid,
+                     "inference_request_uuid": inference_request.uuid,
                  },
              )
              geom_t = time.monotonic()
@@ -250,9 +279,12 @@ class BBoxTracking(BaseTracking):
          predictions = list(map(list, zip(*predictions)))
          api.logger.info(
              "Tracking finished.",
-             extra={"tracking_time": time.monotonic() - track_t, "request_uuid": request_uuid},
+             extra={
+                 "tracking_time": time.monotonic() - track_t,
+                 "inference_request_uuid": inference_request.uuid,
+             },
          )
-         return predictions
+         inference_request.final_result = predictions

      def _inference(self, frames: List[np.ndarray], geometries: List[Geometry], settings: dict):
          updated_settings = {
@@ -282,127 +314,161 @@ class BBoxTracking(BaseTracking):
              )
          return results

-     def _track_async(self, api: Api, context: dict, inference_request_uuid: str = None):
-         inference_request = self._inference_requests[inference_request_uuid]
+     def _track_async(self, api: Api, context: dict, inference_request: InferenceRequest):
          tracker_interface = TrackerInterfaceV2(api, context, self.cache)
-         progress: Progress = inference_request["progress"]
          frames_count = tracker_interface.frames_count
          figures = tracker_interface.figures
+         frame_range = [
+             tracker_interface.frame_indexes[0],
+             tracker_interface.frame_indexes[-1],
+         ]
+         frame_range_asc = [min(frame_range), max(frame_range)]
          progress_total = frames_count * len(figures)
-         progress.total = progress_total

          def _upload_f(items: List[FigureInfo]):
-             with inference_request["lock"]:
-                 inference_request["pending_results"].extend(items)
+             inference_request.add_results(items)
+             inference_request.done(len(items))

          def _notify_f(items: List[FigureInfo]):
-             items_by_object_id: Dict[int, List[FigureInfo]] = {}
-             for item in items:
-                 items_by_object_id.setdefault(item.object_id, []).append(item)
-
-             for object_id, object_items in items_by_object_id.items():
-                 frame_range = [
-                     min(item.frame_index for item in object_items),
-                     max(item.frame_index for item in object_items),
-                 ]
-                 progress.iters_done_report(len(object_items))
-                 tracker_interface.notify_progress(progress.current, progress.total, frame_range)
+             frame_range = [
+                 min(item.frame_index for item in items),
+                 max(item.frame_index for item in items),
+             ]
+             tracker_interface.notify_progress(
+                 inference_request.progress.current, inference_request.progress.total, frame_range
+             )

-         api.logger.info("Start tracking.")
-         try:
-             with tracker_interface(_upload_f, _notify_f):
-                 for fig_i, figure in enumerate(figures, 1):
-                     figure = api.video.figure._convert_json_info(figure)
-                     if not figure.geometry_type == Rectangle.geometry_name():
-                         raise TypeError(f"Tracking does not work with {figure.geometry_type}.")
-                     api.logger.info("figure:", extra={"figure": figure._asdict()})
-                     sly_geometry: Rectangle = deserialize_geometry(
-                         figure.geometry_type, figure.geometry
+         def _exception_handler(exception: Exception):
+             api.logger.error(f"Error saving predictions: {str(exception)}", exc_info=True)
+             tracker_interface.notify_progress(
+                 inference_request.progress.current,
+                 inference_request.progress.current,
+                 frame_range_asc,
+             )
+             tracker_interface.notify_error(exception)
+             raise Exception
+
+         api.logger.info("Start tracking.", extra={"inference_request_uuid": inference_request.uuid})
+         inference_request.set_stage(InferenceRequest.Stage.INFERENCE, 0, progress_total)
+         with Uploader(
+             upload_f=_upload_f,
+             notify_f=_notify_f,
+             exception_handler=_exception_handler,
+             logger=api.logger,
+         ) as uploader:
+             uploader.raise_from_notify
+             for fig_i, figure in enumerate(figures, 1):
+                 figure = api.video.figure._convert_json_info(figure)
+                 if not figure.geometry_type == Rectangle.geometry_name():
+                     raise TypeError(f"Tracking does not work with {figure.geometry_type}.")
+                 api.logger.info("figure:", extra={"figure": figure._asdict()})
+                 sly_geometry: Rectangle = deserialize_geometry(
+                     figure.geometry_type, figure.geometry
+                 )
+                 init = False
+                 for frame_i, (frame, next_frame) in enumerate(
+                     tracker_interface.frames_loader_generator(), 1
+                 ):
+                     target = PredictionBBox(
+                         "", # TODO: can this be useful?
+                         [
+                             sly_geometry.top,
+                             sly_geometry.left,
+                             sly_geometry.bottom,
+                             sly_geometry.right,
+                         ],
+                         None,
                      )
-                     init = False
-                     for frame_i, (frame, next_frame) in enumerate(
-                         tracker_interface.frames_loader_generator(), 1
-                     ):
-                         target = PredictionBBox(
-                             "", # TODO: can this be useful?
-                             [
-                                 sly_geometry.top,
-                                 sly_geometry.left,
-                                 sly_geometry.bottom,
-                                 sly_geometry.right,
-                             ],
-                             None,
-                         )

-                         if not init:
-                             self.initialize(frame.image, target)
-                             init = True
-
-                         logger.debug("Start prediction")
-                         t = time.time()
-                         geometry = self.predict(
-                             rgb_image=next_frame.image,
-                             prev_rgb_image=frame.image,
-                             target_bbox=target,
-                             settings=self.custom_inference_settings_dict,
-                         )
-                         logger.debug("Prediction done. Time: %f sec", time.time() - t)
-                         sly_geometry = self._to_sly_geometry(geometry)
-
-                         figure_id = uuid.uuid5(
-                             namespace=uuid.NAMESPACE_URL, name=f"{time.time()}"
-                         ).hex
-                         result_figure = api.video.figure._convert_json_info(
-                             {
-                                 ApiField.ID: figure_id,
-                                 ApiField.OBJECT_ID: figure.object_id,
-                                 "meta": {"frame": next_frame.frame_index},
-                                 ApiField.GEOMETRY_TYPE: sly_geometry.geometry_name(),
-                                 ApiField.GEOMETRY: sly_geometry.to_json(),
-                                 ApiField.TRACK_ID: tracker_interface.track_id,
-                             }
-                         )
+                     if not init:
+                         self.initialize(frame.image, target)
+                         init = True

-                         tracker_interface.add_prediction(result_figure)
+                     logger.debug("Start prediction")
+                     t = time.time()
+                     geometry = self.predict(
+                         rgb_image=next_frame.image,
+                         prev_rgb_image=frame.image,
+                         target_bbox=target,
+                         settings=self.custom_inference_settings_dict,
+                     )
+                     logger.debug("Prediction done. Time: %f sec", time.time() - t)
+                     sly_geometry = self._to_sly_geometry(geometry)

-                         logger.debug(
-                             "Frame [%d / %d] processed.",
-                             frame_i,
-                             tracker_interface.frames_count,
-                         )
+                     figure_id = uuid.uuid5(namespace=uuid.NAMESPACE_URL, name=f"{time.time()}").hex
+                     result_figure = api.video.figure._convert_json_info(
+                         {
+                             ApiField.ID: figure_id,
+                             ApiField.OBJECT_ID: figure.object_id,
+                             "meta": {"frame": next_frame.frame_index},
+                             ApiField.GEOMETRY_TYPE: sly_geometry.geometry_name(),
+                             ApiField.GEOMETRY: sly_geometry.to_json(),
+                             ApiField.TRACK_ID: tracker_interface.track_id,
+                         }
+                     )

-                         if inference_request["cancel_inference"]:
-                             return
-                         if tracker_interface.is_stopped():
-                             reason = tracker_interface.stop_reason()
-                             if isinstance(reason, Exception):
-                                 raise reason
-                             return
-
-                     api.logger.info(
-                         "Figure [%d, %d] tracked.",
-                         fig_i,
-                         len(figures),
-                         extra={"figure_id": figure.id},
+                     uploader.put([result_figure])
+
+                     logger.debug(
+                         "Frame [%d / %d] processed.",
+                         frame_i,
+                         tracker_interface.frames_count,
+                         extra={
+                             "frame_index": frame_i,
+                             "figure_index": fig_i,
+                             "inference_request_uuid": inference_request.uuid,
+                         },
                      )
-         except Exception:
-             progress.message = "Error occured during tracking"
-             raise
-         else:
-             progress.message = "Ready"
-         finally:
-             progress.set(current=0, total=1, report=True)
+
+                     if inference_request.is_stopped() or tracker_interface.is_stopped():
+                         if isinstance(tracker_interface.stop_reason(), Exception):
+                             raise tracker_interface.stop_reason()
+                         api.logger.info(
+                             "Inference request stopped.",
+                             extra={"inference_request_uuid": inference_request.uuid},
+                         )
+                         tracker_interface.notify_progress(
+                             inference_request.progress.current,
+                             inference_request.progress.current,
+                             frame_range_asc,
+                         )
+                         return
+                     if uploader.has_exception():
+                         raise uploader.exception
+
+                 api.logger.info(
+                     "Figure [%d, %d] tracked.",
+                     fig_i,
+                     len(figures),
+                     extra={
+                         "figure_id": figure.id,
+                         "figure_index": fig_i,
+                         "inference_request_uuid": inference_request.uuid,
+                     },
+                 )
+             api.logger.info(
+                 "Finished tracking", extra={"inference_request_uuid": inference_request.uuid}
+             )
+             tracker_interface.notify_progress(
+                 inference_request.progress.current,
+                 inference_request.progress.current,
+                 frame_range_asc,
+             )

      def track(self, api: Api, state: Dict, context: Dict):
          fn = self.send_error_data(api, context)(self._track)
-         self.schedule_task(fn, api, context, notify_annotation_tool=True)
+         self.inference_requests_manager.schedule_task(fn, api, context)
          return {"message": "Track task started."}

      def track_api(self, api: Api, state: Dict, context: Dict):
-         request_uuid = uuid.uuid5(namespace=uuid.NAMESPACE_URL, name=f"{time.time()}").hex
-         result = self._track_api(api, context, request_uuid)
-         logger.info("Track-api request processed.", extra={"request_uuid": request_uuid})
-         return result
+         inference_request, future = self.inference_requests_manager.schedule_task(
+             self._track_api, api, context
+         )
+         future.result()
+         logger.info(
+             "Track-api request processed.", extra={"inference_request_uuid": inference_request.uuid}
+         )
+         return inference_request.final_result

      def track_api_files(self, files: List[BinaryIO], settings: Dict):
          logger.info("Start tracking with settings:", extra={"settings": settings})
@@ -425,17 +491,16 @@ class BBoxTracking(BaseTracking):
                  f"Batch size should be less than or equal to {self.max_batch_size} for this model."
              )

-         inference_request_uuid = uuid.uuid5(namespace=uuid.NAMESPACE_URL, name=f"{time.time()}").hex
          fn = self.send_error_data(api, context)(self._track_async)
-         self.schedule_task(fn, api, context, inference_request_uuid=inference_request_uuid)
+         inference_request, future = self.inference_requests_manager.schedule_task(fn, api, context)

          logger.debug(
              "Inference has scheduled from 'track_async' endpoint",
-             extra={"inference_request_uuid": inference_request_uuid},
+             extra={"inference_request_uuid": inference_request.uuid},
          )
          return {
              "message": "Inference has started.",
-             "inference_request_uuid": inference_request_uuid,
+             "inference_request_uuid": inference_request.uuid,
          }

      def initialize(self, init_rgb_image: np.ndarray, target_bbox: PredictionBBox) -> None:
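Across the endpoints above, work is now scheduled through self.inference_requests_manager.schedule_task(fn, api, context), which evidently returns an (inference_request, future) pair: track_api blocks on future.result() and returns inference_request.final_result, while track and track_async return immediately and expose only the request UUID for polling. The manager itself is not part of this file's diff; the sketch below is only a guess at the minimal shape implied by those call sites, reusing the toy request object from the top of this diff and concurrent.futures.

# Illustrative only; the real InferenceRequestsManager is not shown in this diff.
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Any, Callable, Dict, Tuple


class ToyInferenceRequestsManager:
    def __init__(self, max_workers: int = 2) -> None:
        self._executor = ThreadPoolExecutor(max_workers=max_workers)
        self._requests: Dict[str, ToyInferenceRequest] = {}

    def schedule_task(self, fn: Callable[..., Any], *args: Any) -> Tuple[ToyInferenceRequest, Future]:
        # Create per-request state and pass it to the task, mirroring how
        # _track / _track_api / _track_async receive an InferenceRequest argument.
        request = ToyInferenceRequest()
        self._requests[request.uuid] = request
        future = self._executor.submit(fn, *args, request)
        return request, future

    def get(self, request_uuid: str) -> ToyInferenceRequest:
        return self._requests[request_uuid]


# Synchronous use, like track_api: wait for the future, then read final_result.
def _demo_task(x: int, inference_request: ToyInferenceRequest) -> None:
    inference_request.set_stage(ToyInferenceRequest.Stage.INFERENCE, 0, 1)
    inference_request.final_result = x * 2
    inference_request.done()


manager = ToyInferenceRequestsManager()
request, future = manager.schedule_task(_demo_task, 21)
future.result()
print(request.final_result)  # 42

# Asynchronous use, like track / track_async: hand back the UUID and let clients poll.
response = {"message": "Inference has started.", "inference_request_uuid": request.uuid}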