supervisely 6.73.462__py3-none-any.whl → 6.73.464__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of supervisely might be problematic; see the registry's release details page for more information.

@@ -11,6 +11,7 @@ import subprocess
11
11
  import tempfile
12
12
  import threading
13
13
  import time
14
+ import uuid
14
15
  from collections import OrderedDict, defaultdict
15
16
  from concurrent.futures import ThreadPoolExecutor
16
17
  from dataclasses import asdict, dataclass
@@ -52,6 +53,7 @@ from supervisely.annotation.tag_meta import TagMeta, TagValueType
52
53
  from supervisely.api.api import Api, ApiField
53
54
  from supervisely.api.app_api import WorkflowMeta, WorkflowSettings
54
55
  from supervisely.api.image_api import ImageInfo
56
+ from supervisely.api.video.video_api import VideoInfo
55
57
  from supervisely.app.content import get_data_dir
56
58
  from supervisely.app.fastapi.subapp import (
57
59
  Application,
@@ -102,6 +104,11 @@ from supervisely.video_annotation.video_figure import VideoFigure
102
104
  from supervisely.video_annotation.video_object import VideoObject
103
105
  from supervisely.video_annotation.video_object_collection import VideoObjectCollection
104
106
  from supervisely.video_annotation.video_tag_collection import VideoTagCollection
107
+ from supervisely.video_annotation.key_id_map import KeyIdMap
108
+ from supervisely.video_annotation.video_object_collection import (
109
+ VideoObject,
110
+ VideoObjectCollection,
111
+ )
105
112
 
106
113
  try:
107
114
  from typing import Literal
@@ -2289,6 +2296,162 @@ class Inference:
2289
2296
  inference_request.final_result = {"video_ann": video_ann_json}
2290
2297
  return video_ann_json
2291
2298
 
2299
+ def _tracking_by_detection(self, api: Api, state: dict, inference_request: InferenceRequest):
2300
+ logger.debug("Inferring video_id...", extra={"state": state})
2301
+ inference_settings = self._get_inference_settings(state)
2302
+ logger.debug(f"Inference settings:", extra=inference_settings)
2303
+ batch_size = self._get_batch_size_from_state(state)
2304
+ video_id = get_value_for_keys(state, ["videoId", "video_id"], ignore_none=True)
2305
+ if video_id is None:
2306
+ raise ValueError("Video id is not provided")
2307
+ video_info = api.video.get_info_by_id(video_id)
2308
+ start_frame_index = get_value_for_keys(
2309
+ state, ["startFrameIndex", "start_frame_index", "start_frame"], ignore_none=True
2310
+ )
2311
+ if start_frame_index is None:
2312
+ start_frame_index = 0
2313
+ step = get_value_for_keys(state, ["stride", "step"], ignore_none=True)
2314
+ if step is None:
2315
+ step = 1
2316
+ end_frame_index = get_value_for_keys(
2317
+ state, ["endFrameIndex", "end_frame_index", "end_frame"], ignore_none=True
2318
+ )
2319
+ duration = state.get("duration", None)
2320
+ frames_count = get_value_for_keys(
2321
+ state, ["framesCount", "frames_count", "num_frames"], ignore_none=True
2322
+ )
2323
+ tracking = state.get("tracker", None)
2324
+ direction = state.get("direction", "forward")
2325
+ direction = 1 if direction == "forward" else -1
2326
+ track_id = get_value_for_keys(state, ["trackId", "track_id"], ignore_none=True)
2327
+
2328
+ if frames_count is not None:
2329
+ n_frames = frames_count
2330
+ elif end_frame_index is not None:
2331
+ n_frames = end_frame_index - start_frame_index
2332
+ elif duration is not None:
2333
+ fps = video_info.frames_count / video_info.duration
2334
+ n_frames = int(duration * fps)
2335
+ else:
2336
+ n_frames = video_info.frames_count
2337
+
2338
+ inference_request.tracker = self._tracker_init(state.get("tracker", None), state.get("tracker_settings", {}))
2339
+
2340
+ logger.debug(
2341
+ f"Video info:",
2342
+ extra=dict(
2343
+ w=video_info.frame_width,
2344
+ h=video_info.frame_height,
2345
+ start_frame_index=start_frame_index,
2346
+ n_frames=n_frames,
2347
+ ),
2348
+ )
2349
+
2350
+ # start downloading video in background
2351
+ self.cache.run_cache_task_manually(api, None, video_id=video_id)
2352
+
2353
+ progress_total = (n_frames + step - 1) // step
2354
+ inference_request.set_stage(InferenceRequest.Stage.INFERENCE, 0, progress_total)
2355
+
2356
+ _upload_f = partial(
2357
+ self.upload_predictions_to_video,
2358
+ api=api,
2359
+ video_info=video_info,
2360
+ track_id=track_id,
2361
+ context=inference_request.context,
2362
+ progress_cb=inference_request.done,
2363
+ inference_request=inference_request,
2364
+ )
2365
+
2366
+ _range = (start_frame_index, start_frame_index + direction * n_frames)
2367
+ if _range[0] > _range[1]:
2368
+ _range = (_range[1], _range[0])
2369
+
2370
+ def _notify_f(predictions: List[Prediction]):
2371
+ logger.debug(
2372
+ "Notifying tracking progress...",
2373
+ extra={
2374
+ "track_id": track_id,
2375
+ "range": _range,
2376
+ "current": inference_request.progress.current,
2377
+ "total": inference_request.progress.total,
2378
+ },
2379
+ )
2380
+ stopped = self.api.video.notify_progress(
2381
+ track_id=track_id,
2382
+ video_id=video_info.id,
2383
+ frame_start=_range[0],
2384
+ frame_end=_range[1],
2385
+ current=inference_request.progress.current,
2386
+ total=inference_request.progress.total,
2387
+ )
2388
+ if stopped:
2389
+ inference_request.stop()
2390
+ logger.info("Tracking has been stopped by user", extra={"track_id": track_id})
2391
+
2392
+ def _exception_handler(e: Exception):
2393
+ self.api.video.notify_tracking_error(
2394
+ track_id=track_id,
2395
+ error=str(type(e)),
2396
+ message=str(e),
2397
+ )
2398
+ raise e
2399
+
2400
+ with Uploader(
2401
+ upload_f=_upload_f,
2402
+ notify_f=_notify_f,
2403
+ exception_handler=_exception_handler,
2404
+ logger=logger,
2405
+ ) as uploader:
2406
+ for batch in batched(
2407
+ range(
2408
+ start_frame_index, start_frame_index + direction * n_frames, direction * step
2409
+ ),
2410
+ batch_size,
2411
+ ):
2412
+ if inference_request.is_stopped():
2413
+ logger.debug(
2414
+ f"Cancelling inference video...",
2415
+ extra={"inference_request_uuid": inference_request.uuid},
2416
+ )
2417
+ break
2418
+ logger.debug(
2419
+ f"Inferring frames {batch[0]}-{batch[-1]}:",
2420
+ )
2421
+ frames = self.cache.download_frames(
2422
+ api, video_info.id, batch, redownload_video=True
2423
+ )
2424
+ anns, slides_data = self._inference_auto(
2425
+ source=frames,
2426
+ settings=inference_settings,
2427
+ )
2428
+
2429
+ if inference_request.tracker is not None:
2430
+ anns = self._apply_tracker_to_anns(frames, anns, inference_request.tracker)
2431
+
2432
+ predictions = [
2433
+ Prediction(
2434
+ ann,
2435
+ model_meta=self.model_meta,
2436
+ frame_index=frame_index,
2437
+ video_id=video_info.id,
2438
+ dataset_id=video_info.dataset_id,
2439
+ project_id=video_info.project_id,
2440
+ )
2441
+ for ann, frame_index in zip(anns, batch)
2442
+ ]
2443
+ for pred, this_slides_data in zip(predictions, slides_data):
2444
+ pred.extra_data["slides_data"] = this_slides_data
2445
+ uploader.put(predictions)
2446
+ video_ann_json = None
2447
+ if inference_request.tracker is not None:
2448
+ inference_request.set_stage("Postprocess...", 0, 1)
2449
+ video_ann_json = inference_request.tracker.video_annotation.to_json()
2450
+ inference_request.done()
2451
+ inference_request.final_result = {"video_ann": video_ann_json}
2452
+ return video_ann_json
2453
+
2454
+
2292
2455
  def _inference_project_id(self, api: Api, state: dict, inference_request: InferenceRequest):
2293
2456
  """Inference project images.
2294
2457
  If "output_project_id" in state, upload images and annotations to the output project.
@@ -2955,6 +3118,83 @@ class Inference:
2955
3118
  inference_request.add_results(results)
2956
3119
  inference_request.done(len(results))
2957
3120
 
3121
+ def upload_predictions_to_video(
3122
+ self,
3123
+ predictions: List[Prediction],
3124
+ api: Api,
3125
+ video_info: VideoInfo,
3126
+ track_id: str,
3127
+ context: Dict,
3128
+ progress_cb=None,
3129
+ inference_request: InferenceRequest = None,
3130
+ ):
3131
+ key_id_map = KeyIdMap()
3132
+ project_meta = context.get("project_meta", None)
3133
+ if project_meta is None:
3134
+ project_meta = ProjectMeta.from_json(api.project.get_meta(video_info.project_id))
3135
+ context["project_meta"] = project_meta
3136
+ meta_changed = False
3137
+ for prediction in predictions:
3138
+ project_meta, ann, meta_changed_ = update_meta_and_ann(
3139
+ project_meta, prediction.annotation, None
3140
+ )
3141
+ prediction.annotation = ann
3142
+ meta_changed = meta_changed or meta_changed_
3143
+ if meta_changed:
3144
+ project_meta = api.project.update_meta(video_info.project_id, project_meta)
3145
+ context["project_meta"] = project_meta
3146
+
3147
+ figure_data_by_object_id = defaultdict(list)
3148
+
3149
+ tracks_to_object_ids = context.setdefault("tracks_to_object_ids", {})
3150
+ new_tracks: Dict[int, VideoObject] = {}
3151
+ for prediction in predictions:
3152
+ annotation = prediction.annotation
3153
+ tracks = annotation.custom_data
3154
+ for track, label in zip(tracks, annotation.labels):
3155
+ if track not in tracks_to_object_ids and track not in new_tracks:
3156
+ video_object = VideoObject(obj_class=label.obj_class)
3157
+ new_tracks[track] = video_object
3158
+ if new_tracks:
3159
+ tracks, video_objects = zip(*new_tracks.items())
3160
+ added_object_ids = api.video.object.append_bulk(
3161
+ video_info.id, VideoObjectCollection(video_objects), key_id_map=key_id_map
3162
+ )
3163
+ for track, object_id in zip(tracks, added_object_ids):
3164
+ tracks_to_object_ids[track] = object_id
3165
+ for prediction in predictions:
3166
+ annotation = prediction.annotation
3167
+ tracks = annotation.custom_data
3168
+ for track, label in zip(tracks, annotation.labels):
3169
+ object_id = tracks_to_object_ids[track]
3170
+ figure_data_by_object_id[object_id].append(
3171
+ {
3172
+ ApiField.OBJECT_ID: object_id,
3173
+ ApiField.GEOMETRY_TYPE: label.geometry.geometry_name(),
3174
+ ApiField.GEOMETRY: label.geometry.to_json(),
3175
+ ApiField.META: {ApiField.FRAME: prediction.frame_index},
3176
+ ApiField.TRACK_ID: track_id,
3177
+ }
3178
+ )
3179
+
3180
+ for object_id, figures_data in figure_data_by_object_id.items():
3181
+ figures_keys = [uuid.uuid4() for _ in figures_data]
3182
+ api.video.figure._append_bulk(
3183
+ entity_id=video_info.id,
3184
+ figures_json=figures_data,
3185
+ figures_keys=figures_keys,
3186
+ key_id_map=key_id_map,
3187
+ )
3188
+ logger.debug(f"Added {len(figures_data)} geometries to object #{object_id}")
3189
+ if progress_cb:
3190
+ progress_cb(len(predictions))
3191
+ if inference_request is not None:
3192
+ results = self._format_output(predictions)
3193
+ for result in results:
3194
+ result["annotation"] = None
3195
+ result["data"] = None
3196
+ inference_request.add_results(results)
3197
+
2958
3198
  def serve(self):
2959
3199
  if not self._use_gui and not self._is_cli_deploy:
2960
3200
  Progress("Deploying model ...", 1)
@@ -3352,6 +3592,22 @@ class Inference:
3352
3592
  "inference_request_uuid": inference_request.uuid,
3353
3593
  }
3354
3594
 
3595
+ @server.post("/tracking_by_detection")
3596
+ def tracking_by_detection(response: Response, request: Request):
3597
+ state = request.state.state
3598
+ context = request.state.context
3599
+ state.update(context)
3600
+ if state.get("tracker") is None:
3601
+ state["tracker"] = "botsort"
3602
+
3603
+ logger.debug("Received a request to 'tracking_by_detection'", extra={"state": state})
3604
+ self.validate_inference_state(state)
3605
+ api = self.api_from_request(request)
3606
+ inference_request, future = self.inference_requests_manager.schedule_task(
3607
+ self._tracking_by_detection, api, state
3608
+ )
3609
+ return {"message": "Track task started."}
3610
+
3355
3611
  @server.post("/inference_project_id_async")
3356
3612
  def inference_project_id_async(response: Response, request: Request):
3357
3613
  state = request.state.state
@@ -225,28 +225,48 @@ def sample_video(
225
225
  progress.miniters = 1
226
226
  progress.refresh()
227
227
 
228
- with VideoFrameReader(video_path, frame_indices) as reader:
229
- for batch in batched_iter(zip(reader, frame_indices), 10):
230
- frames, indices = zip(*batch)
231
- if resize:
232
- for frame in frames:
233
- cv2.resize(frame, [*resize, frame.shape[2]], interpolation=cv2.INTER_LINEAR)
234
-
235
- image_ids = _upload_frames(
236
- api=api,
237
- frames=frames,
238
- video_name=video_info.name,
239
- video_frames_count=video_info.frames_count,
240
- indices=indices,
241
- dataset_id=dst_dataset_info.id,
242
- sample_info=sample_info,
243
- context=context,
244
- copy_annotations=copy_annotations,
245
- video_annotation=video_annotation,
246
- )
228
+ batch_size = 50
229
+ try:
230
+ with VideoFrameReader(video_path, frame_indices) as reader:
231
+ for batch_indices in batched_iter(frame_indices, batch_size):
232
+ batch_indices_list = list(batch_indices)
233
+ frames = reader.read_batch(batch_indices_list)
234
+
235
+ if resize:
236
+ resized_frames = []
237
+ for frame in frames:
238
+ resized_frame = cv2.resize(
239
+ frame,
240
+ (resize[1], resize[0]), # (width, height)
241
+ interpolation=cv2.INTER_LINEAR,
242
+ )
243
+ resized_frames.append(resized_frame)
244
+ frames = resized_frames
245
+
246
+ image_ids = _upload_frames(
247
+ api=api,
248
+ frames=frames,
249
+ video_name=video_info.name,
250
+ video_frames_count=video_info.frames_count,
251
+ indices=batch_indices_list,
252
+ dataset_id=dst_dataset_info.id,
253
+ sample_info=sample_info,
254
+ context=context,
255
+ copy_annotations=copy_annotations,
256
+ video_annotation=video_annotation,
257
+ )
247
258
 
248
- if progress is not None:
249
- progress.update(len(image_ids))
259
+ if progress is not None:
260
+ progress.update(len(image_ids))
261
+
262
+ # Free memory after each batch
263
+ del frames
264
+ if resize:
265
+ del resized_frames
266
+ finally:
267
+ import os
268
+ if os.path.exists(video_path):
269
+ os.remove(video_path)
250
270
 
251
271
 
252
272
  def _get_or_create_dst_dataset(
@@ -537,11 +537,9 @@ class VideoFrameReader:
537
537
  try:
538
538
  import decord
539
539
 
540
- self.vr = decord.VideoReader(str(self.video_path))
540
+ self.vr = decord.VideoReader(str(self.video_path), num_threads=1)
541
541
  except ImportError:
542
- default_logger.debug(
543
- "Decord is not installed. Falling back to OpenCV for video reading."
544
- )
542
+ default_logger.debug("Decord is not installed. Falling back to OpenCV for video reading.")
545
543
  self.cap = cv2.VideoCapture(str(self.video_path))
546
544
 
547
545
  def close(self):
@@ -562,24 +560,30 @@ class VideoFrameReader:
562
560
  def __del__(self):
563
561
  self.close()
564
562
 
565
- def iterate_frames(self, frame_indexes: List[int] = None) -> Generator[np.ndarray, None, None]:
563
+ def iterate_frames(self, frame_indexes: Optional[List[int]] = None) -> Generator[np.ndarray, None, None]:
566
564
  self._ensure_initialized()
567
565
  if frame_indexes is None:
568
566
  frame_indexes = self.frame_indexes
569
567
  if self.vr is not None:
568
+ # Decord
570
569
  if frame_indexes is None:
571
570
  frame_indexes = range(len(self.vr))
572
- for frame_index in frame_indexes:
573
- frame = self.vr[frame_index]
574
- yield frame.asnumpy()
571
+ for idx in frame_indexes:
572
+ arr = self.vr[idx].asnumpy()
573
+ yield arr
574
+ del arr
575
575
  else:
576
+ # OpenCV fallback
576
577
  if frame_indexes is None:
577
578
  frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
578
579
  frame_indexes = range(frame_count)
579
580
  for frame_index in frame_indexes:
580
- if 1 > frame_index - self.prev_idx < 20:
581
+ if 1 < frame_index - self.prev_idx < 20:
581
582
  while self.prev_idx < frame_index - 1:
582
- self.cap.read()
583
+ ok, _ = self.cap.read()
584
+ if not ok:
585
+ break
586
+ self.prev_idx += 1
583
587
  if frame_index != self.prev_idx + 1:
584
588
  self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
585
589
  ret, frame = self.cap.read()
@@ -588,6 +592,17 @@ class VideoFrameReader:
588
592
  yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
589
593
  self.prev_idx = frame_index
590
594
 
595
+ def read_batch(self, frame_indexes: List[int]) -> List[np.ndarray]:
596
+ self._ensure_initialized()
597
+ if self.vr is not None:
598
+ batch_nd = self.vr.get_batch(frame_indexes)
599
+ batch_np = batch_nd.asnumpy()
600
+ frames = [batch_np[i].copy() for i in range(batch_np.shape[0])]
601
+ del batch_np
602
+ return frames
603
+ else:
604
+ return list(self.iterate_frames(frame_indexes))
605
+
591
606
  def read_frames(self, frame_indexes: List[int] = None) -> List[np.ndarray]:
592
607
  return list(self.iterate_frames(frame_indexes))
593
608
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: supervisely
3
- Version: 6.73.462
3
+ Version: 6.73.464
4
4
  Summary: Supervisely Python SDK.
5
5
  Home-page: https://github.com/supervisely/supervisely
6
6
  Author: Supervisely
@@ -906,7 +906,7 @@ supervisely/nn/benchmark/visualization/widgets/table/__init__.py,sha256=47DEQpj8
906
906
  supervisely/nn/benchmark/visualization/widgets/table/table.py,sha256=atmDnF1Af6qLQBUjLhK18RMDKAYlxnsuVHMSEa5a-e8,4319
907
907
  supervisely/nn/inference/__init__.py,sha256=QFukX2ip-U7263aEPCF_UCFwj6EujbMnsgrXp5Bbt8I,1623
908
908
  supervisely/nn/inference/cache.py,sha256=Hkxvu70rrB-j7ztQ4TBOxQePAxiKS7Erdb2FmK7aetY,35795
909
- supervisely/nn/inference/inference.py,sha256=54SXkXYEyswQN1L9hbOn0luSLyWbFOoaSH1qzNfu7HQ,219687
909
+ supervisely/nn/inference/inference.py,sha256=2Unz6m_AnzVWTKy1Jv9G6VYYLvzu1Mbdl17SKIPZkAA,230530
910
910
  supervisely/nn/inference/inference_request.py,sha256=yuqEL4BWjC-aKze_raGScEQyhHe8loYb_eNhGPsf2-4,14870
911
911
  supervisely/nn/inference/session.py,sha256=WRJKVnmh5GPXnwtuKJn7AO1C7Td39wZo774ZIVQJGYk,36228
912
912
  supervisely/nn/inference/uploader.py,sha256=Dn5MfMRq7tclEWpP0B9fJjTiQPBpwumfXxC8-lOYgnM,5659
@@ -1090,8 +1090,8 @@ supervisely/user/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
1090
1090
  supervisely/user/user.py,sha256=4GSVIupPAxWjIxZmUtH3Dtms_vGV82-49kM_aaR2gBI,319
1091
1091
  supervisely/video/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
1092
1092
  supervisely/video/import_utils.py,sha256=b1Nl0gscNsV0iB9nWPeqt8GrkhOeuTZsN1p-d3gDUmE,544
1093
- supervisely/video/sampling.py,sha256=PGZVP1V9pDzdMsGThwS7U8E4VS6h1ba0nvpjVshIPfg,20248
1094
- supervisely/video/video.py,sha256=nG1TE4MEvoh-_pfTTOx44dzqRq2VqLljmUnQ8r1czUY,20799
1093
+ supervisely/video/sampling.py,sha256=SA1HeS1yK0-w7oHrojuCQJIAO5UAJuO6zrdOgeE1Twc,20979
1094
+ supervisely/video/video.py,sha256=ufwyec2d9ekV3_CLy4VhOj3Ni0gcXIerIBHtC1KGzTQ,21400
1095
1095
  supervisely/video_annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
1096
1096
  supervisely/video_annotation/constants.py,sha256=_gW9iMhVk1w_dUaFiaiyXn66mt13S6bkxC64xpjP-CU,529
1097
1097
  supervisely/video_annotation/frame.py,sha256=np21FqavJ3xW9VbLbohifDwZQtF5dWIsNSGVSjn-NnY,10574
@@ -1129,9 +1129,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
1129
1129
  supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
1130
1130
  supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
1131
1131
  supervisely_lib/__init__.py,sha256=yRwzEQmVwSd6lUQoAUdBngKEOlnoQ6hA9ZcoZGJRNC4,331
1132
- supervisely-6.73.462.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
1133
- supervisely-6.73.462.dist-info/METADATA,sha256=OzYjw5iRdUCofQdUCsd5xkjgEOSl600QFG7lHM7GAlU,35604
1134
- supervisely-6.73.462.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
1135
- supervisely-6.73.462.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
1136
- supervisely-6.73.462.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
1137
- supervisely-6.73.462.dist-info/RECORD,,
1132
+ supervisely-6.73.464.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
1133
+ supervisely-6.73.464.dist-info/METADATA,sha256=LetWCHVdHoghuOfeeVXPlpSzx46mBvxCsBpAf1P1uYs,35604
1134
+ supervisely-6.73.464.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
1135
+ supervisely-6.73.464.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
1136
+ supervisely-6.73.464.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
1137
+ supervisely-6.73.464.dist-info/RECORD,,