supervisely 6.73.410-py3-none-any.whl → 6.73.470-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
- supervisely/__init__.py +136 -1
- supervisely/_utils.py +81 -0
- supervisely/annotation/json_geometries_map.py +2 -0
- supervisely/annotation/label.py +80 -3
- supervisely/api/annotation_api.py +9 -9
- supervisely/api/api.py +67 -43
- supervisely/api/app_api.py +72 -5
- supervisely/api/dataset_api.py +108 -33
- supervisely/api/entity_annotation/figure_api.py +113 -49
- supervisely/api/image_api.py +82 -0
- supervisely/api/module_api.py +10 -0
- supervisely/api/nn/deploy_api.py +15 -9
- supervisely/api/nn/ecosystem_models_api.py +201 -0
- supervisely/api/nn/neural_network_api.py +12 -3
- supervisely/api/pointcloud/pointcloud_api.py +38 -0
- supervisely/api/pointcloud/pointcloud_episode_annotation_api.py +3 -0
- supervisely/api/project_api.py +213 -6
- supervisely/api/task_api.py +11 -1
- supervisely/api/video/video_annotation_api.py +4 -2
- supervisely/api/video/video_api.py +79 -1
- supervisely/api/video/video_figure_api.py +24 -11
- supervisely/api/volume/volume_api.py +38 -0
- supervisely/app/__init__.py +1 -1
- supervisely/app/content.py +14 -6
- supervisely/app/fastapi/__init__.py +1 -0
- supervisely/app/fastapi/custom_static_files.py +1 -1
- supervisely/app/fastapi/multi_user.py +88 -0
- supervisely/app/fastapi/subapp.py +175 -42
- supervisely/app/fastapi/templating.py +1 -1
- supervisely/app/fastapi/websocket.py +77 -9
- supervisely/app/singleton.py +21 -0
- supervisely/app/v1/app_service.py +18 -2
- supervisely/app/v1/constants.py +7 -1
- supervisely/app/widgets/__init__.py +11 -1
- supervisely/app/widgets/agent_selector/template.html +1 -0
- supervisely/app/widgets/card/card.py +20 -0
- supervisely/app/widgets/dataset_thumbnail/dataset_thumbnail.py +11 -2
- supervisely/app/widgets/dataset_thumbnail/template.html +3 -1
- supervisely/app/widgets/deploy_model/deploy_model.py +750 -0
- supervisely/app/widgets/dialog/dialog.py +12 -0
- supervisely/app/widgets/dialog/template.html +2 -1
- supervisely/app/widgets/dropdown_checkbox_selector/__init__.py +0 -0
- supervisely/app/widgets/dropdown_checkbox_selector/dropdown_checkbox_selector.py +87 -0
- supervisely/app/widgets/dropdown_checkbox_selector/template.html +12 -0
- supervisely/app/widgets/ecosystem_model_selector/__init__.py +0 -0
- supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +195 -0
- supervisely/app/widgets/experiment_selector/experiment_selector.py +454 -263
- supervisely/app/widgets/fast_table/fast_table.py +713 -126
- supervisely/app/widgets/fast_table/script.js +492 -95
- supervisely/app/widgets/fast_table/style.css +54 -0
- supervisely/app/widgets/fast_table/template.html +45 -5
- supervisely/app/widgets/heatmap/__init__.py +0 -0
- supervisely/app/widgets/heatmap/heatmap.py +523 -0
- supervisely/app/widgets/heatmap/script.js +378 -0
- supervisely/app/widgets/heatmap/style.css +227 -0
- supervisely/app/widgets/heatmap/template.html +21 -0
- supervisely/app/widgets/input_tag/input_tag.py +102 -15
- supervisely/app/widgets/input_tag_list/__init__.py +0 -0
- supervisely/app/widgets/input_tag_list/input_tag_list.py +274 -0
- supervisely/app/widgets/input_tag_list/template.html +70 -0
- supervisely/app/widgets/radio_table/radio_table.py +10 -2
- supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
- supervisely/app/widgets/radio_tabs/template.html +1 -0
- supervisely/app/widgets/select/select.py +6 -4
- supervisely/app/widgets/select_dataset/select_dataset.py +6 -0
- supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +83 -7
- supervisely/app/widgets/table/table.py +68 -13
- supervisely/app/widgets/tabs/tabs.py +22 -6
- supervisely/app/widgets/tabs/template.html +5 -1
- supervisely/app/widgets/transfer/style.css +3 -0
- supervisely/app/widgets/transfer/template.html +3 -1
- supervisely/app/widgets/transfer/transfer.py +48 -45
- supervisely/app/widgets/tree_select/tree_select.py +2 -0
- supervisely/convert/image/csv/csv_converter.py +24 -15
- supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +43 -41
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +75 -51
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +137 -124
- supervisely/convert/video/video_converter.py +2 -2
- supervisely/geometry/polyline_3d.py +110 -0
- supervisely/io/env.py +161 -1
- supervisely/nn/artifacts/__init__.py +1 -1
- supervisely/nn/artifacts/artifacts.py +10 -2
- supervisely/nn/artifacts/detectron2.py +1 -0
- supervisely/nn/artifacts/hrda.py +1 -0
- supervisely/nn/artifacts/mmclassification.py +20 -0
- supervisely/nn/artifacts/mmdetection.py +5 -3
- supervisely/nn/artifacts/mmsegmentation.py +1 -0
- supervisely/nn/artifacts/ritm.py +1 -0
- supervisely/nn/artifacts/rtdetr.py +1 -0
- supervisely/nn/artifacts/unet.py +1 -0
- supervisely/nn/artifacts/utils.py +3 -0
- supervisely/nn/artifacts/yolov5.py +2 -0
- supervisely/nn/artifacts/yolov8.py +1 -0
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +18 -18
- supervisely/nn/experiments.py +9 -0
- supervisely/nn/inference/cache.py +37 -17
- supervisely/nn/inference/gui/serving_gui_template.py +39 -13
- supervisely/nn/inference/inference.py +953 -211
- supervisely/nn/inference/inference_request.py +15 -8
- supervisely/nn/inference/instance_segmentation/instance_segmentation.py +1 -0
- supervisely/nn/inference/object_detection/object_detection.py +1 -0
- supervisely/nn/inference/predict_app/__init__.py +0 -0
- supervisely/nn/inference/predict_app/gui/__init__.py +0 -0
- supervisely/nn/inference/predict_app/gui/classes_selector.py +160 -0
- supervisely/nn/inference/predict_app/gui/gui.py +915 -0
- supervisely/nn/inference/predict_app/gui/input_selector.py +344 -0
- supervisely/nn/inference/predict_app/gui/model_selector.py +77 -0
- supervisely/nn/inference/predict_app/gui/output_selector.py +179 -0
- supervisely/nn/inference/predict_app/gui/preview.py +93 -0
- supervisely/nn/inference/predict_app/gui/settings_selector.py +881 -0
- supervisely/nn/inference/predict_app/gui/tags_selector.py +110 -0
- supervisely/nn/inference/predict_app/gui/utils.py +399 -0
- supervisely/nn/inference/predict_app/predict_app.py +176 -0
- supervisely/nn/inference/session.py +47 -39
- supervisely/nn/inference/tracking/bbox_tracking.py +5 -1
- supervisely/nn/inference/tracking/point_tracking.py +5 -1
- supervisely/nn/inference/tracking/tracker_interface.py +4 -0
- supervisely/nn/inference/uploader.py +9 -5
- supervisely/nn/model/model_api.py +44 -22
- supervisely/nn/model/prediction.py +15 -1
- supervisely/nn/model/prediction_session.py +70 -14
- supervisely/nn/prediction_dto.py +7 -0
- supervisely/nn/tracker/__init__.py +6 -8
- supervisely/nn/tracker/base_tracker.py +54 -0
- supervisely/nn/tracker/botsort/__init__.py +1 -0
- supervisely/nn/tracker/botsort/botsort_config.yaml +30 -0
- supervisely/nn/tracker/botsort/osnet_reid/__init__.py +0 -0
- supervisely/nn/tracker/botsort/osnet_reid/osnet.py +566 -0
- supervisely/nn/tracker/botsort/osnet_reid/osnet_reid_interface.py +88 -0
- supervisely/nn/tracker/botsort/tracker/__init__.py +0 -0
- supervisely/nn/tracker/{bot_sort → botsort/tracker}/basetrack.py +1 -2
- supervisely/nn/tracker/{utils → botsort/tracker}/gmc.py +51 -59
- supervisely/nn/tracker/{deep_sort/deep_sort → botsort/tracker}/kalman_filter.py +71 -33
- supervisely/nn/tracker/botsort/tracker/matching.py +202 -0
- supervisely/nn/tracker/{bot_sort/bot_sort.py → botsort/tracker/mc_bot_sort.py} +68 -81
- supervisely/nn/tracker/botsort_tracker.py +273 -0
- supervisely/nn/tracker/calculate_metrics.py +264 -0
- supervisely/nn/tracker/utils.py +273 -0
- supervisely/nn/tracker/visualize.py +520 -0
- supervisely/nn/training/gui/gui.py +152 -49
- supervisely/nn/training/gui/hyperparameters_selector.py +1 -1
- supervisely/nn/training/gui/model_selector.py +8 -6
- supervisely/nn/training/gui/train_val_splits_selector.py +144 -71
- supervisely/nn/training/gui/training_artifacts.py +3 -1
- supervisely/nn/training/train_app.py +225 -46
- supervisely/project/pointcloud_episode_project.py +12 -8
- supervisely/project/pointcloud_project.py +12 -8
- supervisely/project/project.py +221 -75
- supervisely/template/experiment/experiment.html.jinja +105 -55
- supervisely/template/experiment/experiment_generator.py +258 -112
- supervisely/template/experiment/header.html.jinja +31 -13
- supervisely/template/experiment/sly-style.css +7 -2
- supervisely/versions.json +3 -1
- supervisely/video/sampling.py +42 -20
- supervisely/video/video.py +41 -12
- supervisely/video_annotation/video_figure.py +38 -4
- supervisely/volume/stl_converter.py +2 -0
- supervisely/worker_api/agent_rpc.py +24 -1
- supervisely/worker_api/rpc_servicer.py +31 -7
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/METADATA +22 -14
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/RECORD +167 -148
- supervisely_lib/__init__.py +6 -1
- supervisely/app/widgets/experiment_selector/style.css +0 -27
- supervisely/app/widgets/experiment_selector/template.html +0 -61
- supervisely/nn/tracker/bot_sort/__init__.py +0 -21
- supervisely/nn/tracker/bot_sort/fast_reid_interface.py +0 -152
- supervisely/nn/tracker/bot_sort/matching.py +0 -127
- supervisely/nn/tracker/bot_sort/sly_tracker.py +0 -401
- supervisely/nn/tracker/deep_sort/__init__.py +0 -6
- supervisely/nn/tracker/deep_sort/deep_sort/__init__.py +0 -1
- supervisely/nn/tracker/deep_sort/deep_sort/detection.py +0 -49
- supervisely/nn/tracker/deep_sort/deep_sort/iou_matching.py +0 -81
- supervisely/nn/tracker/deep_sort/deep_sort/linear_assignment.py +0 -202
- supervisely/nn/tracker/deep_sort/deep_sort/nn_matching.py +0 -176
- supervisely/nn/tracker/deep_sort/deep_sort/track.py +0 -166
- supervisely/nn/tracker/deep_sort/deep_sort/tracker.py +0 -145
- supervisely/nn/tracker/deep_sort/deep_sort.py +0 -301
- supervisely/nn/tracker/deep_sort/generate_clip_detections.py +0 -90
- supervisely/nn/tracker/deep_sort/preprocessing.py +0 -70
- supervisely/nn/tracker/deep_sort/sly_tracker.py +0 -273
- supervisely/nn/tracker/tracker.py +0 -285
- supervisely/nn/tracker/utils/kalman_filter.py +0 -492
- supervisely/nn/tracking/__init__.py +0 -1
- supervisely/nn/tracking/boxmot.py +0 -114
- supervisely/nn/tracking/tracking.py +0 -24
- /supervisely/{nn/tracker/utils → app/widgets/deploy_model}/__init__.py +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/LICENSE +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/WHEEL +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/top_level.txt +0 -0
supervisely/project/project.py
CHANGED
@@ -3,6 +3,7 @@
 from __future__ import annotations

 import asyncio
+import gc
 import io
 import json
 import os
@@ -26,6 +27,7 @@ from typing import (

 import aiofiles
 import numpy as np
+from PIL import Image as PILImage
 from tqdm import tqdm

 import supervisely as sly
@@ -3333,7 +3335,6 @@ class Project:

         return train_items, val_items

-
    @staticmethod
    def download(
        api: Api,
@@ -4583,6 +4584,7 @@ def upload_project(
    blob_file_infos = []

    for ds_fs in project_fs.datasets:
+        logger.debug(f"Processing dataset: {ds_fs.name}")
        if len(ds_fs.parents) > 0:
            parent = f"{os.path.sep}".join(ds_fs.parents)
            parent_id = dataset_map.get(parent)
@@ -4623,8 +4625,15 @@ def upload_project(
        if os.path.isfile(path):
            valid_indices.append(i)
            valid_paths.append(path)
-
+        elif len(project_fs.blob_files) > 0:
            offset_indices.append(i)
+        else:
+            if img_infos[i] is not None:
+                logger.debug(f"Image will be uploaded by image_info: {names[i]}")
+            else:
+                logger.warning(
+                    f"Image and image info file not found, image will be skipped: {names[i]}"
+                )
    img_paths = valid_paths
    ann_paths = list(filter(lambda x: os.path.isfile(x), ann_paths))
    # Create a mapping from name to index position for quick lookups
@@ -5612,6 +5621,7 @@ async def _download_project_async(
    blob_files_to_download = {}
    blob_images = []

+    sly.logger.info("Calculating images to download...", extra={"dataset": dataset.name})
    async for image_batch in all_images:
        for image in image_batch:
            if images_ids is None or image.id in images_ids:
@@ -5621,7 +5631,7 @@ async def _download_project_async(
                if download_blob_files and image.related_data_id is not None:
                    blob_files_to_download[image.related_data_id] = image.download_id
                    blob_images.append(image)
-                elif image.size < switch_size:
+                elif image.size is not None and image.size < switch_size:
                    small_images.append(image)
                else:
                    large_images.append(image)
@@ -5655,16 +5665,55 @@ async def _download_project_async(
            ds_progress(1)
        return to_download

-    async def
-
-
-
-
-
+    async def run_tasks_with_semaphore_control(task_list: list, delay=0.05):
+        """
+        Execute tasks with semaphore control - create tasks only as semaphore permits become available.
+        task_list - list of coroutines or callables that create tasks
+        """
+        random.shuffle(task_list)
+        running_tasks = set()
+        max_concurrent = getattr(semaphore, "_value", 10)
+
+        task_iter = iter(task_list)
+        completed_count = 0
+
+        while True:
+            # Add new tasks while we have capacity
+            while len(running_tasks) < max_concurrent:
+                try:
+                    task_gen = next(task_iter)
+                    if callable(task_gen):
+                        task = asyncio.create_task(task_gen())
+                    else:
+                        task = asyncio.create_task(task_gen)
+                    running_tasks.add(task)
+                    await asyncio.sleep(delay)
+                except StopIteration:
+                    break
+
+            if not running_tasks:
+                break
+
+            # Wait for at least one task to complete
+            done, running_tasks = await asyncio.wait(
+                running_tasks, return_when=asyncio.FIRST_COMPLETED
+            )
+
+            # Process completed tasks
+            for task in done:
+                completed_count += 1
+                try:
+                    await task
+                except Exception as e:
+                    logger.error(f"Task error: {e}")
+
+            # Clear the done set - this should be enough for memory cleanup
+            done.clear()
+
        logger.debug(
-            f"{
+            f"{completed_count} tasks have been completed for dataset ID: {dataset.id}, Name: {dataset.name}"
        )
-        return
+        return completed_count

    # Download blob files if required
    if download_blob_files and len(blob_files_to_download) > 0:
@@ -5728,19 +5777,24 @@ async def _download_project_async(
            progress_cb=ds_progress,
        )
        offset_tasks.append(offset_task)
-
-    await asyncio.gather(*created_tasks)
+    await run_tasks_with_semaphore_control(offset_tasks, 0.05)

    tasks = []
-
-
-
+    if resume_download is True:
+        sly.logger.info("Checking existing images...", extra={"dataset": dataset.name})
+        # Check which images need to be downloaded
+        small_images = await check_items(small_images)
+        large_images = await check_items(large_images)

    # If only one small image, treat it as a large image for efficiency
    if len(small_images) == 1:
        large_images.append(small_images.pop())

    # Create batch download tasks
+    sly.logger.debug(
+        f"Downloading {len(small_images)} small images in batch number {len(small_images) // batch_size}...",
+        extra={"dataset": dataset.name},
+    )
    for images_batch in batched(small_images, batch_size=batch_size):
        task = _download_project_items_batch_async(
            api=api,
@@ -5758,6 +5812,10 @@ async def _download_project_async(
        tasks.append(task)

    # Create individual download tasks for large images
+    sly.logger.debug(
+        f"Downloading {len(large_images)} large images one by one...",
+        extra={"dataset": dataset.name},
+    )
    for image in large_images:
        task = _download_project_item_async(
            api=api,
@@ -5773,8 +5831,7 @@ async def _download_project_async(
        )
        tasks.append(task)

-
-    await asyncio.gather(*created_tasks)
+    await run_tasks_with_semaphore_control(tasks)

    if save_image_meta:
        meta_dir = dataset_fs.meta_dir
@@ -5815,20 +5872,10 @@ async def _download_project_item_async(
 ) -> None:
    """Download image and annotation from Supervisely API and save it to the local filesystem.
    Uses parameters from the parent function _download_project_async.
+    Optimized version - uses streaming only for large images (>5MB) to avoid performance degradation.
    """
-    if save_images:
-        logger.debug(
-            f"Downloading 1 image in single mode with _download_project_item_async. ID: {img_info.id}, Name: {img_info.name}"
-        )
-        img_bytes = await api.image.download_bytes_single_async(
-            img_info.id, semaphore=semaphore, check_hash=True
-        )
-        if None in [img_info.height, img_info.width]:
-            width, height = sly.image.get_size_from_bytes(img_bytes)
-            img_info = img_info._replace(height=height, width=width)
-    else:
-        img_bytes = None

+    # Prepare annotation first (small data)
    if only_image_tags is False:
        ann_info = await api.annotation.download_async(
            img_info.id,
@@ -5853,13 +5900,84 @@ async def _download_project_item_async(
        tmp_ann = Annotation(img_size=(img_info.height, img_info.width), img_tags=tags)
        ann_json = tmp_ann.to_json()

-
-
-
-
-
-
-
+    # Handle image download - choose method based on estimated size
+    if save_images:
+        # Estimate size threshold: 5MB for streaming to avoid performance degradation
+        size_threshold_for_streaming = 5 * 1024 * 1024  # 5MB
+        estimated_size = getattr(img_info, "size", 0) or (
+            img_info.height * img_info.width * 3 if img_info.height and img_info.width else 0
+        )
+
+        if estimated_size > size_threshold_for_streaming:
+            # Use streaming for large images only
+            sly.logger.trace(
+                f"Downloading large image in streaming mode: {img_info.size / 1024 / 1024:.1f}MB"
+            )
+
+            # Clean up existing item first
+            dataset_fs.delete_item(img_info.name)
+
+            final_path = dataset_fs.generate_item_path(img_info.name)
+            temp_path = final_path + ".tmp"
+            await api.image.download_path_async(
+                img_info.id, temp_path, semaphore=semaphore, check_hash=True
+            )
+
+            # Get dimensions if needed
+            if None in [img_info.height, img_info.width]:
+                # Use PIL directly on the file - it will only read the minimal header needed
+                with PILImage.open(temp_path) as image:
+                    width, height = image.size
+                img_info = img_info._replace(height=height, width=width)
+
+                # Update annotation with correct dimensions if needed
+                if None in tmp_ann.img_size:
+                    tmp_ann = tmp_ann.clone(img_size=(img_info.height, img_info.width))
+                    ann_json = tmp_ann.to_json()
+
+            # os.rename is atomic and will overwrite the destination if it exists
+            os.rename(temp_path, final_path)
+
+            # For streaming, we save directly to filesystem, so use add_item_raw_bytes_async with None
+            await dataset_fs.add_item_raw_bytes_async(
+                item_name=img_info.name,
+                item_raw_bytes=None,  # Image already saved to disk
+                ann=ann_json,
+                img_info=img_info if save_image_info is True else None,
+            )
+        else:
+            sly.logger.trace(f"Downloading large image: {img_info.size / 1024 / 1024:.1f}MB")
+            # Use fast in-memory download for small images
+            img_bytes = await api.image.download_bytes_single_async(
+                img_info.id, semaphore=semaphore, check_hash=True
+            )
+
+            if None in [img_info.height, img_info.width]:
+                width, height = sly.image.get_size_from_bytes(img_bytes)
+                img_info = img_info._replace(height=height, width=width)
+
+                # Update annotation with correct dimensions if needed
+                if None in tmp_ann.img_size:
+                    tmp_ann = tmp_ann.clone(img_size=(img_info.height, img_info.width))
+                    ann_json = tmp_ann.to_json()
+
+            # Clean up existing item first, then save new one
+            dataset_fs.delete_item(img_info.name)
+            await dataset_fs.add_item_raw_bytes_async(
+                item_name=img_info.name,
+                item_raw_bytes=img_bytes,
+                ann=ann_json,
+                img_info=img_info if save_image_info is True else None,
+            )
+    else:
+        dataset_fs.delete_item(img_info.name)
+        await dataset_fs.add_item_raw_bytes_async(
+            item_name=img_info.name,
+            item_raw_bytes=None,
+            ann=ann_json,
+            img_info=img_info if save_image_info is True else None,
+        )
+
    if progress_cb is not None:
        progress_cb(1)
    logger.debug(f"Single project item has been downloaded. Semaphore state: {semaphore._value}")
@@ -5882,32 +6000,14 @@ async def _download_project_items_batch_async(
    Download images and annotations from Supervisely API and save them to the local filesystem.
    Uses parameters from the parent function _download_project_async.
    It is used for batch download of images and annotations with the bulk download API methods.
+
+    IMPORTANT: The total size of all images in a batch must not exceed 130MB, and the size of each image must not exceed 1.28MB.
    """
-
-
-    imgs_bytes = [None] * len(img_ids)
-    temp_dict = {}
-    logger.debug(
-        f"Downloading {len(img_ids)} images in bulk with _download_project_items_batch_async"
-    )
-    async for img_id, img_bytes in api.image.download_bytes_generator_async(
-        dataset_id,
-        img_ids,
-        semaphore=semaphore,
-        check_hash=True,
-    ):
-        temp_dict[img_id] = img_bytes
-    # to be sure that the order is correct
-    for idx, img_id in enumerate(img_ids):
-        imgs_bytes[idx] = temp_dict[img_id]
-    for img_info, img_bytes in zip(img_infos, imgs_bytes):
-        if None in [img_info.height, img_info.width]:
-            width, height = sly.image.get_size_from_bytes(img_bytes)
-            img_info = img_info._replace(height=height, width=width)
-    else:
-        img_ids = [img_info.id for img_info in img_infos]
-        imgs_bytes = [None] * len(img_infos)
+    img_ids = [img_info.id for img_info in img_infos]
+    img_ids_to_info = {img_info.id: img_info for img_info in img_infos}

+    sly.logger.trace(f"Downloading {len(img_infos)} images in batch mode.")
+    # Download annotations first
    if only_image_tags is False:
        ann_infos = await api.annotation.download_bulk_async(
            dataset_id,
@@ -5915,20 +6015,20 @@ async def _download_project_items_batch_async(
            semaphore=semaphore,
            force_metadata_for_links=not save_images,
        )
-
+        id_to_annotation = {}
        for img_info, ann_info in zip(img_infos, ann_infos):
            try:
                tmp_ann = Annotation.from_json(ann_info.annotation, meta)
                if None in tmp_ann.img_size:
                    tmp_ann = tmp_ann.clone(img_size=(img_info.height, img_info.width))
-
+                id_to_annotation[img_info.id] = tmp_ann.to_json()
            except Exception:
                logger.error(
                    f"Error while deserializing annotation for image with ID: {img_info.id}"
                )
                raise
    else:
-
+        id_to_annotation = {}
        for img_info in img_infos:
            tags = TagCollection.from_api_response(
                img_info.tags,
@@ -5936,17 +6036,63 @@ async def _download_project_items_batch_async(
                id_to_tagmeta,
            )
            tmp_ann = Annotation(img_size=(img_info.height, img_info.width), img_tags=tags)
-
-
-
-
-
-
-
-
-
-
-
+            id_to_annotation[img_info.id] = tmp_ann.to_json()
+
+    if save_images:
+        async for img_id, img_bytes in api.image.download_bytes_generator_async(
+            dataset_id=dataset_id, img_ids=img_ids, semaphore=semaphore, check_hash=True
+        ):
+            img_info = img_ids_to_info.get(img_id)
+            if img_info is None:
+                continue
+
+            if None in [img_info.height, img_info.width]:
+                width, height = sly.image.get_size_from_bytes(img_bytes)
+                img_info = img_info._replace(height=height, width=width)
+
+            # Update annotation if needed - use pop to get and remove at the same time
+            ann_json = id_to_annotation.pop(img_id, None)
+            if ann_json is not None:
+                try:
+                    tmp_ann = Annotation.from_json(ann_json, meta)
+                    if None in tmp_ann.img_size:
+                        tmp_ann = tmp_ann.clone(img_size=(img_info.height, img_info.width))
+                        ann_json = tmp_ann.to_json()
+                except Exception:
+                    pass
+            else:
+                ann_json = id_to_annotation.pop(img_id, None)
+
+            dataset_fs.delete_item(img_info.name)
+            await dataset_fs.add_item_raw_bytes_async(
+                item_name=img_info.name,
+                item_raw_bytes=img_bytes,
+                ann=ann_json,
+                img_info=img_info if save_image_info is True else None,
+            )
+
+            if progress_cb is not None:
+                progress_cb(1)
+    else:
+        for img_info in img_infos:
+            dataset_fs.delete_item(img_info.name)
+            ann_json = id_to_annotation.pop(img_info.id, None)
+            await dataset_fs.add_item_raw_bytes_async(
+                item_name=img_info.name,
+                item_raw_bytes=None,
+                ann=ann_json,
+                img_info=img_info if save_image_info is True else None,
+            )
+            if progress_cb is not None:
+                progress_cb(1)
+
+    # Clear dictionaries and force GC for large batches only
+    batch_size = len(img_infos)
+    id_to_annotation.clear()
+    img_ids_to_info.clear()
+
+    if batch_size > 50:  # Only for large batches
+        gc.collect()

    logger.debug(f"Batch of project items has been downloaded. Semaphore state: {semaphore._value}")

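The scheduling helper added above caps how many download coroutines run at once instead of gathering them all eagerly. The following standalone sketch illustrates the same bounded-concurrency pattern outside of the SDK; the names run_bounded, download_one, and MAX_CONCURRENT are illustrative and not part of the supervisely API.

# A standalone sketch of the bounded-concurrency pattern used by
# run_tasks_with_semaphore_control above. All names here (run_bounded,
# download_one, MAX_CONCURRENT) are illustrative, not supervisely API.
import asyncio
import random

MAX_CONCURRENT = 5  # plays the role of the semaphore's permit count


async def download_one(i: int) -> None:
    # Stand-in for a single download coroutine.
    await asyncio.sleep(random.uniform(0.05, 0.2))


async def run_bounded(coros, delay: float = 0.05) -> int:
    """Start new tasks only while fewer than MAX_CONCURRENT are running."""
    pending = iter(coros)
    running, completed = set(), 0
    while True:
        # Top up the running set until the concurrency budget is used.
        while len(running) < MAX_CONCURRENT:
            try:
                running.add(asyncio.create_task(next(pending)))
                await asyncio.sleep(delay)  # stagger task creation
            except StopIteration:
                break
        if not running:
            break  # nothing left to start or to wait for
        # Wait for at least one task to finish, then refill the budget.
        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            completed += 1
            try:
                await task  # surface exceptions without cancelling the rest
            except Exception as exc:
                print(f"task error: {exc}")
    return completed


if __name__ == "__main__":
    total = asyncio.run(run_bounded(download_one(i) for i in range(20)))
    print(f"{total} tasks completed")

Creating tasks only as slots free up keeps peak memory roughly proportional to the concurrency limit rather than to the full task list, which matches the intent of run_tasks_with_semaphore_control in the diff above.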