lightly-studio 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lightly-studio might be problematic. Click here for more details.
- lightly_studio/__init__.py +11 -0
- lightly_studio/api/__init__.py +0 -0
- lightly_studio/api/app.py +110 -0
- lightly_studio/api/cache.py +77 -0
- lightly_studio/api/db.py +133 -0
- lightly_studio/api/db_tables.py +32 -0
- lightly_studio/api/features.py +7 -0
- lightly_studio/api/routes/api/annotation.py +233 -0
- lightly_studio/api/routes/api/annotation_label.py +90 -0
- lightly_studio/api/routes/api/annotation_task.py +38 -0
- lightly_studio/api/routes/api/classifier.py +387 -0
- lightly_studio/api/routes/api/dataset.py +182 -0
- lightly_studio/api/routes/api/dataset_tag.py +257 -0
- lightly_studio/api/routes/api/exceptions.py +96 -0
- lightly_studio/api/routes/api/features.py +17 -0
- lightly_studio/api/routes/api/metadata.py +37 -0
- lightly_studio/api/routes/api/metrics.py +80 -0
- lightly_studio/api/routes/api/sample.py +196 -0
- lightly_studio/api/routes/api/settings.py +45 -0
- lightly_studio/api/routes/api/status.py +19 -0
- lightly_studio/api/routes/api/text_embedding.py +48 -0
- lightly_studio/api/routes/api/validators.py +17 -0
- lightly_studio/api/routes/healthz.py +13 -0
- lightly_studio/api/routes/images.py +104 -0
- lightly_studio/api/routes/webapp.py +51 -0
- lightly_studio/api/server.py +82 -0
- lightly_studio/core/__init__.py +0 -0
- lightly_studio/core/dataset.py +523 -0
- lightly_studio/core/sample.py +77 -0
- lightly_studio/core/start_gui.py +15 -0
- lightly_studio/dataset/__init__.py +0 -0
- lightly_studio/dataset/edge_embedding_generator.py +144 -0
- lightly_studio/dataset/embedding_generator.py +91 -0
- lightly_studio/dataset/embedding_manager.py +163 -0
- lightly_studio/dataset/env.py +16 -0
- lightly_studio/dataset/file_utils.py +35 -0
- lightly_studio/dataset/loader.py +622 -0
- lightly_studio/dataset/mobileclip_embedding_generator.py +144 -0
- lightly_studio/dist_lightly_studio_view_app/_app/env.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/0.DenzbfeK.css +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/LightlyLogo.BNjCIww-.png +0 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/OpenSans- +0 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/OpenSans-Bold.DGvYQtcs.ttf +0 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/OpenSans-Italic-VariableFont_wdth_wght.B4AZ-wl6.ttf +0 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/OpenSans-Regular.DxJTClRG.ttf +0 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/OpenSans-SemiBold.D3TTYgdB.ttf +0 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/OpenSans-VariableFont_wdth_wght.BZBpG5Iz.ttf +0 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/SelectableSvgGroup.OwPEPQZu.css +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/SelectableSvgGroup.b653GmVf.css +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/_layout.T-zjSUd3.css +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/useFeatureFlags.CV-KWLNP.css +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/69_IOA4Y.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/B2FVR0s0.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/B90CZVMX.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/B9zumHo5.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/BJXwVxaE.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/Bsi3UGy5.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/Bu7uvVrG.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/Bx1xMsFy.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/BylOuP6i.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/C8I8rFJQ.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/CDnpyLsT.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/CWj6FrbW.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/CYgJF_JY.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/CcaPhhk3.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/CvOmgdoc.js +93 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/CxtLVaYz.js +3 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/D5-A_Ffd.js +4 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/D6RI2Zrd.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/D6su9Aln.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/D98V7j6A.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DIRAtgl0.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DIeogL5L.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DOlTMNyt.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DjUWrjOv.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DjfY96ND.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/H7C68rOM.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/O-EABkf9.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/XO7A28GO.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/hQVEETDE.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/l7KrR96u.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/nAHhluT7.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/r64xT6ao.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/vC4nQVEB.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/x9G_hzyY.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/entry/app.CjnvpsmS.js +2 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/entry/start.0o1H7wM9.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/0.XRq_TUwu.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/1.B4rNYwVp.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/10.DfBwOEhN.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/11.CWG1ehzT.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/12.CwF2_8mP.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/2.CS4muRY-.js +6 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/3.CWHpKonm.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/4.OUWOLQeV.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/5.Dm6t9F5W.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/6.Bw5ck4gK.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/7.CF0EDTR6.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/8.Cw30LEcV.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/9.CPu3CiBc.js +1 -0
- lightly_studio/dist_lightly_studio_view_app/_app/version.json +1 -0
- lightly_studio/dist_lightly_studio_view_app/apple-touch-icon-precomposed.png +0 -0
- lightly_studio/dist_lightly_studio_view_app/apple-touch-icon.png +0 -0
- lightly_studio/dist_lightly_studio_view_app/favicon.png +0 -0
- lightly_studio/dist_lightly_studio_view_app/index.html +44 -0
- lightly_studio/examples/example.py +23 -0
- lightly_studio/examples/example_metadata.py +338 -0
- lightly_studio/examples/example_selection.py +39 -0
- lightly_studio/examples/example_split_work.py +67 -0
- lightly_studio/examples/example_v2.py +21 -0
- lightly_studio/export_schema.py +18 -0
- lightly_studio/few_shot_classifier/__init__.py +0 -0
- lightly_studio/few_shot_classifier/classifier.py +80 -0
- lightly_studio/few_shot_classifier/classifier_manager.py +663 -0
- lightly_studio/few_shot_classifier/random_forest_classifier.py +489 -0
- lightly_studio/metadata/complex_metadata.py +47 -0
- lightly_studio/metadata/gps_coordinate.py +41 -0
- lightly_studio/metadata/metadata_protocol.py +17 -0
- lightly_studio/metrics/__init__.py +0 -0
- lightly_studio/metrics/detection/__init__.py +0 -0
- lightly_studio/metrics/detection/map.py +268 -0
- lightly_studio/models/__init__.py +1 -0
- lightly_studio/models/annotation/__init__.py +0 -0
- lightly_studio/models/annotation/annotation_base.py +171 -0
- lightly_studio/models/annotation/instance_segmentation.py +56 -0
- lightly_studio/models/annotation/links.py +17 -0
- lightly_studio/models/annotation/object_detection.py +47 -0
- lightly_studio/models/annotation/semantic_segmentation.py +44 -0
- lightly_studio/models/annotation_label.py +47 -0
- lightly_studio/models/annotation_task.py +28 -0
- lightly_studio/models/classifier.py +20 -0
- lightly_studio/models/dataset.py +84 -0
- lightly_studio/models/embedding_model.py +30 -0
- lightly_studio/models/metadata.py +208 -0
- lightly_studio/models/sample.py +180 -0
- lightly_studio/models/sample_embedding.py +37 -0
- lightly_studio/models/settings.py +60 -0
- lightly_studio/models/tag.py +96 -0
- lightly_studio/py.typed +0 -0
- lightly_studio/resolvers/__init__.py +7 -0
- lightly_studio/resolvers/annotation_label_resolver/__init__.py +21 -0
- lightly_studio/resolvers/annotation_label_resolver/create.py +27 -0
- lightly_studio/resolvers/annotation_label_resolver/delete.py +28 -0
- lightly_studio/resolvers/annotation_label_resolver/get_all.py +22 -0
- lightly_studio/resolvers/annotation_label_resolver/get_by_id.py +24 -0
- lightly_studio/resolvers/annotation_label_resolver/get_by_ids.py +25 -0
- lightly_studio/resolvers/annotation_label_resolver/get_by_label_name.py +24 -0
- lightly_studio/resolvers/annotation_label_resolver/names_by_ids.py +25 -0
- lightly_studio/resolvers/annotation_label_resolver/update.py +38 -0
- lightly_studio/resolvers/annotation_resolver/__init__.py +33 -0
- lightly_studio/resolvers/annotation_resolver/count_annotations_by_dataset.py +120 -0
- lightly_studio/resolvers/annotation_resolver/create.py +19 -0
- lightly_studio/resolvers/annotation_resolver/create_many.py +96 -0
- lightly_studio/resolvers/annotation_resolver/delete_annotation.py +45 -0
- lightly_studio/resolvers/annotation_resolver/delete_annotations.py +56 -0
- lightly_studio/resolvers/annotation_resolver/get_all.py +74 -0
- lightly_studio/resolvers/annotation_resolver/get_by_id.py +18 -0
- lightly_studio/resolvers/annotation_resolver/update_annotation_label.py +144 -0
- lightly_studio/resolvers/annotation_resolver/update_bounding_box.py +68 -0
- lightly_studio/resolvers/annotation_task_resolver.py +31 -0
- lightly_studio/resolvers/annotations/__init__.py +1 -0
- lightly_studio/resolvers/annotations/annotations_filter.py +89 -0
- lightly_studio/resolvers/dataset_resolver.py +278 -0
- lightly_studio/resolvers/embedding_model_resolver.py +100 -0
- lightly_studio/resolvers/metadata_resolver/__init__.py +15 -0
- lightly_studio/resolvers/metadata_resolver/metadata_filter.py +163 -0
- lightly_studio/resolvers/metadata_resolver/sample/__init__.py +21 -0
- lightly_studio/resolvers/metadata_resolver/sample/bulk_set_metadata.py +48 -0
- lightly_studio/resolvers/metadata_resolver/sample/get_by_sample_id.py +24 -0
- lightly_studio/resolvers/metadata_resolver/sample/get_metadata_info.py +104 -0
- lightly_studio/resolvers/metadata_resolver/sample/get_value_for_sample.py +27 -0
- lightly_studio/resolvers/metadata_resolver/sample/set_value_for_sample.py +53 -0
- lightly_studio/resolvers/sample_embedding_resolver.py +86 -0
- lightly_studio/resolvers/sample_resolver.py +249 -0
- lightly_studio/resolvers/samples_filter.py +81 -0
- lightly_studio/resolvers/settings_resolver.py +58 -0
- lightly_studio/resolvers/tag_resolver.py +276 -0
- lightly_studio/selection/README.md +6 -0
- lightly_studio/selection/mundig.py +105 -0
- lightly_studio/selection/select.py +96 -0
- lightly_studio/selection/select_via_db.py +93 -0
- lightly_studio/selection/selection_config.py +31 -0
- lightly_studio/services/annotations_service/__init__.py +21 -0
- lightly_studio/services/annotations_service/get_annotation_by_id.py +31 -0
- lightly_studio/services/annotations_service/update_annotation.py +65 -0
- lightly_studio/services/annotations_service/update_annotation_label.py +48 -0
- lightly_studio/services/annotations_service/update_annotations.py +29 -0
- lightly_studio/setup_logging.py +19 -0
- lightly_studio/type_definitions.py +19 -0
- lightly_studio/vendor/ACKNOWLEDGEMENTS +422 -0
- lightly_studio/vendor/LICENSE +31 -0
- lightly_studio/vendor/LICENSE_weights_data +50 -0
- lightly_studio/vendor/README.md +5 -0
- lightly_studio/vendor/__init__.py +1 -0
- lightly_studio/vendor/mobileclip/__init__.py +96 -0
- lightly_studio/vendor/mobileclip/clip.py +77 -0
- lightly_studio/vendor/mobileclip/configs/mobileclip_b.json +18 -0
- lightly_studio/vendor/mobileclip/configs/mobileclip_s0.json +18 -0
- lightly_studio/vendor/mobileclip/configs/mobileclip_s1.json +18 -0
- lightly_studio/vendor/mobileclip/configs/mobileclip_s2.json +18 -0
- lightly_studio/vendor/mobileclip/image_encoder.py +67 -0
- lightly_studio/vendor/mobileclip/logger.py +154 -0
- lightly_studio/vendor/mobileclip/models/__init__.py +10 -0
- lightly_studio/vendor/mobileclip/models/mci.py +933 -0
- lightly_studio/vendor/mobileclip/models/vit.py +433 -0
- lightly_studio/vendor/mobileclip/modules/__init__.py +4 -0
- lightly_studio/vendor/mobileclip/modules/common/__init__.py +4 -0
- lightly_studio/vendor/mobileclip/modules/common/mobileone.py +341 -0
- lightly_studio/vendor/mobileclip/modules/common/transformer.py +451 -0
- lightly_studio/vendor/mobileclip/modules/image/__init__.py +4 -0
- lightly_studio/vendor/mobileclip/modules/image/image_projection.py +113 -0
- lightly_studio/vendor/mobileclip/modules/image/replknet.py +188 -0
- lightly_studio/vendor/mobileclip/modules/text/__init__.py +4 -0
- lightly_studio/vendor/mobileclip/modules/text/repmixer.py +281 -0
- lightly_studio/vendor/mobileclip/modules/text/tokenizer.py +38 -0
- lightly_studio/vendor/mobileclip/text_encoder.py +245 -0
- lightly_studio-0.3.1.dist-info/METADATA +520 -0
- lightly_studio-0.3.1.dist-info/RECORD +219 -0
- lightly_studio-0.3.1.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,523 @@
|
|
|
1
|
+
"""LightlyStudio Dataset."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path
from typing import Iterable
from uuid import UUID

import PIL
import PIL.Image  # `import PIL` alone does not load the Image submodule needed by PIL.Image.open
from labelformat.formats import (
    COCOInstanceSegmentationInput,
    COCOObjectDetectionInput,
    YOLOv8ObjectDetectionInput,
)
from labelformat.model.binary_mask_segmentation import BinaryMaskSegmentation
from labelformat.model.bounding_box import BoundingBoxFormat
from labelformat.model.image import Image
from labelformat.model.instance_segmentation import (
    ImageInstanceSegmentation,
    InstanceSegmentationInput,
)
from labelformat.model.multipolygon import MultiPolygon
from labelformat.model.object_detection import (
    ImageObjectDetection,
    ObjectDetectionInput,
)
from sqlmodel import Session
from tqdm import tqdm

from lightly_studio.api.db import db_manager
from lightly_studio.models.annotation.annotation_base import AnnotationCreate
from lightly_studio.models.annotation_label import AnnotationLabelCreate
from lightly_studio.models.annotation_task import (
    AnnotationTaskTable,
    AnnotationType,
)
from lightly_studio.models.dataset import DatasetCreate, DatasetTable
from lightly_studio.models.sample import SampleCreate, SampleTable
from lightly_studio.resolvers import (
    annotation_label_resolver,
    annotation_resolver,
    annotation_task_resolver,
    dataset_resolver,
    sample_resolver,
)
from lightly_studio.type_definitions import PathLike
|
|
48
|
+
|
|
49
|
+
# Constants controlling how many rows are buffered before a bulk insert.
ANNOTATION_BATCH_SIZE = 64  # Number of annotations to process in a single batch
SAMPLE_BATCH_SIZE = 32  # Number of samples to process in a single batch
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass
class AnnotationProcessingContext:
    """Context for processing annotations for a single sample."""

    # Dataset the annotations belong to.
    dataset_id: UUID
    # Stored sample the annotations are attached to.
    sample_id: UUID
    # Maps labelformat category IDs to stored annotation-label UUIDs
    # (built by _create_label_map).
    label_map: dict[int, UUID]
    # Annotation task the created annotations are grouped under.
    annotation_task_id: UUID
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class Dataset:
    """A LightlyStudio Dataset.

    Represents a dataset in LightlyStudio. The database row is created
    eagerly in ``__init__``, so ``dataset_id`` is available immediately.

    Args:
        name: The name of the dataset. If None, a default name will be assigned.
    """

    def __init__(self, name: str | None = None) -> None:
        """Initialize a LightlyStudio Dataset."""
        if name is None:
            name = "default_dataset"
        self.name = name
        self.session = db_manager.persistent_session()
        # Create dataset row up front so dataset_id is usable right away.
        self._dataset = dataset_resolver.create(
            session=self.session,
            dataset=DatasetCreate(
                name=self.name,
                directory="",  # The directory is not used at the moment
            ),
        )

    @property
    def dataset_id(self) -> UUID:
        """Get the dataset ID."""
        return self._dataset.dataset_id

    def add_samples_from_path(
        self,
        path: PathLike,
        recursive: bool = True,
        allowed_extensions: Iterable[str] = (
            ".png",
            ".jpg",
            ".jpeg",
            ".gif",
            ".webp",
            ".bmp",
            ".tiff",
        ),
    ) -> None:
        """Adding samples from the specified path to the dataset.

        Args:
            path: Path to the folder containing the images to add.
            recursive: If True, search for images recursively in subfolders.
            allowed_extensions: An iterable container of allowed image file
                extensions (compared case-insensitively).

        Raises:
            ValueError: If ``path`` is not an existing directory.
        """
        root = Path(path).absolute() if isinstance(path, str) else path.absolute()
        if not root.exists() or not root.is_dir():
            raise ValueError(f"Provided path is not a valid directory: {root}")

        # Collect image file paths.
        allowed_extensions_set = {ext.lower() for ext in allowed_extensions}
        image_paths = []
        path_iter = root.rglob("*") if recursive else root.glob("*")
        # Bug fix: the loop variable previously shadowed `path`, so the
        # summary below printed the last file visited rather than the
        # scanned directory.
        for candidate in path_iter:
            if candidate.is_file() and candidate.suffix.lower() in allowed_extensions_set:
                image_paths.append(candidate)
        print(f"Found {len(image_paths)} images in {root}.")

        # Process images.
        _load_into_dataset_from_paths(
            session=self.session,
            dataset_id=self.dataset_id,
            image_paths=image_paths,
        )

    def add_samples_from_labelformat(
        self,
        input_labels: ObjectDetectionInput | InstanceSegmentationInput,
        images_path: PathLike,
        is_prediction: bool = True,
        task_name: str | None = None,
    ) -> None:
        """Load a dataset from a labelformat object and store in database.

        Args:
            input_labels: The labelformat input object.
            images_path: Path to the folder containing the images.
            is_prediction: Whether the task is for prediction or labels.
            task_name: Optional name for the annotation task. If None, a
                default name is generated.
        """
        if isinstance(images_path, str):
            images_path = Path(images_path)
        images_path = images_path.absolute()

        # Determine annotation type based on input.
        # Currently, we always create BBOX tasks, even for segmentation,
        # as segmentation data is stored alongside bounding boxes.
        annotation_type = AnnotationType.BBOX

        # Generate a default task name if none is provided.
        if task_name is None:
            task_name = f"Loaded from labelformat: {self.name}"

        # Create annotation task.
        new_annotation_task = annotation_task_resolver.create(
            session=self.session,
            annotation_task=AnnotationTaskTable(
                name=task_name,
                annotation_type=annotation_type,
                is_prediction=is_prediction,
            ),
        )

        _load_into_dataset(
            session=self.session,
            dataset_id=self.dataset_id,
            input_labels=input_labels,
            images_path=images_path,
            annotation_task_id=new_annotation_task.annotation_task_id,
        )

    def from_labelformat(
        self,
        input_labels: ObjectDetectionInput | InstanceSegmentationInput,
        img_dir: str,
        is_prediction: bool = False,
        task_name: str | None = None,
        dataset_name: str | None = None,
    ) -> DatasetTable:
        """Load samples and annotations from a labelformat input.

        Bug fix: ``from_yolo`` and the ``from_coco_*`` helpers called this
        method, but it was previously missing (the calls were suppressed with
        ``# type: ignore[attr-defined]``) and raised AttributeError at
        runtime.

        Args:
            input_labels: The labelformat input object.
            img_dir: Path to the folder containing the images.
            is_prediction: Whether the task is for prediction or labels.
            task_name: Optional name for the annotation task. If None, a
                default name is generated.
            dataset_name: Accepted for caller compatibility; the dataset name
                is fixed at construction time, so this value is ignored.

        Returns:
            DatasetTable: The dataset table entry the samples were added to.
        """
        self.add_samples_from_labelformat(
            input_labels=input_labels,
            images_path=img_dir,
            is_prediction=is_prediction,
            task_name=task_name,
        )
        return self._dataset

    def from_yolo(
        self,
        data_yaml_path: str,
        input_split: str = "train",
        task_name: str | None = None,
    ) -> DatasetTable:
        """Load a dataset in YOLO format and store in DB.

        Args:
            data_yaml_path: Path to the YOLO data.yaml file.
            input_split: The split to load (e.g., 'train', 'val').
            task_name: Optional name for the annotation task. If None, a
                default name is generated.

        Returns:
            DatasetTable: The created dataset table entry.
        """
        data_yaml = Path(data_yaml_path).absolute()
        dataset_name = data_yaml.parent.name

        if task_name is None:
            task_name = f"Loaded from YOLO: {data_yaml.name} ({input_split} split)"

        # Load the dataset using labelformat.
        label_input = YOLOv8ObjectDetectionInput(
            input_file=data_yaml,
            input_split=input_split,
        )
        img_dir = label_input._images_dir()  # noqa: SLF001

        return self.from_labelformat(
            input_labels=label_input,
            dataset_name=dataset_name,
            img_dir=str(img_dir),
            is_prediction=False,
            task_name=task_name,
        )

    def from_coco_object_detections(
        self,
        annotations_json_path: str,
        img_dir: str,
        task_name: str | None = None,
    ) -> DatasetTable:
        """Load a dataset in COCO Object Detection format and store in DB.

        Args:
            annotations_json_path: Path to the COCO annotations JSON file.
            img_dir: Path to the folder containing the images.
            task_name: Optional name for the annotation task. If None, a
                default name is generated.

        Returns:
            DatasetTable: The created dataset table entry.
        """
        annotations_json = Path(annotations_json_path)
        dataset_name = annotations_json.parent.name

        if task_name is None:
            task_name = f"Loaded from COCO Object Detection: {annotations_json.name}"

        label_input = COCOObjectDetectionInput(
            input_file=annotations_json,
        )
        img_dir_path = Path(img_dir).absolute()

        return self.from_labelformat(
            input_labels=label_input,
            dataset_name=dataset_name,
            img_dir=str(img_dir_path),
            is_prediction=False,
            task_name=task_name,
        )

    def from_coco_instance_segmentations(
        self,
        annotations_json_path: str,
        img_dir: str,
        task_name: str | None = None,
    ) -> DatasetTable:
        """Load a dataset in COCO Instance Segmentation format and store in DB.

        Args:
            annotations_json_path: Path to the COCO annotations JSON file.
            img_dir: Path to the folder containing the images.
            task_name: Optional name for the annotation task. If None, a
                default name is generated.

        Returns:
            DatasetTable: The created dataset table entry.
        """
        annotations_json = Path(annotations_json_path)
        dataset_name = annotations_json.parent.name

        if task_name is None:
            task_name = f"Loaded from COCO Instance Segmentation: {annotations_json.name}"

        label_input = COCOInstanceSegmentationInput(
            input_file=annotations_json,
        )
        img_dir_path = Path(img_dir).absolute()

        return self.from_labelformat(
            input_labels=label_input,
            dataset_name=dataset_name,
            img_dir=str(img_dir_path),
            is_prediction=False,
            task_name=task_name,
        )

    @staticmethod
    def load_from_db(name: str, db_path: PathLike) -> Dataset:
        """Load a dataset from the database.

        Returns:
            Dataset: The loaded dataset.

        Raises:
            NotImplementedError: Always; loading is not implemented yet.
        """
        raise NotImplementedError
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
def _load_into_dataset_from_paths(
    dataset_id: UUID,
    session: Session,
    image_paths: Iterable[Path],
) -> None:
    """Create samples for the given image files in batches.

    Files that are missing or not readable as images are skipped silently.
    Samples are flushed to the database every SAMPLE_BATCH_SIZE images to
    bound memory usage.

    Args:
        dataset_id: Dataset the samples belong to.
        session: Database session used for the inserts.
        image_paths: Absolute paths of the image files to register.
    """
    samples_to_create: list[SampleCreate] = []

    for image_path in tqdm(
        image_paths,
        desc="Processing images",
        unit=" images",
    ):
        try:
            # Context manager guarantees the file handle is closed even when
            # reading the image size raises (the previous open/close pair
            # leaked the handle on that error path).
            with PIL.Image.open(image_path) as image:
                width, height = image.size
        except (FileNotFoundError, PIL.UnidentifiedImageError, OSError):
            # Not a readable image; skip it.
            continue

        samples_to_create.append(
            SampleCreate(
                file_name=image_path.name,
                file_path_abs=str(image_path),
                width=width,
                height=height,
                dataset_id=dataset_id,
            )
        )

        # Process batch when it reaches SAMPLE_BATCH_SIZE
        if len(samples_to_create) >= SAMPLE_BATCH_SIZE:
            _ = sample_resolver.create_many(session=session, samples=samples_to_create)
            samples_to_create = []

    # Handle remaining samples
    if samples_to_create:
        _ = sample_resolver.create_many(session=session, samples=samples_to_create)
|
|
341
|
+
|
|
342
|
+
|
|
343
|
+
def _load_into_dataset(
    session: Session,
    dataset_id: UUID,
    input_labels: ObjectDetectionInput | InstanceSegmentationInput,
    images_path: Path,
    annotation_task_id: UUID,
) -> None:
    """Store a loaded dataset in database.

    Iterates over the labelformat input, creating samples in batches of
    SAMPLE_BATCH_SIZE and delegating per-batch annotation creation to
    ``_process_batch_annotations`` (defined outside this view; presumably it
    appends to ``annotations_to_create``/``sample_ids`` in place and may
    flush annotations itself — TODO confirm).

    Args:
        session: Database session used for all inserts.
        dataset_id: Dataset the samples belong to.
        input_labels: Labelformat input providing images and their labels.
        images_path: Directory used to build absolute image file paths.
        annotation_task_id: Task the created annotations are grouped under.
    """
    # Create label mapping
    label_map = _create_label_map(session=session, input_labels=input_labels)

    # Accumulators shared with _process_batch_annotations across batches.
    annotations_to_create: list[AnnotationCreate] = []
    sample_ids: list[UUID] = []
    samples_to_create: list[SampleCreate] = []
    # Pairs each pending SampleCreate with its labelformat image data so the
    # annotations can be attached after the samples are stored.
    samples_image_data: list[
        tuple[SampleCreate, ImageInstanceSegmentation | ImageObjectDetection]
    ] = []

    for image_data in tqdm(input_labels.get_labels(), desc="Processing images", unit=" images"):
        image: Image = image_data.image  # type: ignore[attr-defined]

        typed_image_data: ImageInstanceSegmentation | ImageObjectDetection = image_data  # type: ignore[assignment]
        sample = SampleCreate(
            file_name=str(image.filename),
            file_path_abs=str(images_path / image.filename),
            width=image.width,
            height=image.height,
            dataset_id=dataset_id,
        )
        samples_to_create.append(sample)
        samples_image_data.append((sample, typed_image_data))

        # Flush a full batch: store the samples, then build their annotations.
        if len(samples_to_create) >= SAMPLE_BATCH_SIZE:
            stored_samples = sample_resolver.create_many(session=session, samples=samples_to_create)
            _process_batch_annotations(
                session=session,
                stored_samples=stored_samples,
                samples_data=samples_image_data,
                dataset_id=dataset_id,
                label_map=label_map,
                annotation_task_id=annotation_task_id,
                annotations_to_create=annotations_to_create,
                sample_ids=sample_ids,
            )
            samples_to_create.clear()
            samples_image_data.clear()

    # Flush the final partial batch, if any.
    if samples_to_create:
        stored_samples = sample_resolver.create_many(session=session, samples=samples_to_create)
        _process_batch_annotations(
            session=session,
            stored_samples=stored_samples,
            samples_data=samples_image_data,
            dataset_id=dataset_id,
            label_map=label_map,
            annotation_task_id=annotation_task_id,
            annotations_to_create=annotations_to_create,
            sample_ids=sample_ids,
        )

    # Insert any remaining annotations
    if annotations_to_create:
        annotation_resolver.create_many(session=session, annotations=annotations_to_create)
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
def _create_label_map(
    session: Session,
    input_labels: ObjectDetectionInput | InstanceSegmentationInput,
) -> dict[int, UUID]:
    """Persist one annotation label per input category.

    Returns a mapping from the labelformat category ID to the UUID of the
    stored annotation label.
    """
    categories = tqdm(
        input_labels.get_categories(),
        desc="Processing categories",
        unit=" categories",
    )
    mapping: dict[int, UUID] = {}
    for cat in categories:
        created = annotation_label_resolver.create(
            session=session,
            label=AnnotationLabelCreate(annotation_label_name=cat.name),
        )
        mapping[cat.id] = created.annotation_label_id
    return mapping
|
|
423
|
+
|
|
424
|
+
|
|
425
|
+
def _process_object_detection_annotations(
    context: AnnotationProcessingContext,
    image_data: ImageObjectDetection,
) -> list[AnnotationCreate]:
    """Convert the detections of a single image into annotation records.

    Each detected object contributes one ``AnnotationCreate`` carrying its
    XYWH bounding box, mapped label ID, and confidence.
    """

    def _to_annotation(detection: object) -> AnnotationCreate:
        # Normalize the box to XYWH before storing its components.
        x, y, width, height = detection.box.to_format(BoundingBoxFormat.XYWH)
        return AnnotationCreate(
            dataset_id=context.dataset_id,
            sample_id=context.sample_id,
            annotation_label_id=context.label_map[detection.category.id],
            annotation_type="object_detection",
            x=x,
            y=y,
            width=width,
            height=height,
            confidence=detection.confidence,
            annotation_task_id=context.annotation_task_id,
        )

    return [_to_annotation(obj) for obj in image_data.objects]
|
|
450
|
+
|
|
451
|
+
|
|
452
|
+
def _process_instance_segmentation_annotations(
    context: AnnotationProcessingContext,
    image_data: ImageInstanceSegmentation,
) -> list[AnnotationCreate]:
    """Convert the segmented objects of a single image into annotation records.

    Supports ``MultiPolygon`` (box only) and ``BinaryMaskSegmentation``
    (box plus row-wise RLE mask); any other segmentation type raises
    ``ValueError``.
    """

    def _box_and_rle(segmentation: object) -> tuple[object, list[int] | None]:
        # Returns the XYWH bounding box and, for binary masks, the RLE mask.
        if isinstance(segmentation, MultiPolygon):
            return segmentation.bounding_box().to_format(BoundingBoxFormat.XYWH), None
        if isinstance(segmentation, BinaryMaskSegmentation):
            return (
                segmentation.bounding_box.to_format(BoundingBoxFormat.XYWH),
                segmentation._rle_row_wise,  # noqa: SLF001
            )
        raise ValueError(f"Unsupported segmentation type: {type(segmentation)}")

    annotations: list[AnnotationCreate] = []
    for obj in image_data.objects:
        (x, y, width, height), segmentation_rle = _box_and_rle(obj.segmentation)

        annotations.append(
            AnnotationCreate(
                dataset_id=context.dataset_id,
                sample_id=context.sample_id,
                annotation_label_id=context.label_map[obj.category.id],
                annotation_type="instance_segmentation",
                x=x,
                y=y,
                width=width,
                height=height,
                segmentation_mask=segmentation_rle,
                annotation_task_id=context.annotation_task_id,
            )
        )
    return annotations
|
|
485
|
+
|
|
486
|
+
|
|
487
|
+
def _process_batch_annotations(  # noqa: PLR0913
    session: Session,
    stored_samples: list[SampleTable],
    samples_data: list[tuple[SampleCreate, ImageInstanceSegmentation | ImageObjectDetection]],
    dataset_id: UUID,
    label_map: dict[int, UUID],
    annotation_task_id: UUID,
    annotations_to_create: list[AnnotationCreate],
    sample_ids: list[UUID],
) -> None:
    """Process annotations for a batch of freshly stored samples.

    Mutates its list arguments in place: appends each stored sample's ID to
    ``sample_ids`` and accumulates new annotations into
    ``annotations_to_create``, flushing that list to the database whenever it
    reaches ``ANNOTATION_BATCH_SIZE``.
    """
    for sample_row, (_, image_data) in zip(stored_samples, samples_data):
        sample_ids.append(sample_row.sample_id)

        processing_context = AnnotationProcessingContext(
            dataset_id=dataset_id,
            sample_id=sample_row.sample_id,
            label_map=label_map,
            annotation_task_id=annotation_task_id,
        )

        # Dispatch on the concrete annotation payload type.
        if isinstance(image_data, ImageInstanceSegmentation):
            batch = _process_instance_segmentation_annotations(
                context=processing_context, image_data=image_data
            )
        elif isinstance(image_data, ImageObjectDetection):
            batch = _process_object_detection_annotations(
                context=processing_context, image_data=image_data
            )
        else:
            raise ValueError(f"Unsupported annotation type: {type(image_data)}")

        annotations_to_create.extend(batch)

        # Flush once the accumulator reaches the configured batch size.
        if len(annotations_to_create) >= ANNOTATION_BATCH_SIZE:
            annotation_resolver.create_many(session=session, annotations=annotations_to_create)
            annotations_to_create.clear()
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""Interface for Sample objects."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, Generic, Protocol, TypeVar
|
|
6
|
+
|
|
7
|
+
from sqlalchemy.orm import object_session
|
|
8
|
+
|
|
9
|
+
from lightly_studio.models.sample import SampleTable
|
|
10
|
+
|
|
11
|
+
T = TypeVar("T")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class _HasInner(Protocol):
    """Structural type for wrappers that expose a backing model via ``_inner``."""

    # The wrapped model instance (e.g. a SampleTable row) — accessed by DBField.
    _inner: Any
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class DBField(Generic[T]):
    """Descriptor exposing a SQLAlchemy model column on a wrapper object.

    Reads delegate to the underlying SQLAlchemy descriptor; writes delegate
    likewise and then commit the active session, so assignments are
    persisted to the database immediately. The owner class must implement
    the _inner attribute.
    """

    # Only state: the SQLAlchemy descriptor used to reach the mapped column.
    __slots__ = ("_sqla_descriptor",)

    def __init__(self, sqla_descriptor: T) -> None:
        """Store the SQLAlchemy descriptor backing this field.

        Note: Mypy thinks that the descriptor has type T. In reality, during
        runtime, it will be InstrumentedAttribute[T].
        """
        self._sqla_descriptor = sqla_descriptor

    def __get__(self, obj: _HasInner | None, owner: type | None = None) -> T:
        """Read the field's current value from the wrapped model instance."""
        assert obj is not None, "DBField must be accessed via an instance, not the class"
        # Hand off to SQLAlchemy's own descriptor protocol. Mypy models the
        # descriptor as T and therefore cannot see its __get__.
        result: T = self._sqla_descriptor.__get__(obj._inner, type(obj._inner))  # type: ignore[attr-defined] # noqa: SLF001
        return result

    def __set__(self, obj: _HasInner, value: T) -> None:
        """Write the field on the wrapped model instance and commit the session."""
        # Hand off to SQLAlchemy's own descriptor protocol. Mypy models the
        # descriptor as T and therefore cannot see its __set__.
        self._sqla_descriptor.__set__(obj._inner, value)  # type: ignore[attr-defined] # noqa: SLF001
        session = object_session(obj._inner)  # noqa: SLF001
        if session is None:
            raise RuntimeError("No active session found for the DBField object")
        session.commit()
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class Sample:
    """Interface to a dataset sample.

    Wraps a ``SampleTable`` row. Each class attribute below is a ``DBField``
    descriptor that reads from the underlying row and, on assignment, writes
    through and commits the session immediately.
    """

    # Sample properties backed by SampleTable columns.
    file_name = DBField(SampleTable.file_name)
    width = DBField(SampleTable.width)
    height = DBField(SampleTable.height)
    dataset_id = DBField(SampleTable.dataset_id)
    file_path_abs = DBField(SampleTable.file_path_abs)

    # Identifier and timestamp columns of the underlying row.
    sample_id = DBField(SampleTable.sample_id)
    created_at = DBField(SampleTable.created_at)
    updated_at = DBField(SampleTable.updated_at)

    def __init__(self, inner: SampleTable) -> None:
        """Initialize the Sample.

        Args:
            inner: The SampleTable SQLAlchemy model instance.
        """
        self._inner = inner
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""Module to launch the GUI."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from lightly_studio.api.server import Server
|
|
6
|
+
from lightly_studio.dataset import env
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def start_gui() -> None:
    """Launch the web interface for the loaded dataset."""
    gui_server = Server(host=env.LIGHTLY_STUDIO_HOST, port=env.LIGHTLY_STUDIO_PORT)

    print(f"Open the LightlyStudio GUI under: {env.APP_URL}")

    gui_server.start()
|
|
File without changes
|