lightly-studio: lightly_studio-0.4.0-py3-none-any.whl → lightly_studio-0.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lightly-studio might be problematic.

Files changed (74)
  1. lightly_studio/__init__.py +1 -2
  2. lightly_studio/api/db_tables.py +3 -0
  3. lightly_studio/api/routes/api/dataset.py +5 -3
  4. lightly_studio/api/routes/api/embeddings2d.py +4 -16
  5. lightly_studio/core/dataset_query/dataset_query.py +98 -2
  6. lightly_studio/core/dataset_query/sample_field.py +11 -2
  7. lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/0.B-Bn8V7R.css +1 -0
  8. lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/Samples.CTl60pQb.css +1 -0
  9. lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/_layout.CxuTMeYy.css +1 -0
  10. lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/transform.Bte-FZ4V.css +1 -0
  11. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{ChlxSwqI.js → 7XnHnNk_.js} +1 -1
  12. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{DthpwYR_.js → BDE1kT04.js} +2 -2
  13. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{D8ZGoCPm.js → BHoO7UL_.js} +2 -2
  14. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{BiGQqqJP.js → BTpMiPuk.js} +1 -1
  15. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/BbG6fgnL.js +1 -0
  16. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/Bip0vTKl.js +1 -0
  17. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{BrNKoXwc.js → BzYRexN9.js} +1 -1
  18. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{CP9M7pei.js → C5oezL0m.js} +1 -1
  19. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/CEPpkZIV.js +1 -0
  20. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/ChO13USc.js +96 -0
  21. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{ClzkJBWk.js → CmNXvs5e.js} +1 -1
  22. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{DUtlYNuP.js → D9WxTSWa.js} +1 -1
  23. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DELrRF6e.js +4 -0
  24. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{BtXGzlpP.js → DFib3GIP.js} +1 -1
  25. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/{DMJzr1NB.js → DeAqvaPB.js} +1 -1
  26. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DgCXCApo.js +438 -0
  27. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DqhovIS0.js +1 -0
  28. lightly_studio/dist_lightly_studio_view_app/_app/immutable/entry/{app.Y-sSoz5q.js → app.CED-eiXl.js} +2 -2
  29. lightly_studio/dist_lightly_studio_view_app/_app/immutable/entry/start.D6UcpqyO.js +1 -0
  30. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{0.0Fm6E-5B.js → 0.waSqEGqm.js} +2 -2
  31. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{1.DB-0vkHb.js → 1.Bb_sKkYl.js} +1 -1
  32. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{10.vaUePh5k.js → 10.D9Uicmfq.js} +1 -1
  33. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{11.7i7ljNVT.js → 11.DKnK0EcJ.js} +1 -1
  34. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/13.D7dGgxYC.js +1 -0
  35. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{2.Drwwdm7A.js → 2.BzK1sjoM.js} +138 -100
  36. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{3.D3X_-Wan.js → 3.B08UPFst.js} +1 -1
  37. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{4.C9TqY3tA.js → 4.C19_Nx95.js} +1 -1
  38. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/5.B2cR7brs.js +39 -0
  39. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{6.fqfYR7dB.js → 6.C9Y2LZxG.js} +1 -1
  40. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{7.C7gMM-gk.js → 7.CEbWmgIF.js} +1 -1
  41. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/8.Con-GcPz.js +20 -0
  42. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/{9.DbHcSiMn.js → 9.DW7FBNNH.js} +1 -1
  43. lightly_studio/dist_lightly_studio_view_app/_app/version.json +1 -1
  44. lightly_studio/dist_lightly_studio_view_app/index.html +7 -8
  45. lightly_studio/models/annotation/annotation_base.py +9 -0
  46. lightly_studio/models/dataset.py +6 -0
  47. lightly_studio/models/sample.py +1 -1
  48. lightly_studio/models/two_dim_embedding.py +16 -0
  49. lightly_studio/resolvers/annotation_resolver/get_all.py +3 -1
  50. lightly_studio/resolvers/dataset_resolver.py +18 -1
  51. lightly_studio/resolvers/sample_embedding_resolver.py +44 -2
  52. lightly_studio/resolvers/sample_resolver.py +1 -3
  53. lightly_studio/resolvers/twodim_embedding_resolver.py +93 -3
  54. lightly_studio/selection/select.py +70 -1
  55. lightly_studio/selection/selection_config.py +1 -1
  56. {lightly_studio-0.4.0.dist-info → lightly_studio-0.4.1.dist-info}/METADATA +1 -1
  57. {lightly_studio-0.4.0.dist-info → lightly_studio-0.4.1.dist-info}/RECORD +58 -58
  58. lightly_studio/dataset/loader.py +0 -581
  59. lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/0.CN4hnTks.css +0 -1
  60. lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/Samples.C0_eo9eP.css +0 -1
  61. lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/_layout.kFFGI0zL.css +0 -1
  62. lightly_studio/dist_lightly_studio_view_app/_app/immutable/assets/transform.sLzR40om.css +0 -1
  63. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/BOmrKuMn.js +0 -1
  64. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/BPpOWbDa.js +0 -1
  65. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/BaFFwDFr.js +0 -1
  66. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/BsaJCCG_.js +0 -96
  67. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/CWuDkrMZ.js +0 -436
  68. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/DyIcJj6J.js +0 -1
  69. lightly_studio/dist_lightly_studio_view_app/_app/immutable/chunks/WEyXQRi6.js +0 -1
  70. lightly_studio/dist_lightly_studio_view_app/_app/immutable/entry/start.CvxVp0Cu.js +0 -1
  71. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/13.9qy3WtZv.js +0 -1
  72. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/5.iRw6HCWX.js +0 -39
  73. lightly_studio/dist_lightly_studio_view_app/_app/immutable/nodes/8.C4v1w-oS.js +0 -20
  74. {lightly_studio-0.4.0.dist-info → lightly_studio-0.4.1.dist-info}/WHEEL +0 -0
The only source diff shown below is the complete removal of lightly_studio/dataset/loader.py (file 58 in the list above):

lightly_studio/dataset/loader.py
@@ -1,581 +0,0 @@
-"""Dataset functionality module."""
-
-from __future__ import annotations
-
-from collections.abc import Iterator
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Iterable
-from uuid import UUID
-
-import fsspec
-import PIL
-from labelformat.formats import (
-    COCOInstanceSegmentationInput,
-    COCOObjectDetectionInput,
-    YOLOv8ObjectDetectionInput,
-)
-from labelformat.model.binary_mask_segmentation import BinaryMaskSegmentation
-from labelformat.model.bounding_box import BoundingBoxFormat
-from labelformat.model.image import Image
-from labelformat.model.instance_segmentation import (
-    ImageInstanceSegmentation,
-    InstanceSegmentationInput,
-)
-from labelformat.model.multipolygon import MultiPolygon
-from labelformat.model.object_detection import (
-    ImageObjectDetection,
-    ObjectDetectionInput,
-)
-from sqlmodel import Session
-from tqdm import tqdm
-
-from lightly_studio import db_manager
-from lightly_studio.api.features import lightly_studio_active_features
-from lightly_studio.api.server import Server
-from lightly_studio.dataset import env, fsspec_lister
-from lightly_studio.dataset.embedding_generator import EmbeddingGenerator
-from lightly_studio.dataset.embedding_manager import (
-    EmbeddingManager,
-    EmbeddingManagerProvider,
-)
-from lightly_studio.models.annotation.annotation_base import AnnotationCreate
-from lightly_studio.models.annotation_label import AnnotationLabelCreate
-from lightly_studio.models.dataset import DatasetCreate, DatasetTable
-from lightly_studio.models.sample import SampleCreate, SampleTable
-from lightly_studio.resolvers import (
-    annotation_label_resolver,
-    annotation_resolver,
-    dataset_resolver,
-    sample_resolver,
-)
-
-# Constants
-ANNOTATION_BATCH_SIZE = 64  # Number of annotations to process in a single batch
-SAMPLE_BATCH_SIZE = 32  # Number of samples to process in a single batch
-
-
-@dataclass
-class AnnotationProcessingContext:
-    """Context for processing annotations for a single sample."""
-
-    dataset_id: UUID
-    sample_id: UUID
-    label_map: dict[int, UUID]
-
-
-class DatasetLoader:
-    """Class responsible for loading datasets from various sources."""
-
-    def __init__(self) -> None:
-        """Initialize the dataset loader."""
-        self.session = db_manager.persistent_session()
-        self.embedding_manager = EmbeddingManagerProvider.get_embedding_manager()
-
-    def _load_into_dataset(
-        self,
-        dataset: DatasetTable,
-        input_labels: ObjectDetectionInput | InstanceSegmentationInput,
-        img_dir: Path,
-    ) -> None:
-        """Store a loaded dataset in database."""
-        # Create label mapping
-        label_map = _create_label_map(session=self.session, input_labels=input_labels)
-
-        annotations_to_create: list[AnnotationCreate] = []
-        sample_ids: list[UUID] = []
-        samples_to_create: list[SampleCreate] = []
-        samples_image_data: list[
-            tuple[SampleCreate, ImageInstanceSegmentation | ImageObjectDetection]
-        ] = []
-
-        for image_data in tqdm(input_labels.get_labels(), desc="Processing images", unit=" images"):
-            image: Image = image_data.image  # type: ignore[attr-defined]
-
-            typed_image_data: ImageInstanceSegmentation | ImageObjectDetection = image_data  # type: ignore[assignment]
-            sample = SampleCreate(
-                file_name=str(image.filename),
-                file_path_abs=str(img_dir / image.filename),
-                width=image.width,
-                height=image.height,
-                dataset_id=dataset.dataset_id,
-            )
-            samples_to_create.append(sample)
-            samples_image_data.append((sample, typed_image_data))
-
-            if len(samples_to_create) >= SAMPLE_BATCH_SIZE:
-                stored_samples = sample_resolver.create_many(
-                    session=self.session, samples=samples_to_create
-                )
-                _process_batch_annotations(
-                    session=self.session,
-                    stored_samples=stored_samples,
-                    samples_data=samples_image_data,
-                    dataset_id=dataset.dataset_id,
-                    label_map=label_map,
-                    annotations_to_create=annotations_to_create,
-                    sample_ids=sample_ids,
-                )
-                samples_to_create.clear()
-                samples_image_data.clear()
-
-        if samples_to_create:
-            stored_samples = sample_resolver.create_many(
-                session=self.session, samples=samples_to_create
-            )
-            _process_batch_annotations(
-                session=self.session,
-                stored_samples=stored_samples,
-                samples_data=samples_image_data,
-                dataset_id=dataset.dataset_id,
-                label_map=label_map,
-                annotations_to_create=annotations_to_create,
-                sample_ids=sample_ids,
-            )
-
-        # Insert any remaining annotations
-        if annotations_to_create:
-            annotation_resolver.create_many(session=self.session, annotations=annotations_to_create)
-
-        # Generate embeddings for the dataset.
-        _generate_embeddings(
-            session=self.session,
-            embedding_manager=self.embedding_manager,
-            dataset_id=dataset.dataset_id,
-            sample_ids=sample_ids,
-        )
-
-    def from_yolo(
-        self,
-        data_yaml_path: str,
-        input_split: str = "train",
-        task_name: str | None = None,
-    ) -> DatasetTable:
-        """Load a dataset in YOLO format and store in DB.
-
-        Args:
-            data_yaml_path: Path to the YOLO data.yaml file.
-            input_split: The split to load (e.g., 'train', 'val').
-            task_name: Optional name for the annotation task. If None, a
-                default name is generated.
-
-        Returns:
-            DatasetTable: The created dataset table entry.
-        """
-        data_yaml = Path(data_yaml_path).absolute()
-        dataset_name = data_yaml.parent.name
-
-        if task_name is None:
-            task_name = f"Loaded from YOLO: {data_yaml.name} ({input_split} split)"
-
-        # Load the dataset using labelformat.
-        label_input = YOLOv8ObjectDetectionInput(
-            input_file=data_yaml,
-            input_split=input_split,
-        )
-        img_dir = label_input._images_dir()  # noqa: SLF001
-
-        return self.from_labelformat(
-            input_labels=label_input,
-            dataset_name=dataset_name,
-            img_dir=str(img_dir),
-        )
-
-    def from_coco_object_detections(
-        self,
-        annotations_json_path: str,
-        img_dir: str,
-    ) -> DatasetTable:
-        """Load a dataset in COCO Object Detection format and store in DB.
-
-        Args:
-            annotations_json_path: Path to the COCO annotations JSON file.
-            img_dir: Path to the folder containing the images.
-
-        Returns:
-            DatasetTable: The created dataset table entry.
-        """
-        annotations_json = Path(annotations_json_path)
-        dataset_name = annotations_json.parent.name
-
-        label_input = COCOObjectDetectionInput(
-            input_file=annotations_json,
-        )
-        img_dir_path = Path(img_dir).absolute()
-
-        return self.from_labelformat(
-            input_labels=label_input,
-            dataset_name=dataset_name,
-            img_dir=str(img_dir_path),
-        )
-
-    def from_coco_instance_segmentations(
-        self,
-        annotations_json_path: str,
-        img_dir: str,
-    ) -> DatasetTable:
-        """Load a dataset in COCO Instance Segmentation format and store in DB.
-
-        Args:
-            annotations_json_path: Path to the COCO annotations JSON file.
-            img_dir: Path to the folder containing the images.
-
-        Returns:
-            DatasetTable: The created dataset table entry.
-        """
-        annotations_json = Path(annotations_json_path)
-        dataset_name = annotations_json.parent.name
-
-        label_input = COCOInstanceSegmentationInput(
-            input_file=annotations_json,
-        )
-        img_dir_path = Path(img_dir).absolute()
-
-        return self.from_labelformat(
-            input_labels=label_input,
-            dataset_name=dataset_name,
-            img_dir=str(img_dir_path),
-        )
-
-    def from_labelformat(
-        self,
-        input_labels: ObjectDetectionInput | InstanceSegmentationInput,
-        dataset_name: str,
-        img_dir: str,
-    ) -> DatasetTable:
-        """Load a dataset from a labelformat object and store in database.
-
-        Args:
-            input_labels: The labelformat input object.
-            dataset_name: The name for the new dataset.
-            img_dir: Path to the folder containing the images.
-
-        Returns:
-            DatasetTable: The created dataset table entry.
-        """
-        img_dir_path = Path(img_dir).absolute()
-
-        # Create dataset and annotation task.
-        dataset = dataset_resolver.create(
-            session=self.session,
-            dataset=DatasetCreate(name=dataset_name),
-        )
-
-        self._load_into_dataset(
-            dataset=dataset,
-            input_labels=input_labels,
-            img_dir=img_dir_path,
-        )
-        return dataset
-
-    def from_directory(
-        self,
-        dataset_name: str,
-        img_dir: str,
-        allowed_extensions: Iterable[str] = {
-            ".png",
-            ".jpg",
-            ".jpeg",
-            ".gif",
-            ".webp",
-            ".bmp",
-            ".tiff",
-        },
-    ) -> DatasetTable:
-        """Load a dataset from a folder of images and store in database.
-
-        Args:
-            dataset_name: The name for the new dataset.
-            img_dir: Path to the folder containing the images.
-            allowed_extensions: An iterable container of allowed image file
-                extensions.
-        """
-        # Create dataset.
-        dataset = dataset_resolver.create(
-            session=self.session,
-            dataset=DatasetCreate(name=dataset_name),
-        )
-
-        # Collect image file paths with extension filtering.
-        allowed_extensions_set = {ext.lower() for ext in allowed_extensions}
-        image_paths = list(fsspec_lister.iter_files_from_path(img_dir, allowed_extensions_set))
-        print(f"Found {len(image_paths)} images in {img_dir}.")
-
-        # Process images.
-        sample_ids = _create_samples_from_paths(
-            session=self.session,
-            dataset_id=dataset.dataset_id,
-            image_paths=image_paths,
-        )
-
-        # Generate embeddings for the dataset.
-        _generate_embeddings(
-            session=self.session,
-            embedding_manager=self.embedding_manager,
-            dataset_id=dataset.dataset_id,
-            sample_ids=list(sample_ids),
-        )
-
-        return dataset
-
-    def _validate_has_samples(self) -> None:
-        """Validate that there are samples in the database before starting GUI.
-
-        Raises:
-            ValueError: If no samples are found in any dataset.
-        """
-        # Check if any datasets exist
-        datasets = dataset_resolver.get_all(session=self.session, offset=0, limit=1)
-
-        if not datasets:
-            raise ValueError(
-                "No datasets found. Please load a dataset using one of the loader methods "
-                "(e.g., from_yolo(), from_directory(), etc.) before starting the GUI."
-            )
-
-        # Check if there are any samples in the first dataset
-        first_dataset = datasets[0]
-        sample_count = sample_resolver.count_by_dataset_id(
-            session=self.session, dataset_id=first_dataset.dataset_id
-        )
-
-        if sample_count == 0:
-            raise ValueError(
-                "No images have been indexed for the first dataset. "
-                "Please ensure your dataset contains valid images and try loading again."
-            )
-
-    def start_gui(self) -> None:
-        """Launch the web interface for the loaded dataset."""
-        self._validate_has_samples()
-
-        server = Server(host=env.LIGHTLY_STUDIO_HOST, port=env.LIGHTLY_STUDIO_PORT)
-
-        print(f"Open the LightlyStudio GUI under: {env.APP_URL}")
-
-        server.start()
-
-
-def _create_samples_from_paths(
-    session: Session,
-    dataset_id: UUID,
-    image_paths: Iterable[str],
-) -> Iterator[UUID]:
-    """Create samples from a list of image paths.
-
-    Args:
-        session: The database session to use.
-        dataset_id: The ID of the dataset to which samples belong.
-        image_paths: An iterable of image file paths.
-
-    Yields:
-        UUIDs of created sample records.
-    """
-    samples_to_create: list[SampleCreate] = []
-
-    for image_path in tqdm(
-        image_paths,
-        desc="Processing images",
-        unit=" images",
-    ):
-        try:
-            with fsspec.open(image_path, "rb") as file, PIL.Image.open(file) as img:
-                width, height = img.size
-        except (FileNotFoundError, PIL.UnidentifiedImageError, OSError):
-            continue
-
-        sample = SampleCreate(
-            file_name=Path(image_path).name,
-            file_path_abs=image_path,
-            width=width,
-            height=height,
-            dataset_id=dataset_id,
-        )
-        samples_to_create.append(sample)
-
-        # Process batch when it reaches SAMPLE_BATCH_SIZE
-        if len(samples_to_create) >= SAMPLE_BATCH_SIZE:
-            stored_samples = sample_resolver.create_many(session=session, samples=samples_to_create)
-            for stored_sample in stored_samples:
-                yield stored_sample.sample_id
-            samples_to_create = []
-
-    # Handle remaining samples
-    if samples_to_create:
-        stored_samples = sample_resolver.create_many(session=session, samples=samples_to_create)
-        for stored_sample in stored_samples:
-            yield stored_sample.sample_id
-
-
-def _create_label_map(
-    session: Session,
-    input_labels: ObjectDetectionInput | InstanceSegmentationInput,
-) -> dict[int, UUID]:
-    """Create a mapping of category IDs to annotation label IDs."""
-    label_map = {}
-    for category in tqdm(
-        input_labels.get_categories(),
-        desc="Processing categories",
-        unit=" categories",
-    ):
-        label = AnnotationLabelCreate(annotation_label_name=category.name)
-        stored_label = annotation_label_resolver.create(session=session, label=label)
-        label_map[category.id] = stored_label.annotation_label_id
-    return label_map
-
-
-def _process_object_detection_annotations(
-    context: AnnotationProcessingContext,
-    image_data: ImageObjectDetection,
-) -> list[AnnotationCreate]:
-    """Process object detection annotations for a single image."""
-    new_annotations = []
-    for obj in image_data.objects:
-        box = obj.box.to_format(BoundingBoxFormat.XYWH)
-        x, y, width, height = box
-
-        new_annotations.append(
-            AnnotationCreate(
-                dataset_id=context.dataset_id,
-                sample_id=context.sample_id,
-                annotation_label_id=context.label_map[obj.category.id],
-                annotation_type="object_detection",
-                x=int(x),
-                y=int(y),
-                width=int(width),
-                height=int(height),
-                confidence=obj.confidence,
-            )
-        )
-    return new_annotations
-
-
-def _process_instance_segmentation_annotations(
-    context: AnnotationProcessingContext,
-    image_data: ImageInstanceSegmentation,
-) -> list[AnnotationCreate]:
-    """Process instance segmentation annotations for a single image."""
-    new_annotations = []
-    for obj in image_data.objects:
-        segmentation_rle: None | list[int] = None
-        if isinstance(obj.segmentation, MultiPolygon):
-            box = obj.segmentation.bounding_box().to_format(BoundingBoxFormat.XYWH)
-        elif isinstance(obj.segmentation, BinaryMaskSegmentation):
-            box = obj.segmentation.bounding_box.to_format(BoundingBoxFormat.XYWH)
-            segmentation_rle = obj.segmentation._rle_row_wise  # noqa: SLF001
-        else:
-            raise ValueError(f"Unsupported segmentation type: {type(obj.segmentation)}")
-
-        x, y, width, height = box
-
-        new_annotations.append(
-            AnnotationCreate(
-                dataset_id=context.dataset_id,
-                sample_id=context.sample_id,
-                annotation_label_id=context.label_map[obj.category.id],
-                annotation_type="instance_segmentation",
-                x=int(x),
-                y=int(y),
-                width=int(width),
-                height=int(height),
-                segmentation_mask=segmentation_rle,
-            )
-        )
-    return new_annotations
-
-
-def _process_batch_annotations(  # noqa: PLR0913
-    session: Session,
-    stored_samples: list[SampleTable],
-    samples_data: list[tuple[SampleCreate, ImageInstanceSegmentation | ImageObjectDetection]],
-    dataset_id: UUID,
-    label_map: dict[int, UUID],
-    annotations_to_create: list[AnnotationCreate],
-    sample_ids: list[UUID],
-) -> None:
-    """Process annotations for a batch of samples."""
-    for stored_sample, (_, img_data) in zip(stored_samples, samples_data):
-        sample_ids.append(stored_sample.sample_id)
-
-        context = AnnotationProcessingContext(
-            dataset_id=dataset_id,
-            sample_id=stored_sample.sample_id,
-            label_map=label_map,
-        )
-
-        if isinstance(img_data, ImageInstanceSegmentation):
-            new_annotations = _process_instance_segmentation_annotations(
-                context=context, image_data=img_data
-            )
-        elif isinstance(img_data, ImageObjectDetection):
-            new_annotations = _process_object_detection_annotations(
-                context=context, image_data=img_data
-            )
-        else:
-            raise ValueError(f"Unsupported annotation type: {type(img_data)}")
-
-        annotations_to_create.extend(new_annotations)
-
-        if len(annotations_to_create) >= ANNOTATION_BATCH_SIZE:
-            annotation_resolver.create_many(session=session, annotations=annotations_to_create)
-            annotations_to_create.clear()
-
-
-def _generate_embeddings(
-    session: Session,
-    embedding_manager: EmbeddingManager,
-    dataset_id: UUID,
-    sample_ids: list[UUID],
-) -> None:
-    """Generate embeddings for the dataset."""
-    # Load an embedding generator and register the model.
-    embedding_generator = _load_embedding_generator()
-
-    if embedding_generator:
-        lightly_studio_active_features.append("embeddingSearchEnabled")
-        embedding_model = embedding_manager.register_embedding_model(
-            session=session,
-            dataset_id=dataset_id,
-            embedding_generator=embedding_generator,
-        )
-        embedding_manager.embed_images(
-            session=session,
-            sample_ids=sample_ids,
-            embedding_model_id=embedding_model.embedding_model_id,
-        )
-
-
-def _load_embedding_generator() -> EmbeddingGenerator | None:
-    """Load the embedding generator.
-
-    Use MobileCLIP if its dependencies have been installed,
-    otherwise return None.
-    """
-    if env.LIGHTLY_STUDIO_EMBEDDINGS_MODEL_TYPE == "EDGE":
-        try:
-            from lightly_studio.dataset.edge_embedding_generator import (
-                EdgeSDKEmbeddingGenerator,
-            )
-
-            print("Using LightlyEdge embedding generator.")
-            return EdgeSDKEmbeddingGenerator(model_path=env.LIGHTLY_STUDIO_EDGE_MODEL_FILE_PATH)
-        except ImportError:
-            print("Embedding functionality is disabled.")
-            return None
-    elif env.LIGHTLY_STUDIO_EMBEDDINGS_MODEL_TYPE == "MOBILE_CLIP":
-        try:
-            from lightly_studio.dataset.mobileclip_embedding_generator import (
-                MobileCLIPEmbeddingGenerator,
-            )
-
-            print("Using MobileCLIP embedding generator.")
-            return MobileCLIPEmbeddingGenerator()
-        except ImportError:
-            print("Embedding functionality is disabled.")
-            return None
-    else:
-        print(
-            f"Unsupported model type: '{env.LIGHTLY_STUDIO_EMBEDDINGS_MODEL_TYPE}'",
-        )
-        print("Embedding functionality is disabled.")
-        return None
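
For reference, here is a minimal, hypothetical usage sketch of the DatasetLoader API that this release removes, reconstructed from the deleted docstrings above. The file paths are placeholders, and the 0.4.1 replacement entry point is not visible in this diff, so treat the import path and method names as valid for 0.4.0 only:

from lightly_studio.dataset.loader import DatasetLoader  # module removed in 0.4.1

loader = DatasetLoader()

# Load a YOLO-format dataset; the dataset name defaults to the data.yaml parent folder.
dataset = loader.from_yolo(data_yaml_path="path/to/data.yaml", input_split="train")

# Alternatively, index a plain folder of images (extension-filtered; embeddings are
# generated when an embedding model is available).
# dataset = loader.from_directory(dataset_name="my_images", img_dir="path/to/images")

# Launch the web UI; raises ValueError if no samples were indexed.
loader.start_gui()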