hafnia 0.5.0__py3-none-any.whl → 0.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hafnia/dataset/dataset_helpers.py +59 -1
- hafnia/dataset/dataset_names.py +1 -108
- hafnia/dataset/dataset_recipe/dataset_recipe.py +48 -4
- hafnia/dataset/format_conversions/torchvision_datasets.py +2 -2
- hafnia/dataset/hafnia_dataset.py +163 -69
- hafnia/dataset/hafnia_dataset_types.py +142 -18
- hafnia/dataset/operations/dataset_s3_storage.py +7 -2
- hafnia/dataset/operations/table_transformations.py +0 -18
- hafnia/experiment/command_builder.py +686 -0
- hafnia/platform/datasets.py +32 -132
- hafnia/platform/download.py +1 -1
- hafnia/platform/s5cmd_utils.py +122 -3
- {hafnia-0.5.0.dist-info → hafnia-0.5.2.dist-info}/METADATA +3 -2
- {hafnia-0.5.0.dist-info → hafnia-0.5.2.dist-info}/RECORD +19 -18
- hafnia_cli/dataset_cmds.py +19 -13
- hafnia_cli/runc_cmds.py +7 -2
- {hafnia-0.5.0.dist-info → hafnia-0.5.2.dist-info}/WHEEL +0 -0
- {hafnia-0.5.0.dist-info → hafnia-0.5.2.dist-info}/entry_points.txt +0 -0
- {hafnia-0.5.0.dist-info → hafnia-0.5.2.dist-info}/licenses/LICENSE +0 -0

```diff
--- a/hafnia/dataset/hafnia_dataset_types.py
+++ b/hafnia/dataset/hafnia_dataset_types.py
@@ -1,5 +1,6 @@
 import collections
 import json
+from dataclasses import dataclass
 from datetime import datetime
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Type, Union
```

```diff
@@ -7,12 +8,21 @@ from typing import Any, Dict, List, Optional, Type, Union
 import cv2
 import more_itertools
 import numpy as np
+import polars as pl
 from packaging.version import Version
 from PIL import Image
 from pydantic import BaseModel, Field, field_serializer, field_validator

 import hafnia
-from hafnia.dataset
+from hafnia.dataset import dataset_helpers
+from hafnia.dataset.dataset_helpers import version_from_string
+from hafnia.dataset.dataset_names import (
+    FILENAME_ANNOTATIONS_JSONL,
+    FILENAME_ANNOTATIONS_PARQUET,
+    FILENAME_DATASET_INFO,
+    SampleField,
+    StorageFormat,
+)
 from hafnia.dataset.primitives import (
     PRIMITIVE_TYPES,
     Bbox,
```

```diff
@@ -102,7 +112,7 @@ class TaskInfo(BaseModel):

 class DatasetInfo(BaseModel):
     dataset_name: str = Field(description="Name of the dataset, e.g. 'coco'")
-    version:
+    version: str = Field(default="0.0.0", description="Version of the dataset")
     dataset_title: Optional[str] = Field(default=None, description="Optional, human-readable title of the dataset")
     description: Optional[str] = Field(default=None, description="Optional, description of the dataset")
     tasks: List[TaskInfo] = Field(default=None, description="List of tasks in the dataset")
```

```diff
@@ -144,31 +154,21 @@ class DatasetInfo(BaseModel):
     @field_validator("format_version")
     @classmethod
     def _validate_format_version(cls, format_version: str) -> str:
-        try:
-            Version(format_version)
-        except Exception as e:
-            raise ValueError(f"Invalid format_version '{format_version}'. Must be a valid version string.") from e
+        version_casted: Version = dataset_helpers.version_from_string(format_version, raise_error=True)

-        if
+        if version_casted > Version(hafnia.__dataset_format_version__):
             user_logger.warning(
                 f"The loaded dataset format version '{format_version}' is newer than the format version "
                 f"'{hafnia.__dataset_format_version__}' used in your version of Hafnia. Please consider "
                 f"updating Hafnia package."
             )
-        return
+        return str(version_casted)

     @field_validator("version")
     @classmethod
     def _validate_version(cls, dataset_version: Optional[str]) -> Optional[str]:
-
-
-
-        try:
-            Version(dataset_version)
-        except Exception as e:
-            raise ValueError(f"Invalid dataset_version '{dataset_version}'. Must be a valid version string.") from e
-
-        return dataset_version
+        version_casted: Version = dataset_helpers.version_from_string(dataset_version, raise_error=True)
+        return str(version_casted)

     def check_for_duplicate_task_names(self) -> List[TaskInfo]:
         return self._validate_check_for_duplicate_tasks(self.tasks)
```
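
Both validators now delegate to `dataset_helpers.version_from_string`, whose implementation is not included in this diff. Below is a minimal sketch of the behavior implied by its call sites (a `packaging.version.Version` on success; on failure either a raised `ValueError` or `None`, depending on `raise_error`). This is a hypothetical reconstruction, not the packaged code:

```python
# Hypothetical reconstruction of dataset_helpers.version_from_string,
# inferred only from its call sites in this diff.
from typing import Optional

from packaging.version import InvalidVersion, Version


def version_from_string(version_str: str, raise_error: bool = True) -> Optional[Version]:
    """Parse a version string into a Version; raise or return None on failure."""
    try:
        return Version(version_str)
    except (InvalidVersion, TypeError) as e:
        if raise_error:
            raise ValueError(f"Invalid version '{version_str}'. Must be a valid version string.") from e
        return None
```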

```diff
@@ -238,7 +238,7 @@ class DatasetInfo(BaseModel):
         meta.update(info1.meta or {})
         return DatasetInfo(
             dataset_name=info0.dataset_name + "+" + info1.dataset_name,
-            version=
+            version="0.0.0",
             tasks=list(unique_tasks),
             meta=meta,
             format_version=dataset_format_version,
```

```diff
@@ -477,3 +477,127 @@ class Sample(BaseModel):
         annotations = self.get_annotations()
         annotations_visualized = image_visualizations.draw_annotations(image=image, primitives=annotations)
         return annotations_visualized
+
+
+@dataclass
+class DatasetMetadataFilePaths:
+    dataset_info: str  # Use 'str' to also support s3 paths
+    annotations_jsonl: Optional[str]
+    annotations_parquet: Optional[str]
+
+    def as_list(self) -> List[str]:
+        files = [self.dataset_info]
+        if self.annotations_jsonl is not None:
+            files.append(self.annotations_jsonl)
+        if self.annotations_parquet is not None:
+            files.append(self.annotations_parquet)
+        return files
+
+    def read_samples(self) -> pl.DataFrame:
+        if self.annotations_parquet is not None:
+            if not Path(self.annotations_parquet).exists():
+                raise FileNotFoundError(f"Parquet annotations file '{self.annotations_parquet}' does not exist.")
+            user_logger.info(f"Reading dataset annotations from Parquet file: {self.annotations_parquet}")
+            return pl.read_parquet(self.annotations_parquet)
+
+        if self.annotations_jsonl is not None:
+            if not Path(self.annotations_jsonl).exists():
+                raise FileNotFoundError(f"JSONL annotations file '{self.annotations_jsonl}' does not exist.")
+            user_logger.info(f"Reading dataset annotations from JSONL file: {self.annotations_jsonl}")
+            return pl.read_ndjson(self.annotations_jsonl)
+
+        raise ValueError(
+            "No annotations file available to read samples from. Dataset is missing both JSONL and Parquet files."
+        )
+
+    @staticmethod
+    def from_path(path_dataset: Path) -> "DatasetMetadataFilePaths":
+        path_dataset = path_dataset.absolute()
+        metadata_files = DatasetMetadataFilePaths(
+            dataset_info=str(path_dataset / FILENAME_DATASET_INFO),
+            annotations_jsonl=str(path_dataset / FILENAME_ANNOTATIONS_JSONL),
+            annotations_parquet=str(path_dataset / FILENAME_ANNOTATIONS_PARQUET),
+        )
+
+        return metadata_files
+
+    @staticmethod
+    def available_versions_from_files_list(files: list[str]) -> Dict[Version, "DatasetMetadataFilePaths"]:
+        versions_and_files: Dict[Version, Dict[str, str]] = collections.defaultdict(dict)
+        for metadata_file in files:
+            version_str, filename = metadata_file.split("/")[-2:]
+            versions_and_files[version_str][filename] = metadata_file
+
+        available_versions: Dict[Version, DatasetMetadataFilePaths] = {}
+        for version_str, version_files in versions_and_files.items():
+            version_casted: Version = dataset_helpers.version_from_string(version_str, raise_error=False)
+            if version_casted is None:
+                continue
+
+            if FILENAME_DATASET_INFO not in version_files:
+                continue
+            dataset_metadata_file = DatasetMetadataFilePaths(
+                dataset_info=version_files[FILENAME_DATASET_INFO],
+                annotations_jsonl=version_files.get(FILENAME_ANNOTATIONS_JSONL, None),
+                annotations_parquet=version_files.get(FILENAME_ANNOTATIONS_PARQUET, None),
+            )
+
+            available_versions[version_casted] = dataset_metadata_file
+
+        return available_versions
+
+    def check_version(self, version: str, raise_error: bool = True) -> bool:
+        """
+        Check if the dataset metadata files match the given version.
+        If raise_error is True, raises ValueError if the version does not match.
+        """
+        valid_version = version_from_string(version, raise_error=raise_error)
+        if valid_version is None:
+            return False
+
+        path_dataset_info = Path(self.dataset_info)
+        if not path_dataset_info.exists():
+            raise FileNotFoundError(f"Dataset info file missing '{self.dataset_info}' in dataset folder.")
+
+        dataset_info = json.loads(path_dataset_info.read_text())
+        dataset_version = dataset_info.get("version", None)
+        if dataset_version != version:
+            if raise_error:
+                raise ValueError(
+                    f"Dataset version mismatch. Expected version '{version}' but found "
+                    f"version '{dataset_version}' in dataset info."
+                )
+            return False
+
+        return True
+
+    def exists(self, version: Optional[str] = None, raise_error: bool = True) -> bool:
+        """
+        Check if all metadata files exist.
+        Add version to check if it matches the version in dataset info.
+        If raise_error is True, raises FileNotFoundError if any file is missing.
+        """
+        path_dataset_info = Path(self.dataset_info)
+        if not path_dataset_info.exists():
+            if raise_error:
+                raise FileNotFoundError(f"Dataset info file missing '{self.dataset_info}' in dataset folder.")
+            return False
+
+        if version is not None and self.check_version(version, raise_error=raise_error) is False:
+            return False
+
+        has_jsonl_file = self.annotations_jsonl is not None and Path(self.annotations_jsonl).exists()
+        if has_jsonl_file:
+            return True
+
+        has_parquet_file = self.annotations_parquet is not None and Path(self.annotations_parquet).exists()
+        if has_parquet_file:
+            return True
+
+        if raise_error:
+            raise FileNotFoundError(
+                f"Missing annotation file. Expected either '{FILENAME_ANNOTATIONS_JSONL}' or "
+                f"'{FILENAME_ANNOTATIONS_PARQUET}' in dataset folder."
+            )
+
+        return False
```
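
A short usage sketch for the new `DatasetMetadataFilePaths` class. The folder path and version string are illustrative, and the import path assumes the hunks above belong to `hafnia/dataset/hafnia_dataset_types.py`, consistent with the file list and add/remove counts at the top:

```python
# Illustrative usage only: the dataset path and version are made up.
from pathlib import Path

from hafnia.dataset.hafnia_dataset_types import DatasetMetadataFilePaths

metadata = DatasetMetadataFilePaths.from_path(Path("/data/my_dataset/1.0.0"))
if metadata.exists(version="1.0.0", raise_error=False):
    samples = metadata.read_samples()  # prefers Parquet, falls back to JSONL
    print(samples.shape)
```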

```diff
--- a/hafnia/dataset/operations/dataset_s3_storage.py
+++ b/hafnia/dataset/operations/dataset_s3_storage.py
@@ -8,13 +8,13 @@ import polars as pl
 from hafnia.dataset.dataset_helpers import hash_file_xxhash
 from hafnia.dataset.dataset_names import (
     DatasetVariant,
-    ResourceCredentials,
     SampleField,
 )
 from hafnia.dataset.hafnia_dataset import HafniaDataset
 from hafnia.log import user_logger
 from hafnia.platform import s5cmd_utils
 from hafnia.platform.datasets import get_upload_credentials
+from hafnia.platform.s5cmd_utils import ResourceCredentials
 from hafnia.utils import progress_bar
 from hafnia_cli.config import Config

```

```diff
@@ -39,6 +39,7 @@ def delete_hafnia_dataset_files_on_platform(
 def delete_hafnia_dataset_files_from_resource_credentials(
     resource_credentials: ResourceCredentials,
     interactive: bool = True,
+    remove_bucket: bool = True,
 ) -> bool:
     envs = resource_credentials.aws_credentials()
     bucket_name = resource_credentials.bucket_name()
```

```diff
@@ -58,7 +59,11 @@ def delete_hafnia_dataset_files_from_resource_credentials(
         user_logger.info("Delete operation cancelled by the user.")
         return False
     user_logger.info(f"Deleting all files in S3 bucket '{bucket_name}'...")
-    s5cmd_utils.delete_bucket_content(
+    s5cmd_utils.delete_bucket_content(
+        bucket_prefix=f"s3://{bucket_name}",
+        remove_bucket=remove_bucket,
+        append_envs=envs,
+    )
     return True

```
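
The new `remove_bucket` flag is forwarded to `s5cmd_utils.delete_bucket_content`, so callers can empty a bucket without also removing it. A hedged call sketch; the wrapper function and argument values are illustrative, while the parameter names come from this diff:

```python
# Hypothetical wrapper: names come from this diff, values are illustrative.
from hafnia.dataset.operations.dataset_s3_storage import (
    delete_hafnia_dataset_files_from_resource_credentials,
)
from hafnia.platform.s5cmd_utils import ResourceCredentials


def wipe_dataset_files(creds: ResourceCredentials) -> bool:
    # interactive=False skips the confirmation prompt; remove_bucket=False
    # deletes the objects but keeps the (now empty) bucket.
    return delete_hafnia_dataset_files_from_resource_credentials(
        resource_credentials=creds,
        interactive=False,
        remove_bucket=False,
    )
```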

```diff
--- a/hafnia/dataset/operations/table_transformations.py
+++ b/hafnia/dataset/operations/table_transformations.py
@@ -4,8 +4,6 @@ from typing import List, Optional, Tuple, Type
 import polars as pl

 from hafnia.dataset.dataset_names import (
-    FILENAME_ANNOTATIONS_JSONL,
-    FILENAME_ANNOTATIONS_PARQUET,
     PrimitiveField,
     SampleField,
 )
```

```diff
@@ -204,22 +202,6 @@ def split_primitive_columns_by_task_name(
     return samples_table


-def read_samples_from_path(path: Path) -> pl.DataFrame:
-    path_annotations = path / FILENAME_ANNOTATIONS_PARQUET
-    if path_annotations.exists():
-        user_logger.info(f"Reading dataset annotations from Parquet file: {path_annotations}")
-        return pl.read_parquet(path_annotations)
-
-    path_annotations_jsonl = path / FILENAME_ANNOTATIONS_JSONL
-    if path_annotations_jsonl.exists():
-        user_logger.info(f"Reading dataset annotations from JSONL file: {path_annotations_jsonl}")
-        return pl.read_ndjson(path_annotations_jsonl)
-
-    raise FileNotFoundError(
-        f"Unable to read annotations. No json file '{path_annotations.name}' or Parquet file '{{path_annotations.name}} in in '{path}'."
-    )
-
-
 def check_image_paths(table: pl.DataFrame) -> bool:
     missing_files = []
     org_paths = table[SampleField.FILE_PATH].to_list()
```
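
The removed `read_samples_from_path` helper duplicated the Parquet-first, JSONL-fallback logic that now lives in `DatasetMetadataFilePaths.read_samples` (added in `hafnia_dataset_types.py` above). A plausible migration, with an illustrative dataset folder:

```python
# Hypothetical migration from the removed helper; the path is illustrative.
from pathlib import Path

from hafnia.dataset.hafnia_dataset_types import DatasetMetadataFilePaths

# 0.5.0: samples = read_samples_from_path(Path("/data/my_dataset"))
samples = DatasetMetadataFilePaths.from_path(Path("/data/my_dataset")).read_samples()
```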