hirundo-0.1.9-py3-none-any.whl → hirundo-0.1.18-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hirundo/__init__.py CHANGED
@@ -1,43 +1,62 @@
+ from .dataset_enum import (
+     DatasetMetadataType,
+     LabelingType,
+     StorageTypes,
+ )
  from .dataset_optimization import (
-     COCO,
-     YOLO,
-     HirundoCSV,
      HirundoError,
      OptimizationDataset,
      RunArgs,
      VisionRunArgs,
  )
- from .enum import (
-     DatasetMetadataType,
-     LabelingType,
+ from .dataset_optimization_results import DatasetOptimizationResults
+ from .git import GitPlainAuth, GitRepo, GitSSHAuth
+ from .labeling import (
+     COCO,
+     YOLO,
+     HirundoCSV,
+     KeylabsAuth,
+     KeylabsObjDetImages,
+     KeylabsObjDetVideo,
+     KeylabsObjSegImages,
+     KeylabsObjSegVideo,
  )
- from .git import GitRepo
  from .storage import (
      StorageConfig,
      StorageGCP,
      # StorageAzure, TODO: Azure storage is coming soon
      StorageGit,
      StorageS3,
-     StorageTypes,
  )
+ from .unzip import load_df, load_from_zip

  __all__ = [
      "COCO",
      "YOLO",
-     "HirundoCSV",
      "HirundoError",
+     "HirundoCSV",
+     "KeylabsAuth",
+     "KeylabsObjDetImages",
+     "KeylabsObjDetVideo",
+     "KeylabsObjSegImages",
+     "KeylabsObjSegVideo",
      "OptimizationDataset",
      "RunArgs",
      "VisionRunArgs",
-     "LabelingType",
      "DatasetMetadataType",
+     "LabelingType",
+     "GitPlainAuth",
      "GitRepo",
+     "GitSSHAuth",
      "StorageTypes",
      "StorageS3",
      "StorageGCP",
      # "StorageAzure", TODO: Azure storage is coming soon
      "StorageGit",
      "StorageConfig",
+     "DatasetOptimizationResults",
+     "load_df",
+     "load_from_zip",
  ]

- __version__ = "0.1.9"
+ __version__ = "0.1.18"
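Note: the 0.1.18 top-level namespace now exposes the Keylabs labeling classes, the Git auth helpers, and the result-loading utilities load_df and load_from_zip. A minimal import sketch, assuming hirundo 0.1.18 is installed (the printed values come from the dataset_enum module added later in this diff):

    from hirundo import KeylabsObjDetImages, LabelingType, StorageTypes, load_from_zip

    print(LabelingType.OBJECT_DETECTION.value)  # "ObjectDetection"
    print(StorageTypes.S3.value)                # "S3"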
hirundo/_constraints.py CHANGED
@@ -1,53 +1,164 @@
- from typing import Annotated
-
- from pydantic import StringConstraints, UrlConstraints
- from pydantic_core import Url
-
- S3BucketUrl = Annotated[
-     str,
-     StringConstraints(
-         min_length=8,
-         max_length=1023,
-         pattern=r"s3?://[a-z0-9.-]{3,64}[/]?",  # Only allow real S3 bucket URLs
-     ),
- ]
-
- StorageConfigName = Annotated[
-     str,
-     StringConstraints(
-         min_length=1,
-         max_length=255,
-         pattern=r"^[a-zA-Z0-9-_]+$",
-     ),
- ]
-
- S3_MIN_LENGTH = 8
- S3_MAX_LENGTH = 1023
- S3_PATTERN = r"s3://[a-zA-Z0-9.-]{3,64}/[a-zA-Z0-9.-/]+"
- GCP_MIN_LENGTH = 8
- GCP_MAX_LENGTH = 1023
- GCP_PATTERN = r"gs://[a-zA-Z0-9.-]{3,64}/[a-zA-Z0-9.-/]+"
-
- RepoUrl = Annotated[
-     Url,
-     UrlConstraints(
-         allowed_schemes=[
-             "ssh",
-             "https",
-             "http",
-         ]
-     ),
- ]
- HirundoUrl = Annotated[
-     Url,
-     UrlConstraints(
-         allowed_schemes=[
-             "file",
-             "https",
-             "http",
-             "s3",
-             "gs",
-             "ssh",
-         ]
-     ),
- ]
+ import re
+ import typing
+ from typing import TYPE_CHECKING
+
+ from hirundo._urls import (
+     LENGTH_CONSTRAINTS,
+     STORAGE_PATTERNS,
+ )
+ from hirundo.dataset_enum import DatasetMetadataType, LabelingType, StorageTypes
+ from hirundo.labeling import COCO, YOLO, HirundoCSV, Keylabs
+
+ if TYPE_CHECKING:
+     from hirundo._urls import HirundoUrl
+     from hirundo.dataset_optimization import LabelingInfo
+     from hirundo.storage import (
+         ResponseStorageConfig,
+         StorageConfig,
+         StorageGCP,
+         StorageGCPOut,
+         StorageS3,
+         StorageS3Out,
+     )
+
+ LABELING_TYPES_TO_DATASET_METADATA_TYPES = {
+     LabelingType.SINGLE_LABEL_CLASSIFICATION: [
+         DatasetMetadataType.HIRUNDO_CSV,
+     ],
+     LabelingType.OBJECT_DETECTION: [
+         DatasetMetadataType.HIRUNDO_CSV,
+         DatasetMetadataType.COCO,
+         DatasetMetadataType.YOLO,
+         DatasetMetadataType.KeylabsObjDetImages,
+         DatasetMetadataType.KeylabsObjDetVideo,
+     ],
+     LabelingType.OBJECT_SEGMENTATION: [
+         DatasetMetadataType.HIRUNDO_CSV,
+         DatasetMetadataType.KeylabsObjSegImages,
+         DatasetMetadataType.KeylabsObjSegVideo,
+     ],
+     LabelingType.SEMANTIC_SEGMENTATION: [
+         DatasetMetadataType.HIRUNDO_CSV,
+     ],
+     LabelingType.PANOPTIC_SEGMENTATION: [
+         DatasetMetadataType.HIRUNDO_CSV,
+     ],
+     LabelingType.SPEECH_TO_TEXT: [
+         DatasetMetadataType.HIRUNDO_CSV,
+     ],
+ }
+
+
+ def validate_s3_url(str_url: str, s3_config: "StorageS3 | StorageS3Out"):
+     if (
+         len(str_url) < LENGTH_CONSTRAINTS[StorageTypes.S3]["min_length"]
+         or len(str_url) > LENGTH_CONSTRAINTS[StorageTypes.S3]["max_length"]
+     ):
+         raise ValueError("S3 URL must be between 8 and 1023 characters")
+     elif not re.match(STORAGE_PATTERNS[StorageTypes.S3], str_url):
+         raise ValueError(
+             f"Invalid S3 URL. Pattern must match: {STORAGE_PATTERNS[StorageTypes.S3]}"
+         )
+     elif not str_url.startswith(f"{s3_config.bucket_url}/"):
+         raise ValueError(f"S3 URL must start with {s3_config.bucket_url}/")
+
+
+ def validate_gcp_url(str_url: str, gcp_config: "StorageGCP | StorageGCPOut"):
+     matches = re.match(STORAGE_PATTERNS[StorageTypes.GCP], str_url)
+     if (
+         len(str_url) < LENGTH_CONSTRAINTS[StorageTypes.GCP]["min_length"]
+         or len(str_url) > LENGTH_CONSTRAINTS[StorageTypes.GCP]["max_length"]
+     ):
+         raise ValueError(
+             f"GCP URL must be between {LENGTH_CONSTRAINTS[StorageTypes.GCP]['min_length']}"
+             + f" and {LENGTH_CONSTRAINTS[StorageTypes.GCP]['max_length']} characters"
+         )
+     elif not matches:
+         raise ValueError(
+             f"Invalid GCP URL. Pattern must match: {STORAGE_PATTERNS[StorageTypes.GCP]}"
+         )
+     elif (
+         matches
+         and len(matches.group(1))
+         > LENGTH_CONSTRAINTS[StorageTypes.GCP]["bucket_max_length"]
+     ):
+         raise ValueError(
+             f"GCP bucket name must be between {LENGTH_CONSTRAINTS[StorageTypes.GCP]['bucket_min_length']} "
+             + f"and {LENGTH_CONSTRAINTS[StorageTypes.GCP]['bucket_max_length']} characters"
+         )
+     elif not str_url.startswith(f"gs://{gcp_config.bucket_name}/"):
+         raise ValueError(f"GCP URL must start with gs://{gcp_config.bucket_name}")
+
+
+ def validate_url(
+     url: "HirundoUrl",
+     storage_config: "StorageConfig | ResponseStorageConfig",
+ ) -> "HirundoUrl":
+     s3_config = storage_config.s3
+     gcp_config = storage_config.gcp
+     git_config = storage_config.git
+     str_url = str(url)
+
+     if s3_config is not None:
+         validate_s3_url(str_url, s3_config)
+     elif gcp_config is not None:
+         validate_gcp_url(str_url, gcp_config)
+     elif (
+         git_config is not None
+         and not str_url.startswith("https://")
+         and not str_url.startswith("ssh://")
+     ):
+         raise ValueError("Git URL must start with https:// or ssh://")
+     elif storage_config.type == StorageTypes.LOCAL and not str_url.startswith(
+         "file:///datasets/"
+     ):
+         raise ValueError("Local URL must start with file:///datasets/")
+     return url
+
+
+ def validate_labeling_type(
+     labeling_type: "LabelingType", labeling_info: "LabelingInfo"
+ ) -> None:
+     """
+     Validate that the labeling type is compatible with the labeling info
+
+     Args:
+         labeling_type: The type of labeling that will be performed
+         labeling_info: The labeling info to validate
+     """
+     dataset_metadata_types = LABELING_TYPES_TO_DATASET_METADATA_TYPES[labeling_type]
+     if labeling_info.type not in dataset_metadata_types:
+         raise ValueError(
+             f"Cannot use {labeling_info.type.name} labeling info with {labeling_type.name} datasets"
+         )
+
+
+ def validate_labeling_info(
+     labeling_type: "LabelingType",
+     labeling_info: "typing.Union[LabelingInfo, list[LabelingInfo]]",
+     storage_config: "typing.Union[StorageConfig, ResponseStorageConfig]",
+ ) -> None:
+     """
+     Validate the labeling info for a dataset
+
+     Args:
+         labeling_type: The type of labeling that will be performed
+         labeling_info: The labeling info to validate
+         storage_config: The storage configuration for the dataset.
+             StorageConfig is used to validate the URLs in the labeling info
+     """
+     if isinstance(labeling_info, list):
+         for labeling in labeling_info:
+             validate_labeling_info(labeling_type, labeling, storage_config)
+         return
+     elif isinstance(labeling_info, HirundoCSV):
+         validate_url(labeling_info.csv_url, storage_config)
+     elif isinstance(labeling_info, COCO):
+         validate_url(labeling_info.json_url, storage_config)
+     elif isinstance(labeling_info, YOLO):
+         validate_url(labeling_info.labels_dir_url, storage_config)
+         if labeling_info.data_yaml_url is not None:
+             validate_url(labeling_info.data_yaml_url, storage_config)
+     elif isinstance(labeling_info, Keylabs):
+         validate_url(labeling_info.labels_dir_url, storage_config)
+     validate_labeling_type(labeling_type, labeling_info)
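The LABELING_TYPES_TO_DATASET_METADATA_TYPES table above is what validate_labeling_type enforces: each labeling type only accepts certain metadata formats (HirundoCSV everywhere, COCO/YOLO/Keylabs only where listed). An illustrative check against that mapping, assuming hirundo 0.1.18 is installed (note that _constraints is an internal module):

    from hirundo import DatasetMetadataType, LabelingType
    from hirundo._constraints import LABELING_TYPES_TO_DATASET_METADATA_TYPES

    allowed = LABELING_TYPES_TO_DATASET_METADATA_TYPES[LabelingType.OBJECT_DETECTION]
    assert DatasetMetadataType.COCO in allowed
    assert DatasetMetadataType.COCO not in LABELING_TYPES_TO_DATASET_METADATA_TYPES[LabelingType.SPEECH_TO_TEXT]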
hirundo/_dataframe.py ADDED
@@ -0,0 +1,43 @@
+ has_pandas = False
+ has_polars = False
+
+ pd = None
+ pl = None
+ int32 = type[None]
+ float32 = type[None]
+ string = type[None]
+ # ⬆️ These are just placeholders for the int32, float32 and string types
+ # for when neither pandas nor polars are available
+
+ try:
+     import numpy as np
+     import pandas as pd
+
+     has_pandas = True
+     int32 = np.int32
+     float32 = np.float32
+     string = str
+ except ImportError:
+     pass
+
+ try:
+     import polars as pl
+     import polars.datatypes as pl_datatypes
+
+     has_polars = True
+     int32 = pl_datatypes.Int32
+     float32 = pl_datatypes.Float32
+     string = pl_datatypes.String
+ except ImportError:
+     pass
+
+
+ __all__ = [
+     "has_polars",
+     "has_pandas",
+     "pd",
+     "pl",
+     "int32",
+     "float32",
+     "string",
+ ]
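_dataframe.py makes pandas and polars optional dependencies: it attempts each import, records availability in has_pandas/has_polars, and exposes whichever dtype aliases it could resolve. A short sketch of how downstream code might branch on those flags (assumes hirundo 0.1.18; _dataframe is an internal module):

    from hirundo._dataframe import has_pandas, has_polars, pd, pl

    if has_polars:
        df = pl.DataFrame({"image_path": ["img_0.jpg"], "label": ["cat"]})
    elif has_pandas:
        df = pd.DataFrame({"image_path": ["img_0.jpg"], "label": ["cat"]})
    else:
        raise RuntimeError("Install pandas or polars to work with dataframes")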
hirundo/_env.py CHANGED
@@ -2,11 +2,11 @@ import enum
  import os
  from pathlib import Path

- from dotenv import load_dotenv
+ from dotenv import find_dotenv, load_dotenv


  class EnvLocation(enum.Enum):
-     DOTENV = Path.cwd() / ".env"
+     DOTENV = find_dotenv(".env")
      HOME = Path.home() / ".hirundo.conf"

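The EnvLocation change swaps a fixed cwd/.env path for python-dotenv's find_dotenv, which also searches parent directories for a .env file. A small sketch of the lookup behavior (find_dotenv returns an empty string when nothing is found):

    from dotenv import find_dotenv

    dotenv_path = find_dotenv(".env")
    print(dotenv_path or "no .env file found")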
hirundo/_headers.py CHANGED
@@ -1,13 +1,29 @@
  from hirundo._env import API_KEY, check_api_key

- json_headers = {
+ HIRUNDO_API_VERSION = "0.2"
+
+ _json_headers = {
      "Content-Type": "application/json",
      "Accept": "application/json",
  }


- def get_auth_headers():
+ def _get_auth_headers():
      check_api_key()
      return {
          "Authorization": f"Bearer {API_KEY}",
      }
+
+
+ def _get_api_version_header():
+     return {
+         "HIRUNDO-API-VERSION": HIRUNDO_API_VERSION,
+     }
+
+
+ def get_headers():
+     return {
+         **_json_headers,
+         **_get_auth_headers(),
+         **_get_api_version_header(),
+     }
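get_headers() now bundles the JSON, auth, and API-version headers into one dict. Roughly what it returns, assuming an API key has been configured (the bearer token below is a placeholder):

    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": "Bearer <your-api-key>",
        "HIRUNDO-API-VERSION": "0.2",
    }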
hirundo/_timeouts.py CHANGED
@@ -1,2 +1,3 @@
  READ_TIMEOUT = 30.0
  MODIFY_TIMEOUT = 60.0
+ DOWNLOAD_READ_TIMEOUT = 600.0  # 10 minutes
hirundo/_urls.py ADDED
@@ -0,0 +1,59 @@
+ from typing import Annotated
+
+ from pydantic import StringConstraints, UrlConstraints
+ from pydantic_core import Url
+
+ from hirundo.dataset_enum import StorageTypes
+
+ S3BucketUrl = Annotated[
+     str,
+     StringConstraints(
+         min_length=8,
+         max_length=1023,
+         pattern=r"s3?://[a-z0-9.-]{3,64}[/]?",  # Only allow real S3 bucket URLs
+     ),
+ ]
+
+ StorageConfigName = Annotated[
+     str,
+     StringConstraints(
+         min_length=1,
+         max_length=255,
+         pattern=r"^[a-zA-Z0-9-_]+$",
+     ),
+ ]
+
+ STORAGE_PATTERNS: dict[StorageTypes, str] = {
+     StorageTypes.S3: r"^s3:\/\/[a-z0-9\.\-]{3,63}/[a-zA-Z0-9!\-\/_\.\*'\(\)]+$",
+     StorageTypes.GCP: r"^gs:\/\/([a-z0-9][a-z0-9_-]{1,61}[a-z0-9](\.[a-z0-9][a-z0-9_-]{1,61}[a-z0-9])*)\/[^\x00-\x1F\x7F-\x9F\r\n]*$",
+ }
+
+
+ LENGTH_CONSTRAINTS: dict[StorageTypes, dict] = {
+     StorageTypes.S3: {"min_length": 8, "max_length": 1023, "bucket_max_length": None},
+     StorageTypes.GCP: {"min_length": 8, "max_length": 1023, "bucket_max_length": 222},
+ }
+
+ RepoUrl = Annotated[
+     Url,
+     UrlConstraints(
+         allowed_schemes=[
+             "ssh",
+             "https",
+             "http",
+         ]
+     ),
+ ]
+ HirundoUrl = Annotated[
+     Url,
+     UrlConstraints(
+         allowed_schemes=[
+             "file",
+             "https",
+             "http",
+             "s3",
+             "gs",
+             "ssh",
+         ]
+     ),
+ ]
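STORAGE_PATTERNS and LENGTH_CONSTRAINTS replace the old flat S3_*/GCP_* constants and are keyed by StorageTypes. A quick illustration of the patterns above, assuming hirundo 0.1.18 is installed:

    import re

    from hirundo._urls import STORAGE_PATTERNS
    from hirundo.dataset_enum import StorageTypes

    assert re.match(STORAGE_PATTERNS[StorageTypes.S3], "s3://my-bucket/datasets/train.csv")
    assert re.match(STORAGE_PATTERNS[StorageTypes.GCP], "gs://my-bucket/datasets/train.csv")
    assert not re.match(STORAGE_PATTERNS[StorageTypes.S3], "s3://bad bucket/train.csv")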
hirundo/cli.py CHANGED
@@ -7,6 +7,8 @@ from typing import Annotated
  from urllib.parse import urlparse

  import typer
+ from rich.console import Console
+ from rich.table import Table

  from hirundo._env import API_HOST, EnvLocation

@@ -189,6 +191,56 @@ def setup(
  )


+ @app.command("check-run", epilog=hirundo_epilog)
+ def check_run(
+     run_id: str,
+ ):
+     """
+     Check the status of a run.
+     """
+     from hirundo.dataset_optimization import OptimizationDataset
+
+     results = OptimizationDataset.check_run_by_id(run_id)
+     print(f"Run results saved to {results.cached_zip_path}")
+
+
+ @app.command("list-runs", epilog=hirundo_epilog)
+ def list_runs():
+     """
+     List all runs available.
+     """
+     from hirundo.dataset_optimization import OptimizationDataset
+
+     runs = OptimizationDataset.list_runs()
+
+     console = Console()
+     table = Table(
+         title="Runs:",
+         expand=True,
+     )
+     cols = (
+         "Dataset name",
+         "Run ID",
+         "Status",
+         "Created At",
+         "Run Args",
+     )
+     for col in cols:
+         table.add_column(
+             col,
+             overflow="fold",
+         )
+     for run in runs:
+         table.add_row(
+             str(run.name),
+             str(run.id),
+             str(run.status),
+             run.created_at.isoformat(),
+             run.run_args.model_dump_json() if run.run_args else None,
+         )
+     console.print(table)
+
+
  typer_click_object = typer.main.get_command(app)

  if __name__ == "__main__":
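The new check-run and list-runs CLI commands are thin wrappers around OptimizationDataset. A sketch of the equivalent calls from Python, mirroring the command bodies above (assumes an API key is configured; the run ID is a placeholder):

    from hirundo.dataset_optimization import OptimizationDataset

    for run in OptimizationDataset.list_runs():
        print(run.id, run.status, run.created_at.isoformat())

    results = OptimizationDataset.check_run_by_id("<run-id>")
    print(results.cached_zip_path)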
hirundo/dataset_enum.py ADDED
@@ -0,0 +1,46 @@
+ from enum import Enum
+
+
+ class LabelingType(str, Enum):
+     """
+     Enum indicate what type of labeling is used for the given dataset.
+     Supported types are:
+     """
+
+     SINGLE_LABEL_CLASSIFICATION = "SingleLabelClassification"
+     OBJECT_DETECTION = "ObjectDetection"
+     SPEECH_TO_TEXT = "SpeechToText"
+     OBJECT_SEGMENTATION = "ObjectSegmentation"
+     SEMANTIC_SEGMENTATION = "SemanticSegmentation"
+     PANOPTIC_SEGMENTATION = "PanopticSegmentation"
+
+
+ class DatasetMetadataType(str, Enum):
+     """
+     Enum indicate what type of metadata is provided for the given dataset.
+     Supported types are:
+     """
+
+     HIRUNDO_CSV = "HirundoCSV"
+     COCO = "COCO"
+     YOLO = "YOLO"
+     KeylabsObjDetImages = "KeylabsObjDetImages"
+     KeylabsObjDetVideo = "KeylabsObjDetVideo"
+     KeylabsObjSegImages = "KeylabsObjSegImages"
+     KeylabsObjSegVideo = "KeylabsObjSegVideo"
+
+
+ class StorageTypes(str, Enum):
+     """
+     Enum for the different types of storage configs.
+     Supported types are:
+     """
+
+     S3 = "S3"
+     GCP = "GCP"
+     # AZURE = "Azure" TODO: Azure storage config is coming soon
+     GIT = "Git"
+     LOCAL = "Local"
+     """
+     Local storage config is only supported for on-premises installations.
+     """
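Because LabelingType, DatasetMetadataType, and StorageTypes subclass str, their members compare equal to the raw strings used in API payloads. For example, assuming hirundo 0.1.18 is installed:

    from hirundo import DatasetMetadataType, LabelingType, StorageTypes

    assert LabelingType.SEMANTIC_SEGMENTATION == "SemanticSegmentation"
    assert DatasetMetadataType.HIRUNDO_CSV == "HirundoCSV"
    assert StorageTypes.LOCAL == "Local"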