hafnia 0.1.24__py3-none-any.whl → 0.1.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hafnia/utils.py CHANGED
@@ -1,18 +1,60 @@
- import functools
  import os
- import sys
+ import time
+ import zipfile
  from datetime import datetime
+ from functools import wraps
  from pathlib import Path
- from typing import Any, Callable, Optional
+ from typing import Any, Callable, Iterator, Optional
  from zipfile import ZipFile

- import click
+ import pathspec
+ import seedir
+ from rich import print as rprint

- from hafnia.log import logger
+ from hafnia.log import sys_logger, user_logger

  PATH_DATA = Path("./.data")
  PATH_DATASET = PATH_DATA / "datasets"
  PATH_RECIPES = PATH_DATA / "recipes"
+ FILENAME_HAFNIAIGNORE = ".hafniaignore"
+ DEFAULT_IGNORE_SPECIFICATION = [
+     "*.jpg",
+     "*.png",
+     "*.py[cod]",
+     "*_cache/",
+     ".data",
+     ".git",
+     ".venv",
+     ".vscode",
+     "__pycache__",
+     "recipe.zip",
+     "tests",
+     "wandb",
+ ]
+
+
+ def timed(label: str):
+     """
+     Decorator factory that allows custom labels for timing.
+     Usage: @timed("Custom Operation")
+     """
+
+     def decorator(func: Callable) -> Callable:
+         @wraps(func)
+         def wrapper(*args, **kwargs) -> Any:
+             operation_label = label or func.__name__
+             tik = time.perf_counter()
+             try:
+                 return func(*args, **kwargs)
+             except Exception as e:
+                 sys_logger.error(f"{operation_label} failed: {e}")
+             finally:
+                 elapsed = time.perf_counter() - tik
+                 sys_logger.debug(f"{operation_label} took {elapsed:.2f} seconds.")
+
+         return wrapper
+
+     return decorator


  def now_as_str() -> str:
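
A usage sketch of the new `timed` decorator factory; the decorated function below is invented for illustration. Note that the wrapper logs failures through `sys_logger` and swallows the exception, so a failing call returns `None`:

```python
# Hypothetical function timed with hafnia.utils.timed (illustrative only).
from hafnia.utils import timed

@timed("Loading annotations")
def load_annotations(path: str) -> list[str]:
    return [path]

load_annotations("labels.json")
# sys_logger emits at DEBUG level: "Loading annotations took 0.00 seconds."
```
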
@@ -26,58 +68,70 @@ def get_recipe_path(recipe_name: str) -> Path:
      return path_recipe


- def archive_dir(recipe_path: Path, output_path: Optional[Path] = None) -> Path:
+ def filter_recipe_files(recipe_path: Path, path_ignore_file: Optional[Path] = None) -> Iterator:
+     path_ignore_file = path_ignore_file or recipe_path / FILENAME_HAFNIAIGNORE
+     if not path_ignore_file.exists():
+         ignore_specification_lines = DEFAULT_IGNORE_SPECIFICATION
+         user_logger.info(
+             f"No '{FILENAME_HAFNIAIGNORE}' file was found. Files are excluded using the default ignore patterns.\n"
+             f"\tDefault ignore patterns: {DEFAULT_IGNORE_SPECIFICATION}\n"
+             f"Add a '{FILENAME_HAFNIAIGNORE}' file to the root folder to make custom ignore patterns."
+         )
+     else:
+         ignore_specification_lines = Path(path_ignore_file).read_text().splitlines()
+     ignore_specification = pathspec.GitIgnoreSpec.from_lines(ignore_specification_lines)
+     include_files = ignore_specification.match_tree(recipe_path, negate=True)
+     return include_files
+
+
+ @timed("Wrapping recipe.")
+ def archive_dir(
+     recipe_path: Path,
+     output_path: Optional[Path] = None,
+     path_ignore_file: Optional[Path] = None,
+ ) -> Path:
      recipe_zip_path = output_path or recipe_path / "recipe.zip"
      assert recipe_zip_path.suffix == ".zip", "Output path must be a zip file"
      recipe_zip_path.parent.mkdir(parents=True, exist_ok=True)

-     click.echo(f"Creating zip archive {recipe_path}")
-     with ZipFile(recipe_zip_path, "w") as zip_ref:
-         for item in recipe_path.rglob("*"):
-             should_skip = (
-                 item == recipe_zip_path
-                 or item.name.endswith(".zip")
-                 or any(part.startswith(".") for part in item.parts)
-                 or any(part == "__pycache__" for part in item.parts)
-             )
-
-             if should_skip:
-                 if item != recipe_zip_path:
-                     click.echo(f"[-] {item.relative_to(recipe_path)}")
-                 continue
-
-             if not item.is_file():
-                 continue
-
-             relative_path = item.relative_to(recipe_path)
-             click.echo(f"[+] {relative_path}")
-             zip_ref.write(item, relative_path)
-     return recipe_zip_path
+     user_logger.info(f" Creating zip archive of '{recipe_path}'")
+     include_files = filter_recipe_files(recipe_path, path_ignore_file)
+     with ZipFile(recipe_zip_path, "w", compression=zipfile.ZIP_STORED, allowZip64=True) as zip_ref:
+         for str_filepath in include_files:
+             full_path = recipe_path / str_filepath
+             zip_ref.write(full_path, str_filepath)
+     show_recipe_content(recipe_zip_path)

+     return recipe_zip_path

- def safe(func: Callable) -> Callable:
-     """
-     Decorator that catches exceptions, logs them, and exits with code 1.

-     Args:
-         func: The function to decorate
+ def size_human_readable(size_bytes: int, suffix="B") -> str:
+     size_value = float(size_bytes)
+     for unit in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
+         if abs(size_value) < 1024.0:
+             return f"{size_value:3.1f} {unit}{suffix}"
+         size_value /= 1024.0
+     return f"{size_value:.1f}Yi{suffix}"

-     Returns:
-         Wrapped function that handles exceptions
-     """

-     @functools.wraps(func)
-     def wrapper(*args: Any, **kwargs: Any) -> Any:
-         try:
-             return func(*args, **kwargs)
-         except Exception as e:
-             logger.error(f"Error in {func.__name__}: {str(e)}")
-             sys.exit(1)
+ def show_recipe_content(recipe_path: Path, style: str = "emoji", depth_limit: int = 3) -> None:
+     def scan(parent: seedir.FakeDir, path: zipfile.Path, depth: int = 0) -> None:
+         if depth >= depth_limit:
+             return
+         for child in path.iterdir():
+             if child.is_dir():
+                 folder = seedir.FakeDir(child.name)
+                 scan(folder, child, depth + 1)
+                 folder.parent = parent
+             else:
+                 parent.create_file(child.name)

-     return wrapper
+     recipe = seedir.FakeDir("recipe")
+     scan(recipe, zipfile.Path(recipe_path))
+     rprint(recipe.seedir(sort=True, first="folders", style=style, printout=False))
+     user_logger.info(f"Recipe size: {size_human_readable(os.path.getsize(recipe_path))}. Max size 800 MiB")


  def is_remote_job() -> bool:
      """Check if the current job is running in HAFNIA cloud environment."""
-     is_remote = os.getenv("HAFNIA_CLOUD", "false").lower() == "true"
-     return is_remote
+     return os.getenv("HAFNIA_CLOUD", "false").lower() == "true"
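
Below, a hedged sketch of the new ignore-file flow, assuming a hypothetical `my-recipe` folder. A `.hafniaignore` in the recipe root replaces `DEFAULT_IGNORE_SPECIFICATION` and is parsed with gitignore semantics via `pathspec.GitIgnoreSpec`; the patterns here are illustrative:

```python
# Archive a recipe folder with custom ignore patterns (sketch; folder assumed to exist).
from pathlib import Path

from hafnia.utils import archive_dir

recipe = Path("./my-recipe")
(recipe / ".hafniaignore").write_text("*.ckpt\nwandb/\n__pycache__/\n")
zip_path = archive_dir(recipe)  # writes my-recipe/recipe.zip, then prints the zipped tree and size
```
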
hafnia-0.1.26.dist-info/METADATA ADDED
@@ -0,0 +1,363 @@
+ Metadata-Version: 2.4
+ Name: hafnia
+ Version: 0.1.26
+ Summary: Python SDK for communication with Hafnia platform.
+ Author-email: Milestone Systems <hafniaplatform@milestone.dk>
+ License-File: LICENSE
+ Requires-Python: >=3.10
+ Requires-Dist: boto3>=1.35.91
+ Requires-Dist: click>=8.1.8
+ Requires-Dist: datasets>=3.2.0
+ Requires-Dist: emoji>=2.14.1
+ Requires-Dist: flatten-dict>=0.4.2
+ Requires-Dist: pathspec>=0.12.1
+ Requires-Dist: pillow>=11.1.0
+ Requires-Dist: pyarrow>=18.1.0
+ Requires-Dist: pydantic>=2.10.4
+ Requires-Dist: rich>=13.9.4
+ Requires-Dist: seedir>=0.5.0
+ Requires-Dist: tqdm>=4.67.1
+ Description-Content-Type: text/markdown
+
+ # Hafnia
+
+ The `hafnia` Python package is a collection of tools to create and run model training recipes on
+ the [Hafnia Platform](https://hafnia.milestonesys.com/).
+
+ The package includes the following interfaces:
+
+ - `cli`: A Command Line Interface (CLI) to 1) configure/connect to Hafnia's [Training-aaS](https://hafnia.readme.io/docs/training-as-a-service) and 2) create and
+   launch recipe scripts.
+ - `hafnia`: A Python package with helper functions to load and interact with sample datasets and an experiment
+   tracker (`HafniaLogger`).
+
+
+ ## The Concept: Training as a Service (Training-aaS)
+ `Training-aaS` is the concept of training models on the Hafnia platform on large
+ and *hidden* datasets. Hidden datasets refer to datasets that can be used for
+ training but are not available for download or direct access.
+
+ This is a key feature of the Hafnia platform, as a hidden dataset ensures data
+ privacy and allows models to be trained compliantly and ethically by third parties (you).
+
+ The `script2model` approach is a Training-aaS concept, where you package your custom training
+ script as a *training recipe* and use the recipe to train models on the hidden datasets.
+
+ To support local development of a training recipe, we have introduced a **sample dataset**
+ for each dataset available in the Hafnia [data library](https://hafnia.milestonesys.com/training-aas/datasets). The sample dataset is a small
+ and anonymized subset of the full dataset and is available for download.
+
+ With the sample dataset, you can seamlessly switch between local development and Training-aaS.
+ Locally, you can create, validate and debug your training recipe. The recipe is then
+ launched with Training-aaS, where the recipe runs on the full dataset and can be scaled to run on
+ multiple GPUs and instances if needed.
+
+ ## Getting started: Configuration
+ To get started with Hafnia:
+
+ 1. Install `hafnia` with your favorite Python package manager. With pip:
+
+    `pip install hafnia`
+ 1. Sign in to the [Hafnia Platform](https://hafnia.milestonesys.com/).
+ 1. Create an API key for Training-aaS. For more instructions, follow this
+    [guide](https://hafnia.readme.io/docs/create-an-api-key).
+    Copy the key and save it for later use.
+ 1. From the terminal, configure your machine to access Hafnia:
+
+    ```
+    # Start configuration with
+    hafnia configure
+
+    # You are then prompted:
+    Profile Name [default]:  # Press [Enter] or select an optional name
+    Hafnia API Key:  # Pass your HAFNIA API key
+    Hafnia Platform URL [https://api.mdi.milestonesys.com]:  # Press [Enter]
+    ```
+ 1. Download `mnist` from the terminal to verify that your configuration is working.
+
+    ```bash
+    hafnia data download mnist --force
+    ```
+
+ ## Getting started: Loading dataset samples
+ With Hafnia configured on your local machine, it is now possible to download
+ and explore the dataset sample with a Python script:
+
+ ```python
+ from hafnia.data import load_dataset
+
+ dataset_splits = load_dataset("mnist")
+ ```
+
+ ### Dataset Format
+ The returned sample dataset is a [Hugging Face dataset](https://huggingface.co/docs/datasets/index)
+ and contains train, validation and test splits.
+
+ ```python
+ print(dataset_splits)
+
+ # Output:
+ >>> DatasetDict({
+     train: Dataset({
+         features: ['image_id', 'image', 'height', 'width', 'objects', 'Weather', 'Surface Conditions'],
+         num_rows: 172
+     })
+     validation: Dataset({
+         features: ['image_id', 'image', 'height', 'width', 'objects', 'Weather', 'Surface Conditions'],
+         num_rows: 21
+     })
+     test: Dataset({
+         features: ['image_id', 'image', 'height', 'width', 'objects', 'Weather', 'Surface Conditions'],
+         num_rows: 21
+     })
+ })
+ ```
+
+ A Hugging Face dataset is a dictionary with splits, where each split is a `Dataset` object.
+ Each `Dataset` is structured as a table with a set of columns (also called features) and a row for each sample.
+
+ The features of the dataset can be viewed with the `features` attribute.
+ ```python
+ import pprint
+
+ # View features of the train split
+ pprint.pprint(dataset_splits["train"].features)
+ {'Surface Conditions': ClassLabel(names=['Dry', 'Wet'], id=None),
+  'Weather': ClassLabel(names=['Clear', 'Foggy'], id=None),
+  'height': Value(dtype='int64', id=None),
+  'image': Image(mode=None, decode=True, id=None),
+  'image_id': Value(dtype='int64', id=None),
+  'objects': Sequence(feature={'bbox': Sequence(feature=Value(dtype='int64', id=None),
+                                                length=-1,
+                                                id=None),
+                               'class_idx': ClassLabel(names=['Vehicle.Bicycle',
+                                                              'Vehicle.Motorcycle',
+                                                              'Vehicle.Car',
+                                                              'Vehicle.Van',
+                                                              'Vehicle.RV',
+                                                              'Vehicle.Single_Truck',
+                                                              'Vehicle.Combo_Truck',
+                                                              'Vehicle.Pickup_Truck',
+                                                              'Vehicle.Trailer',
+                                                              'Vehicle.Emergency_Vehicle',
+                                                              'Vehicle.Bus',
+                                                              'Vehicle.Heavy_Duty_Vehicle'],
+                                                              id=None),
+                               'class_name': Value(dtype='string', id=None),
+                               'id': Value(dtype='string', id=None)},
+                      length=-1,
+                      id=None),
+  'width': Value(dtype='int64', id=None)}
+ ```
+
+ View the first sample in the training set:
+ ```python
+ # Print a sample from the training set
+ pprint.pprint(dataset_splits["train"][0])
+
+ {'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=1920x1080 at 0x79D6292C5ED0>,
+  'image_id': 4920,
+  'height': 1080,
+  'Weather': 0,
+  'Surface Conditions': 0,
+  'objects': {'bbox': [[441, 180, 121, 126],
+                       [549, 151, 131, 103],
+                       [1845, 722, 68, 130],
+                       [1810, 571, 110, 149]],
+              'class_idx': [7, 7, 2, 2],
+              'class_name': ['Vehicle.Pickup_Truck',
+                             'Vehicle.Pickup_Truck',
+                             'Vehicle.Car',
+                             'Vehicle.Car'],
+              'id': ['HW6WiLAJ', 'T/ccFpRi', 'CS0O8B6W', 'DKrJGzjp']},
+  'width': 1920}
+ ```
+
+ For Hafnia-based datasets, we want to standardize how a dataset and dataset tasks are represented.
+ We have defined a set of features that are common across all datasets in the Hafnia data library.
+
+ - `image`: The image itself, stored as a PIL image
+ - `height`: The height of the image in pixels
+ - `width`: The width of the image in pixels
+ - `[IMAGE_CLASSIFICATION_TASK]`: [Optional] Image classification tasks are top-level `ClassLabel` features.
+   `ClassLabel` is a Hugging Face feature that maps class indices to class names.
+   In the above example, we have two classification tasks:
+   - `Weather`: Classifies the weather conditions in the image, with possible values `Clear` and `Foggy`
+   - `Surface Conditions`: Classifies the surface conditions in the image, with possible values `Dry` and `Wet`
+ - `objects`: A dictionary containing information about objects in the image, including:
+   - `bbox`: Bounding boxes for each object, represented as a list of bounding box coordinates
+     `[xmin, ymin, bbox_width, bbox_height]`. Each bounding box is defined by its top-left corner
+     `(xmin, ymin)` and its width and height `(bbox_width, bbox_height)` in pixels; see the sketch after this list.
+   - `class_idx`: Class indices for each detected object. This is a
+     `ClassLabel` feature that maps to the `class_name` feature.
+   - `class_name`: Class names for each detected object
+   - `id`: Unique identifiers for each detected object
+
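
To make the bounding-box convention concrete, a minimal sketch; the helper name and the conversion target are illustrative, not part of the package:

```python
# Convert a Hafnia-style [xmin, ymin, bbox_width, bbox_height] box
# to the corner format [xmin, ymin, xmax, ymax].
def xywh_to_xyxy(bbox: list[int]) -> list[int]:
    xmin, ymin, bbox_width, bbox_height = bbox
    return [xmin, ymin, xmin + bbox_width, ymin + bbox_height]

print(xywh_to_xyxy([441, 180, 121, 126]))  # first box from the sample above -> [441, 180, 562, 306]
```
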
+ ### Dataset Locally vs. Training-aaS
+ An important feature of `load_dataset` is that it returns the full dataset
+ when loaded with Training-aaS on the Hafnia platform.
+
+ This enables seamless switching between running/validating a training script
+ locally (on the sample dataset) and running full model trainings with Training-aaS (on the full dataset),
+ without changing code or configuration in the training script.
+
+ Available datasets with corresponding sample datasets can be found in the [data library](https://hafnia.milestonesys.com/training-aas/datasets), including metadata and a description for each dataset.
+
+
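
The switch is driven by the `HAFNIA_CLOUD` environment variable, as `hafnia.utils.is_remote_job` in the diff above shows. A minimal sketch, assuming the platform sets this variable for Training-aaS jobs:

```python
# Probe the local/remote switch read by is_remote_job (sketch; assumes HAFNIA_CLOUD is unset locally).
import os

from hafnia.utils import is_remote_job

assert not is_remote_job()           # local development default
os.environ["HAFNIA_CLOUD"] = "true"  # set by the platform in Training-aaS jobs (assumption)
assert is_remote_job()
```
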
+ ## Getting started: Experiment Tracking with HafniaLogger
+ The `HafniaLogger` is an important part of the recipe script and enables you to track, log and
+ reproduce your experiments.
+
+ When integrated into your training script, the `HafniaLogger` is responsible for collecting:
+
+ - **Trained Model**: The model trained during the experiment
+ - **Model Checkpoints**: Intermediate model states saved during training
+ - **Experiment Configurations**: Hyperparameters and other settings used in your experiment
+ - **Training/Evaluation Metrics**: Performance data such as loss values, accuracy, and custom metrics
+
+ ### Basic Implementation Example
+
+ Here's how to integrate the `HafniaLogger` into your training script:
+
+ ```python
+ from hafnia.experiment import HafniaLogger
+
+ batch_size = 128
+ learning_rate = 0.001
+
+ # Initialize Hafnia logger
+ logger = HafniaLogger()
+
+ # Log experiment parameters
+ logger.log_configuration({"batch_size": batch_size, "learning_rate": learning_rate})
+
+ # Store checkpoints in this path
+ ckpt_dir = logger.path_model_checkpoints()
+
+ # Store the trained model in this path
+ model_dir = logger.path_model()
+
+ # Log scalar and metric values during training and validation
+ logger.log_scalar("train/loss", value=0.1, step=100)
+ logger.log_metric("train/accuracy", value=0.98, step=100)
+
+ logger.log_scalar("validation/loss", value=0.1, step=100)
+ logger.log_metric("validation/accuracy", value=0.95, step=100)
+ ```
+
+ Similar to `load_dataset`, the tracker behaves differently when running locally or in the cloud.
+ Locally, experiment data is stored in a local folder `.data/experiments/{DATE_TIME}`.
+
+ In the cloud, the experiment data will be available in the Hafnia platform under
+ [experiments](https://hafnia.milestonesys.com/training-aas/experiments).
+
+ ## Example: Torch Dataloader
+ For `torch`-based training scripts, a dataset is commonly used in combination
+ with a dataloader that performs data augmentation and batches the dataset as torch tensors.
+
+ To support this, we have provided a torch dataloader example script
+ [example_torchvision_dataloader.py](./examples/example_torchvision_dataloader.py).
+
+ The script demonstrates how to load a dataset sample, apply data augmentations using
+ `torchvision.transforms.v2`, and visualize the dataset with `torch_helpers.draw_image_and_targets`.
+
+ Note also how `torch_helpers.TorchVisionCollateFn` is used in combination with the `DataLoader` from
+ `torch.utils.data` to handle the dataset's collate function.
+
+ The dataloader and visualization function support the computer vision tasks
+ and datasets available in the data library.
+
+ ```python
+ # Load Hugging Face dataset
+ dataset_splits = load_dataset("midwest-vehicle-detection")
+
+ # Define transforms
+ train_transforms = v2.Compose(
+     [
+         v2.RandomResizedCrop(size=(224, 224), antialias=True),
+         v2.RandomHorizontalFlip(p=0.5),
+         v2.ToDtype(torch.float32, scale=True),
+         v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+     ]
+ )
+ test_transforms = v2.Compose(
+     [
+         v2.Resize(size=(224, 224), antialias=True),
+         v2.ToDtype(torch.float32, scale=True),
+         v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+     ]
+ )
+
+ keep_metadata = True
+ train_dataset = torch_helpers.TorchvisionDataset(
+     dataset_splits["train"], transforms=train_transforms, keep_metadata=keep_metadata
+ )
+ test_dataset = torch_helpers.TorchvisionDataset(
+     dataset_splits["test"], transforms=test_transforms, keep_metadata=keep_metadata
+ )
+
+ # Visualize sample
+ image, targets = train_dataset[0]
+ visualize_image = torch_helpers.draw_image_and_targets(image=image, targets=targets)
+ pil_image = torchvision.transforms.functional.to_pil_image(visualize_image)
+ pil_image.save("visualized_labels.png")
+
+ # Create DataLoaders - using TorchVisionCollateFn
+ collate_fn = torch_helpers.TorchVisionCollateFn(
+     skip_stacking=["objects.bbox", "objects.class_idx"]
+ )
+ train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
+ ```
+
+
+ ## Example: Training-aaS
+ By combining logging and dataset loading, we can now construct our model training recipe.
+
+ To demonstrate this, we have provided a recipe project that serves as a template for creating and structuring training recipes:
+ [recipe-classification](https://github.com/milestone-hafnia/recipe-classification)
+
+ The project also contains additional information on how to structure your training recipe, use the `HafniaLogger` and the `load_dataset` function, and the different approaches for launching
+ the training recipe on the Hafnia platform.
+
+
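
As a rough illustration of how the pieces combine, a hedged skeleton of a recipe training script; the loop and the logged values are placeholders, and only the `hafnia` imports and logger calls come from the README above:

```python
# Skeleton recipe script: dataset loading plus experiment tracking (placeholders marked).
from hafnia.data import load_dataset
from hafnia.experiment import HafniaLogger

logger = HafniaLogger()
logger.log_configuration({"batch_size": 128, "learning_rate": 1e-3})

dataset_splits = load_dataset("mnist")  # sample set locally, full dataset in Training-aaS
for step in range(1, 101):
    # ... forward/backward pass over dataset_splits["train"] goes here ...
    if step % 10 == 0:
        logger.log_scalar("train/loss", value=0.1, step=step)  # placeholder value
```
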
+ ## Create, Build and Run `recipe.zip` locally
+ To test that a recipe is compatible with the Hafnia cloud, use the following commands to build and
+ start the job locally.
+
+ ```bash
+ # Create 'recipe.zip' from source folder '.'
+ hafnia recipe create .
+
+ # Build the docker image locally from a 'recipe.zip' file
+ hafnia runc build-local recipe.zip
+
+ # Execute the docker image locally with a desired dataset
+ hafnia runc launch-local --dataset mnist "python scripts/train.py"
+ ```
+
+ ## Detailed Documentation
+ For more information, go to our [documentation page](https://hafnia.readme.io/docs/welcome-to-hafnia)
+ or the markdown pages below.
+
+ - [CLI](docs/cli.md) - Detailed guide for the Hafnia command-line interface
+ - [Release lifecycle](docs/release.md) - Details about the package release lifecycle.
+
+ ## Development
+ For development, we use a uv-based virtual Python environment.
+
+ Install uv:
+ ```bash
+ curl -LsSf https://astral.sh/uv/install.sh | sh
+ ```
+
+ Create the virtual environment and install Python dependencies:
+
+ ```bash
+ uv sync
+ ```
+
+ Run tests:
+ ```bash
+ uv run pytest tests
+ ```
hafnia-0.1.26.dist-info/RECORD ADDED
@@ -0,0 +1,27 @@
+ cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cli/__main__.py,sha256=MX0RT1BP3t59rzCvdUqfw39Kw05HOF4OEtjDTwIU9h8,1594
+ cli/config.py,sha256=R9w0NKIOtIxRKNs7ieeUrIKwRkrTlK5PqOVjc5VYljE,4923
+ cli/consts.py,sha256=sj0MRwbbCT2Yl77FPddck1VWkFxp7QY6I9l1o75j_aE,963
+ cli/data_cmds.py,sha256=BQiythAPwAwudgdUa68v50a345uw5flrcDiBHLGp9lo,1460
+ cli/experiment_cmds.py,sha256=L-k_ZJ4B7I4cA8OvHcheSwXM6nx9aTF9G7eKBzAcOzQ,1961
+ cli/profile_cmds.py,sha256=-HQcFgYI6Rqaefi0Nj-91KhiqPKUj7zOaiJWbHx_bac,3196
+ cli/recipe_cmds.py,sha256=qnMfF-te47HXNkgyA0hm9X3etDQsqMnrVEGDCrzVjZU,1462
+ cli/runc_cmds.py,sha256=6fHMi_dEd8g3Cx9PEfU4gJMZf5-G0IUPDcZh6DNq8Mw,4953
+ hafnia/__init__.py,sha256=Zphq-cQoX95Z11zm4lkrU-YiAJxddR7IBfwDkxeHoDE,108
+ hafnia/http.py,sha256=psCWdNKfKYiBrYD6bezat1AeHh77JJtJrPePiUAjTIk,2948
+ hafnia/log.py,sha256=sWF8tz78yBtwZ9ddzm19L1MBSBJ3L4G704IGeT1_OEU,784
+ hafnia/torch_helpers.py,sha256=P_Jl4IwqUebKVCOXNe6iTorJZA3S-3d92HV274UHIko,7456
+ hafnia/utils.py,sha256=mJ5aOjSVSOrrQnpmaKLK71ld5jYpmtd3HciTIk_Wk88,4659
+ hafnia/data/__init__.py,sha256=Pntmo_1fst8OhyrHB60jQ8mhJJ4hL38tdjLvt0YXEJo,73
+ hafnia/data/factory.py,sha256=4fZDkWNyOK1QNCmsxsXfSztPJkJW_HBIa_PTdGCYHCM,2551
+ hafnia/experiment/__init__.py,sha256=OEFE6HqhO5zcTCLZcPcPVjIg7wMFFnvZ1uOtAVhRz7M,85
+ hafnia/experiment/hafnia_logger.py,sha256=usL5pl7XLJP-g1vZrwvbky5YiD6Bg-rOODYYAX5z43I,6830
+ hafnia/platform/__init__.py,sha256=I-VIVXDxwBAUzxx8Zx0g_wykyDdFGTsjb_mYLmvxk2Y,443
+ hafnia/platform/builder.py,sha256=OFPnOjE3bAbWjUgYErXtffDKTiW_9ol95eVzKqL27WM,5433
+ hafnia/platform/download.py,sha256=t055axPNHlXTYCQgZHOS2YMQt1I2_bc4G8dltsOKttY,4760
+ hafnia/platform/experiment.py,sha256=-nAfTmn1c8sE6pHDCTNZvWDTopkXndarJAPIGvsnk60,2389
+ hafnia-0.1.26.dist-info/METADATA,sha256=Lds8gx_ffd8_l9kByvK_e-HPFehSSUv8E_85d8ZelSE,14990
+ hafnia-0.1.26.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ hafnia-0.1.26.dist-info/entry_points.txt,sha256=FCJVIQ8GP2VE9I3eeGVF5eLxVDNW_01pOJCpG_CGnMM,45
+ hafnia-0.1.26.dist-info/licenses/LICENSE,sha256=wLZw1B7_mod_CO1H8LXqQgfqlWD6QceJR8--LJYRZGE,1078
+ hafnia-0.1.26.dist-info/RECORD,,
hafnia-0.1.26.dist-info/licenses/LICENSE CHANGED
@@ -1,6 +1,6 @@
  MIT License

- Copyright (c) 2025 Data-insight-Platform
+ Copyright (c) 2025 Milestone Systems A/S

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
hafnia/platform/api.py DELETED
@@ -1,12 +0,0 @@
- import urllib3
-
- from hafnia.http import fetch
-
-
- def get_organization_id(endpoint: str, api_key: str) -> str:
-     headers = {"X-APIKEY": api_key}
-     try:
-         org_info = fetch(endpoint, headers=headers)
-     except urllib3.exceptions.HTTPError as e:
-         raise ValueError("Failed to fetch organization ID. Verify platform URL and API key.") from e
-     return org_info[0]["id"]