hafnia 0.1.9.tar.gz → 0.1.12.tar.gz

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (64)
  1. hafnia-0.1.12/LICENSE +21 -0
  2. hafnia-0.1.12/PKG-INFO +197 -0
  3. hafnia-0.1.12/README.md +176 -0
  4. hafnia-0.1.12/examples/example_logger.py +23 -0
  5. {hafnia-0.1.9 → hafnia-0.1.12}/pyproject.toml +1 -1
  6. {hafnia-0.1.9 → hafnia-0.1.12}/src/cli/config.py +3 -0
  7. hafnia-0.1.12/tests/test_check_example_scripts.py +45 -0
  8. {hafnia-0.1.9 → hafnia-0.1.12}/tests/test_samples.py +1 -2
  9. {hafnia-0.1.9 → hafnia-0.1.12}/uv.lock +1 -1
  10. hafnia-0.1.9/PKG-INFO +0 -42
  11. hafnia-0.1.9/README.md +0 -22
  12. hafnia-0.1.9/examples/core/example_run.ipynb +0 -64
  13. {hafnia-0.1.9 → hafnia-0.1.12}/.devcontainer/devcontainer.json +0 -0
  14. {hafnia-0.1.9 → hafnia-0.1.12}/.devcontainer/hooks/post_create +0 -0
  15. {hafnia-0.1.9 → hafnia-0.1.12}/.github/dependabot.yaml +0 -0
  16. {hafnia-0.1.9 → hafnia-0.1.12}/.github/workflows/Dockerfile +0 -0
  17. {hafnia-0.1.9 → hafnia-0.1.12}/.github/workflows/build.yaml +0 -0
  18. {hafnia-0.1.9 → hafnia-0.1.12}/.github/workflows/ci_cd.yaml +0 -0
  19. {hafnia-0.1.9 → hafnia-0.1.12}/.github/workflows/lint.yaml +0 -0
  20. {hafnia-0.1.9 → hafnia-0.1.12}/.github/workflows/publish_docker.yaml +0 -0
  21. {hafnia-0.1.9 → hafnia-0.1.12}/.github/workflows/publish_pypi.yaml +0 -0
  22. {hafnia-0.1.9 → hafnia-0.1.12}/.github/workflows/publish_release.yaml +0 -0
  23. {hafnia-0.1.9 → hafnia-0.1.12}/.github/workflows/tests.yaml +0 -0
  24. {hafnia-0.1.9 → hafnia-0.1.12}/.gitignore +0 -0
  25. {hafnia-0.1.9 → hafnia-0.1.12}/.pre-commit-config.yaml +0 -0
  26. {hafnia-0.1.9 → hafnia-0.1.12}/.python-version +0 -0
  27. {hafnia-0.1.9 → hafnia-0.1.12}/.vscode/extensions.json +0 -0
  28. {hafnia-0.1.9 → hafnia-0.1.12}/.vscode/launch.json +0 -0
  29. {hafnia-0.1.9 → hafnia-0.1.12}/.vscode/settings.json +0 -0
  30. {hafnia-0.1.9 → hafnia-0.1.12}/docs/cli.md +0 -0
  31. {hafnia-0.1.9 → hafnia-0.1.12}/docs/release.md +0 -0
  32. {hafnia-0.1.9 → hafnia-0.1.12}/docs/s2m.md +0 -0
  33. {hafnia-0.1.9/examples/core → hafnia-0.1.12/examples}/dataset_builder.py +0 -0
  34. hafnia-0.1.9/examples/core/example_run.py → hafnia-0.1.12/examples/example_load_dataset.py +0 -0
  35. hafnia-0.1.9/examples/core/dataloader_example.py → hafnia-0.1.12/examples/example_torchvision_dataloader.py +0 -0
  36. {hafnia-0.1.9 → hafnia-0.1.12}/examples/script2model/pytorch/Dockerfile +0 -0
  37. {hafnia-0.1.9 → hafnia-0.1.12}/examples/script2model/pytorch/src/lib/train_utils.py +0 -0
  38. {hafnia-0.1.9 → hafnia-0.1.12}/examples/script2model/pytorch/src/scripts/train.py +0 -0
  39. {hafnia-0.1.9 → hafnia-0.1.12}/src/cli/__init__.py +0 -0
  40. {hafnia-0.1.9 → hafnia-0.1.12}/src/cli/__main__.py +0 -0
  41. {hafnia-0.1.9 → hafnia-0.1.12}/src/cli/consts.py +0 -0
  42. {hafnia-0.1.9 → hafnia-0.1.12}/src/cli/data_cmds.py +0 -0
  43. {hafnia-0.1.9 → hafnia-0.1.12}/src/cli/experiment_cmds.py +0 -0
  44. {hafnia-0.1.9 → hafnia-0.1.12}/src/cli/profile_cmds.py +0 -0
  45. {hafnia-0.1.9 → hafnia-0.1.12}/src/cli/runc_cmds.py +0 -0
  46. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/__init__.py +0 -0
  47. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/data/__init__.py +0 -0
  48. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/data/factory.py +0 -0
  49. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/experiment/__init__.py +0 -0
  50. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/experiment/hafnia_logger.py +0 -0
  51. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/http.py +0 -0
  52. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/log.py +0 -0
  53. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/platform/__init__.py +0 -0
  54. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/platform/api.py +0 -0
  55. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/platform/builder.py +0 -0
  56. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/platform/download.py +0 -0
  57. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/platform/executor.py +0 -0
  58. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/platform/experiment.py +0 -0
  59. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/torch_helpers.py +0 -0
  60. {hafnia-0.1.9 → hafnia-0.1.12}/src/hafnia/utils.py +0 -0
  61. {hafnia-0.1.9 → hafnia-0.1.12}/tests/test_builder.py +0 -0
  62. {hafnia-0.1.9 → hafnia-0.1.12}/tests/test_cli.py +0 -0
  63. {hafnia-0.1.9 → hafnia-0.1.12}/tests/test_executor.py +0 -0
  64. {hafnia-0.1.9 → hafnia-0.1.12}/tests/test_mdi_logger.py +0 -0
hafnia-0.1.12/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Data-insight-Platform
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
hafnia-0.1.12/PKG-INFO ADDED
@@ -0,0 +1,197 @@
+ Metadata-Version: 2.4
+ Name: hafnia
+ Version: 0.1.12
+ Summary: Python tools for communication with Hafnia platform.
+ Author-email: Ivan Sahumbaiev <ivsa@milestone.dk>
+ License-File: LICENSE
+ Requires-Python: >=3.10
+ Requires-Dist: boto3>=1.35.91
+ Requires-Dist: click>=8.1.8
+ Requires-Dist: datasets>=3.2.0
+ Requires-Dist: pillow>=11.1.0
+ Requires-Dist: pyarrow>=18.1.0
+ Requires-Dist: pydantic>=2.10.4
+ Requires-Dist: rich>=13.9.4
+ Requires-Dist: tqdm>=4.67.1
+ Provides-Extra: torch
+ Requires-Dist: flatten-dict>=0.4.2; extra == 'torch'
+ Requires-Dist: torch>=2.6.0; extra == 'torch'
+ Requires-Dist: torchvision>=0.21.0; extra == 'torch'
+ Description-Content-Type: text/markdown
+
+ # Hafnia
+
+ The `hafnia` python package is a collection of tools to create and run model training recipes on
+ the [Hafnia Platform](https://hafnia.milestonesys.com/).
+
+ The package includes the following interfaces:
+
+ - `cli`: A Command Line Interface (CLI) to 1) configure/connect to Hafnia and 2) create and
+ launch [Training-aaS](https://hafnia.readme.io/docs/training-as-a-service) recipe scripts.
+ - `hafnia`: A python package with helper functions to load and interact with sample datasets and an experiment
+ tracker (`HafniaLogger`).
+
+
+ ## The Concept: Training as a Service (Training-aaS)
+ `Training-aaS` is the concept of training models on the Hafnia platform on large
+ and *hidden* datasets. Hidden datasets refers to datasets that can be used for
+ training, but are not available for download or direct access.
+
+ This is a key feature of the Hafnia platform, as a hidden dataset ensures data
+ privacy, and allow models to be trained compliantly and ethically by third parties (you).
+
+ The `script2model` approach is a Training-aaS concept, where you package your custom training
+ script as a *training recipe* and use the recipe to train models on the hidden datasets.
+
+ To support local development of a training recipe, we have introduced a **sample dataset**
+ for each dataset available on the Hafnia platform. The sample dataset is a small
+ and anonymized subset of the full dataset and available for download.
+
+ With the sample dataset, you can seamlessly switch between local and Hafnia training.
+ Locally, you can create, validate and debug your training recipe. The recipe is then
+ launched with Hafnia Training-aaS, where the recipe runs on the full dataset and can be scaled to run on
+ multiple GPUs and instances if needed.
+
+
+ ## Getting started: Configuration
+ To get started with Hafnia:
+
+ 1. Install `hafnia` with your favorite python package manager. With pip do this:
+
+ `pip install hafnia`
+ 1. Sign in to the [Hafnia Platform](https://hafnia.milestonesys.com/).
+ 1. Create an API KEY for Training aaS. For more instructions, follow this
+ [guide](https://hafnia.readme.io/docs/create-an-api-key).
+ Copy the key and save it for later use.
+ 1. From terminal, configure your machine to access Hafnia:
+
+ ```
+ # Start configuration with
+ hafnia configure
+
+ # You are then prompted:
+ Profile Name [default]: # Press [Enter] or select an optional name
+ Hafnia API Key: # Pass your HAFNIA API key
+ Hafnia Platform URL [https://api.mdi.milestonesys.com]: # Press [Enter]
+ ```
+ 1. Download `mnist` from terminal to verify configuration is working.
+
+ ```bash
+ hafnia data download mnist --force
+ ```
+
+ ## Getting started: Loading datasets samples
+ With Hafnia configured on your local machine, it is now possible to download
+ and explore the dataset sample with a python script:
+
+ ```python
+ from hafnia.data import load_dataset
+
+ dataset_splits = load_dataset("midwest-vehicle-detection")
+ print(dataset_splits)
+ print(dataset_splits["train"])
+ ```
+
+ Datasets with corresponding sample datasets can be found in [data library](https://hafnia.milestonesys.com/training-aas/datasets). It is early days for the data library,
+ but we are actively working on adding more datasets.
+
+ The returned sample dataset is a [hugging face dataset](https://huggingface.co/docs/datasets/index)
+ and contains train, validation and test splits.
+
+ An important feature of `load_dataset` is that it will return the full dataset
+ when loaded on the Hafnia platform.
+
+ This enables seamlessly switching between running/validating a training script
+ locally (on the sample dataset) and running full model trainings in the cloud
+ without changing code or configurations for the training script.
+
+ ## Getting started: Experiment Tracking with HafniaLogger
+ The `HafniaLogger` is an important part of the recipe script and enables you to track, log and
+ reproduce your experiments.
+
+ When integrated into your training script, the `HafniaLogger` is responsible for collecting:
+
+ - **Trained Model**: The model trained during the experiment
+ - **Model Checkpoints**: Intermediate model states saved during training
+ - **Experiment Configurations**: Hyperparameters and other settings used in your experiment
+ - **Training/Evaluation Metrics**: Performance data such as loss values, accuracy, and custom metrics
+
+ ### Basic Implementation Example
+
+ Here's how to integrate the `HafniaLogger` into your training script:
+
+ ```python
+ from hafnia.experiment import HafniaLogger
+
+ batch_size = 128
+ learning_rate = 0.001
+
+ # Initialize Hafnia logger
+ logger = HafniaLogger()
+
+ # Log experiment parameters
+ logger.log_configuration({"batch_size": 128, "learning_rate": 0.001})
+
+ # Store checkpoints in this path
+ ckpt_dir = logger.path_model_checkpoints()
+
+ # Store the trained model in this path
+ model_dir = logger.path_model()
+
+ # Log scalar and metric values during training and validation
+ logger.log_scalar("train/loss", value=0.1, step=100)
+ logger.log_metric("train/accuracy", value=0.98, step=100)
+
+ logger.log_scalar("validation/loss", value=0.1, step=100)
+ logger.log_metric("validation/accuracy", value=0.95, step=100)
+ ```
+
+ Similar to `load_dataset`, the tracker behaves differently when running locally or in the cloud.
+ Locally, experiment data is stored in a local folder `.data/experiments/{DATE_TIME}`.
+
+ In the cloud, the experiment data will be available in the Hafnia platform under
+ [experiments](https://hafnia.milestonesys.com/training-aas/experiments).
+
+ ## Example: Torch Dataloader
+ Commonly for `torch`-based training scripts, a dataset is used in combination
+ with a dataloader that performs data augmentations and batching of the dataset as torch tensors.
+
+ To support this, we have provided a torch dataloader example script
+ [example_torchvision_dataloader.py](./examples/example_torchvision_dataloader.py).
+
+ The script demonstrates how to make a dataloader with data augmentation (`torchvision.transforms.v2`)
+ and a helper function for visualizing image and labels.
+
+ The dataloader and visualization function supports computer vision tasks
+ and datasets available in the data library.
+
+ ## Example: Training-aaS
+ By combining logging and dataset loading, we can now construct our model training recipe.
+
+ To demonstrate this, we have provided a recipe project that serves as a template for creating and structuring training recipes
+ [recipe-classification](https://github.com/Data-insight-Platform/recipe-classification)
+
+ The project also contains additional information on how to structure your training recipe, use the `HafniaLogger`, the `load_dataset` function and different approach for launching
+ the training recipe on the Hafnia platform.
+
+ ## Detailed Documentation
+ For more information, go to our [documentation page](https://hafnia.readme.io/docs/welcome-to-hafnia)
+ or in below markdown pages.
+
+ - [CLI](docs/cli.md) - Detailed guide for the Hafnia command-line interface
+ - [Script2Model Documentation](docs/s2m.md) - Detailed guide for script2model
+ - [Release lifecycle](docs/release.md) - Details about package release lifecycle.
+
+ ## Development
+ For development, we are using an uv based virtual python environment
+
+ Install uv
+
+ curl -LsSf https://astral.sh/uv/install.sh | sh
+
+
+ Install python dependencies including developer (`--dev`) and optional dependencies (`--all-extras`).
+
+ uv sync --all-extras --dev
+
+
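The new README references examples/example_torchvision_dataloader.py without reproducing it in this diff. As a rough orientation, a dataloader along those lines could look like the sketch below; the "image"/"label" column names, the `mnist` dataset name, and the specific transforms are assumptions taken from the README examples, not from the packaged script.

```python
# Minimal sketch of a torchvision-v2 dataloader over a Hafnia sample dataset.
# Assumptions: the split exposes "image" (PIL) and "label" (int) columns, as in
# the mnist example above; the shipped example_torchvision_dataloader.py is the
# authoritative version.
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import v2

from hafnia.data import load_dataset

transforms = v2.Compose(
    [
        v2.ToImage(),                           # PIL image -> tensor image
        v2.ToDtype(torch.float32, scale=True),  # uint8 [0, 255] -> float32 [0, 1]
    ]
)


def collate(samples):
    # Apply the transform per sample, then stack into batch tensors.
    images = torch.stack([transforms(sample["image"]) for sample in samples])
    labels = torch.tensor([sample["label"] for sample in samples])
    return images, labels


dataset_splits = load_dataset("mnist")  # sample dataset locally, full dataset on the platform
train_loader = DataLoader(dataset_splits["train"], batch_size=32, shuffle=True, collate_fn=collate)

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)
```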
hafnia-0.1.12/README.md ADDED
@@ -0,0 +1,176 @@
(+176 lines: identical to the README body embedded in hafnia-0.1.12/PKG-INFO above, from "# Hafnia" through the Development section.)
hafnia-0.1.12/examples/example_logger.py ADDED
@@ -0,0 +1,23 @@
+ from hafnia.experiment import HafniaLogger
+
+ batch_size = 128
+ learning_rate = 0.001
+
+ # Initialize Hafnia logger
+ logger = HafniaLogger()
+
+ # Log experiment parameters
+ logger.log_configuration({"batch_size": 128, "learning_rate": 0.001})
+
+ # Store checkpoints in this path
+ ckpt_dir = logger.path_model_checkpoints()
+
+ # Store the trained model in this path
+ model_dir = logger.path_model()
+
+ # Log scalar and metric values during training and validation
+ logger.log_scalar("train/loss", value=0.1, step=100)
+ logger.log_metric("train/accuracy", value=0.98, step=100)
+
+ logger.log_scalar("validation/loss", value=0.1, step=100)
+ logger.log_metric("validation/accuracy", value=0.95, step=100)
{hafnia-0.1.9 → hafnia-0.1.12}/pyproject.toml CHANGED
@@ -1,6 +1,6 @@
  [project]
  name = "hafnia"
- version = "0.1.9"
+ version = "0.1.12"
  description = "Python tools for communication with Hafnia platform."
  readme = "README.md"
  authors = [{ name = "Ivan Sahumbaiev", email = "ivsa@milestone.dk" }]
{hafnia-0.1.9 → hafnia-0.1.12}/src/cli/config.py CHANGED
@@ -140,6 +140,9 @@ class Config:
          del self.config_data.profiles[profile_name]
          self.save_config()
  
+     def is_configured(self) -> bool:
+         return self.config_data.active_profile is not None
+ 
      def clear(self) -> None:
          self.config_data = ConfigFileSchema(active_profile=None, profiles={})
          if self.config_path.exists():
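The new `Config.is_configured()` helper centralizes the "is there an active profile" check that the tests previously performed by reaching into `config_data`. The sketch below contrasts the two forms as they appear in this diff; the import path follows the test files.

```python
# How the login guard changes between 0.1.9 and 0.1.12 (mirrors the test diffs below).
from cli.config import Config

# 0.1.9: tests reached into the config data directly
logged_in = Config().config_data.active_profile is not None

# 0.1.12: tests ask the Config object instead
if not Config().is_configured():
    print("Not logged in to Hafnia")
```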
hafnia-0.1.12/tests/test_check_example_scripts.py ADDED
@@ -0,0 +1,45 @@
+ import subprocess
+ import sys
+ from pathlib import Path
+ 
+ import pytest
+ 
+ from cli.config import Config
+ 
+ 
+ @pytest.mark.parametrize(
+     "script_path_str",
+     [
+         "examples/example_torchvision_dataloader.py",
+         "examples/example_load_dataset.py",
+         "examples/example_logger.py",
+         # Add other example scripts here
+     ],
+ )
+ def test_example_scripts(script_path_str: str):
+     if not Config().is_configured():
+         pytest.skip("Not logged in to Hafnia")
+ 
+     script_path = Path(script_path_str)
+     if not script_path.exists():
+         pytest.fail(f"Script {script_path} does not exist")
+ 
+     try:
+         result = subprocess.run(
+             [sys.executable, str(script_path)],
+             capture_output=True,
+             text=True,
+             timeout=120,  # 2-minute timeout
+         )
+ 
+         # Print output for debugging if there was an error
+         if result.returncode != 0:
+             print(f"STDOUT: {result.stdout}")
+             print(f"STDERR: {result.stderr}")
+ 
+         assert result.returncode == 0, f"Script {script_path} failed with return code {result.returncode}"
+ 
+     except subprocess.TimeoutExpired:
+         pytest.fail(f"Script {script_path} timed out")
+     except Exception as e:
+         pytest.fail(f"Failed to run script {script_path}: {str(e)}")
{hafnia-0.1.9 → hafnia-0.1.12}/tests/test_samples.py CHANGED
@@ -13,7 +13,6 @@ from hafnia.data import load_dataset
  
  FORCE_REDOWNLOAD = False
  
- HAFNIA_LOGGED_IN = Config().config_data.active_profile is not None
  DATASETS_EXPECTED = [
      (
          "midwest-vehicle-detection",
@@ -36,7 +35,7 @@ DATASET_IDS = [dataset[0] for dataset in DATASETS_EXPECTED]
  @pytest.fixture(params=DATASETS_EXPECTED, ids=DATASET_IDS, scope="session")
  def loaded_dataset(request):
      """Fixture that loads a dataset and returns it along with metadata."""
-     if not HAFNIA_LOGGED_IN:
+     if not Config().is_configured():
          pytest.skip("Not logged in to Hafnia")
  
      dataset_name, expected_lengths, task_type = request.param
{hafnia-0.1.9 → hafnia-0.1.12}/uv.lock CHANGED
@@ -549,7 +549,7 @@ http = [
  
  [[package]]
  name = "hafnia"
- version = "0.1.8"
+ version = "0.1.12"
  source = { editable = "." }
  dependencies = [
      { name = "boto3" },
hafnia-0.1.9/PKG-INFO DELETED
@@ -1,42 +0,0 @@
- Metadata-Version: 2.4
- Name: hafnia
- Version: 0.1.9
- Summary: Python tools for communication with Hafnia platform.
- Author-email: Ivan Sahumbaiev <ivsa@milestone.dk>
- Requires-Python: >=3.10
- Requires-Dist: boto3>=1.35.91
- Requires-Dist: click>=8.1.8
- Requires-Dist: datasets>=3.2.0
- Requires-Dist: pillow>=11.1.0
- Requires-Dist: pyarrow>=18.1.0
- Requires-Dist: pydantic>=2.10.4
- Requires-Dist: rich>=13.9.4
- Requires-Dist: tqdm>=4.67.1
- Provides-Extra: torch
- Requires-Dist: flatten-dict>=0.4.2; extra == 'torch'
- Requires-Dist: torch>=2.6.0; extra == 'torch'
- Requires-Dist: torchvision>=0.21.0; extra == 'torch'
- Description-Content-Type: text/markdown
-
- # Project Hafnia
-
- Project Hafnia is a comprehensive solution for managing data science experiments and resources. It provides tools and interfaces for experiment management, data handling, and container orchestration.
-
-
- ## Documentation
-
- - [CLI Documentation](docs/cli.md) - Detailed guide for the Hafnia command-line interface
- - [Script2Model Documentation](docs/s2m.md) - Detailed guide for script2model
- - [Release lyfecycle](docs/release.md) - Details about package release lifecycle.
-
-
- ## Key Components
-
- - **CLI Tool**: Command-line interface for platform interaction
-
-
- ## Install
-
- ```bash
- pip install hafnia
- ```
hafnia-0.1.9/README.md DELETED
@@ -1,22 +0,0 @@
(-22 lines: identical to the "# Project Hafnia" README body embedded in hafnia-0.1.9/PKG-INFO above.)
hafnia-0.1.9/examples/core/example_run.ipynb DELETED
@@ -1,64 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from hafnia.data import load_dataset"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "dataset = load_dataset(\"mnist\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "for i in range(10):\n",
- " plt.subplot(2, 5, i + 1)\n",
- " plt.imshow(dataset[\"train\"][i][\"image\"], cmap=\"gray\")\n",
- " plt.title(dataset[\"train\"][i][\"label\"])\n",
- " plt.axis(\"off\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": ".venv",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.12"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
- }
All remaining files are unchanged (+0 -0).