hafnia 0.1.8__tar.gz → 0.1.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. {hafnia-0.1.8 → hafnia-0.1.9}/.devcontainer/devcontainer.json +1 -1
  2. {hafnia-0.1.8 → hafnia-0.1.9}/.github/workflows/ci_cd.yaml +2 -2
  3. {hafnia-0.1.8 → hafnia-0.1.9}/.vscode/launch.json +1 -1
  4. {hafnia-0.1.8 → hafnia-0.1.9}/.vscode/settings.json +1 -1
  5. {hafnia-0.1.8 → hafnia-0.1.9}/PKG-INFO +4 -5
  6. {hafnia-0.1.8 → hafnia-0.1.9}/README.md +2 -3
  7. hafnia-0.1.9/docs/cli.md +98 -0
  8. {hafnia-0.1.8 → hafnia-0.1.9}/docs/s2m.md +13 -13
  9. hafnia-0.1.9/examples/script2model/pytorch/Dockerfile +10 -0
  10. {hafnia-0.1.8 → hafnia-0.1.9}/examples/script2model/pytorch/src/lib/train_utils.py +10 -10
  11. {hafnia-0.1.8 → hafnia-0.1.9}/examples/script2model/pytorch/src/scripts/train.py +2 -2
  12. {hafnia-0.1.8 → hafnia-0.1.9}/pyproject.toml +4 -4
  13. {hafnia-0.1.8 → hafnia-0.1.9}/src/cli/__main__.py +6 -10
  14. {hafnia-0.1.8 → hafnia-0.1.9}/src/cli/config.py +2 -4
  15. {hafnia-0.1.8 → hafnia-0.1.9}/src/cli/consts.py +5 -9
  16. {hafnia-0.1.8 → hafnia-0.1.9}/src/cli/data_cmds.py +5 -9
  17. {hafnia-0.1.8 → hafnia-0.1.9}/src/cli/experiment_cmds.py +4 -12
  18. {hafnia-0.1.8 → hafnia-0.1.9}/src/cli/runc_cmds.py +1 -3
  19. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/data/factory.py +3 -5
  20. hafnia-0.1.9/src/hafnia/experiment/__init__.py +3 -0
  21. hafnia-0.1.8/src/hafnia/experiment/mdi_logger.py → hafnia-0.1.9/src/hafnia/experiment/hafnia_logger.py +1 -1
  22. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/http.py +2 -6
  23. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/platform/builder.py +11 -25
  24. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/platform/executor.py +6 -10
  25. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/platform/experiment.py +2 -4
  26. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/torch_helpers.py +10 -24
  27. {hafnia-0.1.8 → hafnia-0.1.9}/tests/test_builder.py +2 -6
  28. {hafnia-0.1.8 → hafnia-0.1.9}/tests/test_cli.py +4 -12
  29. {hafnia-0.1.8 → hafnia-0.1.9}/tests/test_mdi_logger.py +6 -6
  30. {hafnia-0.1.8 → hafnia-0.1.9}/tests/test_samples.py +6 -12
  31. {hafnia-0.1.8 → hafnia-0.1.9}/uv.lock +1 -1
  32. hafnia-0.1.8/docs/cli.md +0 -98
  33. hafnia-0.1.8/examples/script2model/pytorch/Dockerfile +0 -10
  34. hafnia-0.1.8/src/hafnia/experiment/__init__.py +0 -3
  35. {hafnia-0.1.8 → hafnia-0.1.9}/.devcontainer/hooks/post_create +0 -0
  36. {hafnia-0.1.8 → hafnia-0.1.9}/.github/dependabot.yaml +0 -0
  37. {hafnia-0.1.8 → hafnia-0.1.9}/.github/workflows/Dockerfile +0 -0
  38. {hafnia-0.1.8 → hafnia-0.1.9}/.github/workflows/build.yaml +0 -0
  39. {hafnia-0.1.8 → hafnia-0.1.9}/.github/workflows/lint.yaml +0 -0
  40. {hafnia-0.1.8 → hafnia-0.1.9}/.github/workflows/publish_docker.yaml +0 -0
  41. {hafnia-0.1.8 → hafnia-0.1.9}/.github/workflows/publish_pypi.yaml +0 -0
  42. {hafnia-0.1.8 → hafnia-0.1.9}/.github/workflows/publish_release.yaml +0 -0
  43. {hafnia-0.1.8 → hafnia-0.1.9}/.github/workflows/tests.yaml +0 -0
  44. {hafnia-0.1.8 → hafnia-0.1.9}/.gitignore +0 -0
  45. {hafnia-0.1.8 → hafnia-0.1.9}/.pre-commit-config.yaml +0 -0
  46. {hafnia-0.1.8 → hafnia-0.1.9}/.python-version +0 -0
  47. {hafnia-0.1.8 → hafnia-0.1.9}/.vscode/extensions.json +0 -0
  48. {hafnia-0.1.8 → hafnia-0.1.9}/docs/release.md +0 -0
  49. {hafnia-0.1.8 → hafnia-0.1.9}/examples/core/dataloader_example.py +0 -0
  50. {hafnia-0.1.8 → hafnia-0.1.9}/examples/core/dataset_builder.py +0 -0
  51. {hafnia-0.1.8 → hafnia-0.1.9}/examples/core/example_run.ipynb +0 -0
  52. {hafnia-0.1.8 → hafnia-0.1.9}/examples/core/example_run.py +0 -0
  53. {hafnia-0.1.8 → hafnia-0.1.9}/src/cli/__init__.py +0 -0
  54. {hafnia-0.1.8 → hafnia-0.1.9}/src/cli/profile_cmds.py +0 -0
  55. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/__init__.py +0 -0
  56. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/data/__init__.py +0 -0
  57. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/log.py +0 -0
  58. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/platform/__init__.py +0 -0
  59. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/platform/api.py +0 -0
  60. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/platform/download.py +0 -0
  61. {hafnia-0.1.8 → hafnia-0.1.9}/src/hafnia/utils.py +0 -0
  62. {hafnia-0.1.8 → hafnia-0.1.9}/tests/test_executor.py +0 -0
@@ -1,5 +1,5 @@
1
1
  {
2
- "name": "MDI Tools Container",
2
+ "name": "Hafnia Tools Container",
3
3
  "image": "mcr.microsoft.com/devcontainers/python:1-3.10-bullseye",
4
4
  "features": {
5
5
  "ghcr.io/jsburckhardt/devcontainer-features/uv:1": {},
@@ -45,7 +45,7 @@ jobs:
45
45
  python-version-file: "pyproject.toml"
46
46
 
47
47
  publish-pypi:
48
- name: Publish Package
48
+ name: Publish Package to PyPI
49
49
  needs: build
50
50
  if: github.event_name == 'push' && github.ref == 'refs/heads/main'
51
51
  uses: ./.github/workflows/publish_pypi.yaml
@@ -53,7 +53,7 @@ jobs:
53
53
  environment: pypi
54
54
 
55
55
  publish-pypi-test:
56
- name: Publish Package
56
+ name: Publish Package to TestPyPI
57
57
  needs: build
58
58
  if: github.event_name == 'push' && github.ref == 'refs/heads/main'
59
59
  uses: ./.github/workflows/publish_pypi.yaml
@@ -22,7 +22,7 @@
22
22
  ],
23
23
  },
24
24
  {
25
- "name": "debug (mdi data download mnist)",
25
+ "name": "debug (hafnia data download mnist)",
26
26
  "type": "debugpy",
27
27
  "request": "launch",
28
28
  "program": "${workspaceFolder}/src/cli/__main__.py",
@@ -4,7 +4,7 @@
4
4
  "editor.defaultFormatter": "vscode.json-language-features"
5
5
  },
6
6
  "editor.rulers": [
7
- 100, // This should match the 'line-length' setting in pyproject.toml
7
+ 120, // This should match the 'line-length' setting in pyproject.toml
8
8
  ],
9
9
  "notebook.formatOnSave.enabled": true,
10
10
  "notebook.codeActionsOnSave": {
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hafnia
3
- Version: 0.1.8
4
- Summary: Python tools for communication with MDI platform.
3
+ Version: 0.1.9
4
+ Summary: Python tools for communication with Hafnia platform.
5
5
  Author-email: Ivan Sahumbaiev <ivsa@milestone.dk>
6
6
  Requires-Python: >=3.10
7
7
  Requires-Dist: boto3>=1.35.91
@@ -25,7 +25,7 @@ Project Hafnia is a comprehensive solution for managing data science experiments
25
25
 
26
26
  ## Documentation
27
27
 
28
- - [CLI Documentation](docs/cli.md) - Detailed guide for the MDI command-line interface
28
+ - [CLI Documentation](docs/cli.md) - Detailed guide for the Hafnia command-line interface
29
29
  - [Script2Model Documentation](docs/s2m.md) - Detailed guide for script2model
30
30
  - [Release lifecycle](docs/release.md) - Details about package release lifecycle.
31
31
 
@@ -38,6 +38,5 @@ Project Hafnia is a comprehensive solution for managing data science experiments
38
38
  ## Install
39
39
 
40
40
  ```bash
41
- pip install -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/hafnia
41
+ pip install hafnia
42
42
  ```
43
-
@@ -5,7 +5,7 @@ Project Hafnia is a comprehensive solution for managing data science experiments
5
5
 
6
6
  ## Documentation
7
7
 
8
- - [CLI Documentation](docs/cli.md) - Detailed guide for the MDI command-line interface
8
+ - [CLI Documentation](docs/cli.md) - Detailed guide for the Hafnia command-line interface
9
9
  - [Script2Model Documentation](docs/s2m.md) - Detailed guide for script2model
10
10
  - [Release lifecycle](docs/release.md) - Details about package release lifecycle.
11
11
 
@@ -18,6 +18,5 @@ Project Hafnia is a comprehensive solution for managing data science experiments
18
18
  ## Install
19
19
 
20
20
  ```bash
21
- pip install -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/hafnia
21
+ pip install hafnia
22
22
  ```
23
-
@@ -0,0 +1,98 @@
1
+ A command-line interface tool for managing data science experiments and resources on the Project Hafnia.
2
+
3
+ ## Features
4
+
5
+ - **Platform Configuration**: Easy setup and management of Hafnia platform settings
6
+
7
+ ## Installation
8
+
9
+ ## CLI Commands
10
+
11
+ ### Core Commands
12
+
13
+ - `hafnia configure` - Configure Hafnia CLI settings
14
+ - `hafnia clear` - Remove stored configuration
15
+ - `hafnia profile` - Manage profiles (see subcommands below)
16
+
17
+ ### Profile Management
18
+
19
+ - `hafnia profile ls` - List all available profiles
20
+ - `hafnia profile use <profile_name>` - Switch to a different profile
21
+ - `hafnia profile rm <profile_name>` - Remove a specific profile
22
+ - `hafnia profile active` - Show detailed information about the active profile
23
+
24
+ ### Data Management
25
+
26
+ - `hafnia data get <url> <destination>` - Download resource from Hafnia platform to local destination
27
+
28
+ ### Experiment Management
29
+
30
+ - `hafnia runc launch <task>` - Launch a job within the image
31
+ - `hafnia runc build <recipe_url> [state_file] [ecr_repository] [image_name]` - Build docker image with a given recipe
32
+ - `hafnia runc build-local <recipe> [state_file] [image_name]` - Build recipe from local path as image with prefix - localhost
33
+
34
+ ## Configuration
35
+
36
+ The CLI tool supports multiple configuration profiles:
37
+
38
+ 1. Run `hafnia configure`
39
+ 2. Enter a profile name (defaults to "default")
40
+ 3. Enter your Hafnia API Key when prompted
41
+ 4. Provide the Hafnia Platform URL (defaults to "https://api.mdi.milestonesys.com")
42
+ 5. The organization ID will be retrieved automatically
43
+ 6. Verify your configuration with `hafnia profile active`
44
+
45
+ ## Example Usage
46
+
47
+ ```bash
48
+ # Configure the CLI with a new profile
49
+ hafnia configure
50
+
51
+ # List all available profiles
52
+ hafnia profile ls
53
+
54
+ # Switch to a different profile
55
+ hafnia profile use production
56
+
57
+ # View active profile details
58
+ hafnia profile active
59
+
60
+ # Remove a profile
61
+ hafnia profile rm old-profile
62
+
63
+ # Clear all configuration
64
+ hafnia clear
65
+
66
+ # Download a dataset sample
67
+ hafnia data download mnist
68
+
69
+ # Add '--force' to re-download dataset
70
+ hafnia data download mnist --force
71
+
72
+ # Download a resource from the platform
73
+ hafnia data get https://api.mdi.milestonesys.com/api/v1/datasets/my-dataset ./data
74
+
75
+ # Build a Docker image from a recipe
76
+ hafnia runc build https://api.mdi.milestonesys.com/api/v1/recipes/my-recipe
77
+
78
+ # Build a Docker image from a local recipe
79
+ hafnia runc build-local ./my-recipe
80
+
81
+ # Launch a task within the image
82
+ hafnia runc launch train
83
+ ```
84
+
85
+ ## Environment Variables
86
+
87
+ The CLI tool uses configuration stored in your local environment. You can view the current settings using:
88
+
89
+ ```bash
90
+ hafnia profile active
91
+ ```
92
+
93
+ Available environment variables:
94
+
95
+ - `MDI_CONFIG_PATH` - Custom path to the configuration file
96
+ - `MDI_API_KEY_SECRET_NAME` - Name of the AWS Secrets Manager secret containing the API key
97
+ - `AWS_REGION` - AWS region for ECR and Secrets Manager operations
98
+ - `RECIPE_DIR` - Directory containing recipe code (used by the `runc launch` command)
@@ -1,6 +1,6 @@
1
1
  # Script2Model: Converting Scripts to Models
2
2
 
3
- Script2Model is a utility in the MDI Python Tools that helps you convert your Python scripts into deployable models on the MDI platform.
3
+ Script2Model is a utility in the Hafnia Python Tools that helps you convert your Python scripts into deployable models on the Hafnia platform.
4
4
 
5
5
  ## Overview
6
6
 
@@ -9,11 +9,11 @@ Script2Model simplifies the process of converting experimental code into product
9
9
  1. Analyzing your Python script
10
10
  2. Extracting the necessary dependencies and functionality
11
11
  3. Creating a standardized model structure
12
- 4. Packaging the model for deployment on the MDI platform
12
+ 4. Packaging the model for deployment on the Hafnia platform
13
13
 
14
14
  ## Prerequisites
15
15
 
16
- - An active MDI platform profile (configure with `mdi configure`)
16
+ - An active Hafnia platform profile (configure with `hafnia configure`)
17
17
  - Python scripts with well-defined inputs and outputs
18
18
  - Required dependencies installed
19
19
 
@@ -22,7 +22,7 @@ Script2Model simplifies the process of converting experimental code into product
22
22
  To convert a Python script to a model:
23
23
 
24
24
  ```bash
25
- mdi script2model convert <script_path> [--output <output_dir>] [--name <model_name>]
25
+ hafnia script2model convert <script_path> [--output <output_dir>] [--name <model_name>]
26
26
  ```
27
27
 
28
28
  ### Parameters
@@ -35,10 +35,10 @@ mdi script2model convert <script_path> [--output <output_dir>] [--name <model_na
35
35
 
36
36
  ```bash
37
37
  # Convert a training script to a model
38
- mdi script2model convert ./src/scripts/train.py --name my-classifier
38
+ hafnia script2model convert ./src/scripts/train.py --name my-classifier
39
39
 
40
- # Deploy the model to MDI platform
41
- mdi script2model deploy ./model --experiment-id exp-123456
40
+ # Deploy the model to Hafnia platform
41
+ hafnia script2model deploy ./model --experiment-id exp-123456
42
42
  ```
43
43
 
44
44
  ## Working with Model Configuration
@@ -47,10 +47,10 @@ Script2Model automatically detects function signatures and generates a configura
47
47
 
48
48
  ```bash
49
49
  # Generate a configuration template
50
- mdi script2model config-template ./src/scripts/train.py
50
+ hafnia script2model config-template ./src/scripts/train.py
51
51
 
52
52
  # Convert with a custom configuration
53
- mdi script2model convert ./src/scripts/train.py --config ./config.json
53
+ hafnia script2model convert ./src/scripts/train.py --config ./config.json
54
54
  ```
55
55
 
56
56
  ## Best Practices
@@ -61,15 +61,15 @@ mdi script2model convert ./src/scripts/train.py --config ./config.json
61
61
  4. **Dependencies**: List all dependencies in requirements.txt or environment.yml
62
62
  5. **Testing**: Test your script locally before conversion
63
63
 
64
- ## Logging with MDILogger
64
+ ## Logging with HafniaLogger
65
65
 
66
- Script2Model integrates with MDILogger to track model training and evaluation metrics:
66
+ Script2Model integrates with HafniaLogger to track model training and evaluation metrics:
67
67
 
68
68
  ```python
69
- from hafnia.experiment import MDILogger
69
+ from hafnia.experiment import HafniaLogger
70
70
 
71
71
  # Initialize logger
72
- logger = MDILogger(Path("./logs"), update_interval=5)
72
+ logger = HafniaLogger(Path("./logs"), update_interval=5)
73
73
 
74
74
  # Log metrics during training
75
75
  logger.log_metric("accuracy", value=0.95, step=100)
@@ -0,0 +1,10 @@
1
+ FROM pytorch/pytorch:latest
2
+
3
+ RUN pip install torchmetrics tqdm tensorboard
4
+
5
+ ## Hafnia section of the Dockerfile
6
+ RUN pip install hafnia
7
+
8
+ # Copy recipe files in the docker image
9
+ ENV RECIPE_DIR=/opt/recipe
10
+ COPY src $RECIPE_DIR
@@ -8,7 +8,7 @@ from torchvision.models import resnet18
8
8
  from torchvision.transforms import v2
9
9
 
10
10
  from hafnia.data import load_dataset
11
- from hafnia.experiment import MDILogger
11
+ from hafnia.experiment import HafniaLogger
12
12
 
13
13
 
14
14
  def create_transforms() -> v2.Compose:
@@ -43,10 +43,10 @@ def create_dataloaders(
43
43
  Tuple[DataLoader, DataLoader]: Training and testing DataLoaders.
44
44
  """
45
45
  transforms = create_transforms()
46
- mdi_dataset = load_dataset(data_root)
47
- train_split = mdi_dataset["train"]
46
+ hafnia_dataset = load_dataset(data_root)
47
+ train_split = hafnia_dataset["train"]
48
48
  train_split.set_transform(transforms)
49
- test_split = mdi_dataset["test"]
49
+ test_split = hafnia_dataset["test"]
50
50
  test_split.set_transform(transforms)
51
51
 
52
52
  train_loader = DataLoader(
@@ -83,7 +83,7 @@ def run_train_epoch(
83
83
  criterion: nn.Module,
84
84
  metrics: nn.Module,
85
85
  device: torch.device,
86
- ml_logger: MDILogger,
86
+ ml_logger: HafniaLogger,
87
87
  log_interval: int,
88
88
  max_steps_per_epoch: int,
89
89
  ) -> Dict[str, float]:
@@ -98,7 +98,7 @@ def run_train_epoch(
98
98
  criterion (nn.Module): Loss function.
99
99
  metrics (MulticlassAccuracy): Metrics calculator.
100
100
  device (torch.device): Computation device.
101
- ml_logger (MDILogger): Logger for metrics.
101
+ ml_logger (HafniaLogger): Logger for metrics.
102
102
  log_interval (int): Interval for logging.
103
103
  max_steps_per_epoch (int): Maximum steps per epoch.
104
104
 
@@ -154,7 +154,7 @@ def run_eval(
154
154
  criterion: nn.Module,
155
155
  metrics: nn.Module,
156
156
  device: torch.device,
157
- ml_logger: MDILogger,
157
+ ml_logger: HafniaLogger,
158
158
  ):
159
159
  """
160
160
  Runs evaluation on the test dataset.
@@ -166,7 +166,7 @@ def run_eval(
166
166
  criterion (nn.Module): Loss function.
167
167
  metrics (MulticlassAccuracy): Metrics calculator.
168
168
  device (torch.device): Computation device.
169
- ml_logger (MDILogger): Logger for metrics.
169
+ ml_logger (HafniaLogger): Logger for metrics.
170
170
 
171
171
  Returns:
172
172
  Dict[str, float]: Dictionary containing average loss and accuracy.
@@ -201,7 +201,7 @@ def run_eval(
201
201
 
202
202
 
203
203
  def train_loop(
204
- logger: MDILogger,
204
+ logger: HafniaLogger,
205
205
  train_dataloader: DataLoader,
206
206
  test_dataloader: DataLoader,
207
207
  model: nn.Module,
@@ -215,7 +215,7 @@ def train_loop(
215
215
  Main training loop.
216
216
 
217
217
  Args:
218
- logger (MDILogger): Logger for metrics.
218
+ logger (HafniaLogger): Logger for metrics.
219
219
  train_dataloader (DataLoader): Training DataLoader.
220
220
  test_dataloader (DataLoader): Testing DataLoader.
221
221
  model (nn.Module): The model to train.
@@ -7,7 +7,7 @@ from pathlib import Path
7
7
  import torch
8
8
  from train_utils import create_dataloaders, create_model, train_loop
9
9
 
10
- from hafnia.experiment import MDILogger
10
+ from hafnia.experiment import HafniaLogger
11
11
 
12
12
  DATA_DIR = os.getenv("MDI_DATASET_DIR", "/opt/ml/input/data/training")
13
13
  ARTIFACT_DIR = os.getenv("MDI_ARTIFACT_DIR", "/opt/ml/output/data")
@@ -37,7 +37,7 @@ def main(args: argparse.Namespace):
37
37
  model_dir = Path(MODEL_DIR)
38
38
  model_dir.mkdir(parents=True, exist_ok=True)
39
39
 
40
- logger = MDILogger(artifacts_dir)
40
+ logger = HafniaLogger(artifacts_dir)
41
41
  logger.log_configuration(vars(args))
42
42
  train_dataloader, test_dataloader = create_dataloaders(DATA_DIR, args.batch_size)
43
43
  model = create_model(num_classes=10)
@@ -1,7 +1,7 @@
1
1
  [project]
2
2
  name = "hafnia"
3
- version = "0.1.8"
4
- description = "Python tools for communication with MDI platform."
3
+ version = "0.1.9"
4
+ description = "Python tools for communication with Hafnia platform."
5
5
  readme = "README.md"
6
6
  authors = [{ name = "Ivan Sahumbaiev", email = "ivsa@milestone.dk" }]
7
7
  requires-python = ">=3.10"
@@ -31,7 +31,7 @@ lint = [
31
31
  default-groups = "all"
32
32
 
33
33
  [project.scripts]
34
- mdi = 'cli.__main__:main'
34
+ hafnia = 'cli.__main__:main'
35
35
 
36
36
  [project.optional-dependencies]
37
37
  # Use "uv sync --extra torch" to install torch dependencies
@@ -48,7 +48,7 @@ build-backend = "hatchling.build"
48
48
  [tool.ruff]
49
49
  select = ["I", "E", "F"]
50
50
  ignore = ["E501"]
51
- line-length = 100
51
+ line-length = 120
52
52
 
53
53
  [tool.ruff.isort]
54
54
  relative-imports-order = "closest-to-furthest"
@@ -8,14 +8,14 @@ from cli.config import Config, ConfigSchema
8
8
  @click.group()
9
9
  @click.pass_context
10
10
  def main(ctx: click.Context) -> None:
11
- """MDI CLI."""
11
+ """Hafnia CLI."""
12
12
  ctx.obj = Config()
13
13
 
14
14
 
15
15
  @main.command("configure")
16
16
  @click.pass_obj
17
17
  def configure(cfg: Config) -> None:
18
- """Configure MDI CLI settings."""
18
+ """Configure Hafnia CLI settings."""
19
19
 
20
20
  from hafnia.platform.api import get_organization_id
21
21
 
@@ -26,20 +26,16 @@ def configure(cfg: Config) -> None:
26
26
  except ValueError:
27
27
  raise click.ClickException(consts.ERROR_CREATE_PROFILE)
28
28
 
29
- api_key = click.prompt("MDI API Key", type=str, hide_input=True)
29
+ api_key = click.prompt("Hafnia API Key", type=str, hide_input=True)
30
30
  try:
31
31
  cfg.api_key = api_key.strip()
32
32
  except ValueError as e:
33
33
  click.echo(f"Error: {str(e)}", err=True)
34
34
  return
35
- platform_url = click.prompt(
36
- "MDI Platform URL", type=str, default="https://api.mdi.milestonesys.com"
37
- )
35
+ platform_url = click.prompt("Hafnia Platform URL", type=str, default="https://api.mdi.milestonesys.com")
38
36
  cfg.platform_url = platform_url.strip()
39
37
  try:
40
- cfg.organization_id = get_organization_id(
41
- cfg.get_platform_endpoint("organizations"), cfg.api_key
42
- )
38
+ cfg.organization_id = get_organization_id(cfg.get_platform_endpoint("organizations"), cfg.api_key)
43
39
  except Exception:
44
40
  raise click.ClickException(consts.ERROR_ORG_ID)
45
41
  cfg.save_config()
@@ -51,7 +47,7 @@ def configure(cfg: Config) -> None:
51
47
  def clear(cfg: Config) -> None:
52
48
  """Remove stored configuration."""
53
49
  cfg.clear()
54
- click.echo("Successfully cleared MDI configuration.")
50
+ click.echo("Successfully cleared Hafnia configuration.")
55
51
 
56
52
 
57
53
  main.add_command(profile_cmds.profile)
@@ -92,11 +92,9 @@ class Config:
92
92
  if config_env_path:
93
93
  return Path(config_env_path).expanduser()
94
94
 
95
- return Path.home() / ".mdi" / "config.json"
95
+ return Path.home() / ".hafnia" / "config.json"
96
96
 
97
- def add_profile(
98
- self, profile_name: str, profile: ConfigSchema, set_active: bool = False
99
- ) -> None:
97
+ def add_profile(self, profile_name: str, profile: ConfigSchema, set_active: bool = False) -> None:
100
98
  profile_name = profile_name.strip()
101
99
  self.config_data.profiles[profile_name] = profile
102
100
  if set_active:
@@ -1,11 +1,7 @@
1
- ERROR_CONFIGURE: str = "Please configure the CLI with `mdi configure`"
2
- ERROR_PROFILE_NOT_EXIST: str = (
3
- "No active profile configured. Please configure the CLI with `mdi configure`"
4
- )
5
- ERROR_PROFILE_REMOVE_ACTIVE: str = (
6
- "Cannot remove active profile. Please switch to another profile first."
7
- )
8
- ERROR_API_KEY_NOT_SET: str = "API key not set. Please configure the CLI with `mdi configure`."
1
+ ERROR_CONFIGURE: str = "Please configure the CLI with `hafnia configure`"
2
+ ERROR_PROFILE_NOT_EXIST: str = "No active profile configured. Please configure the CLI with `hafnia configure`"
3
+ ERROR_PROFILE_REMOVE_ACTIVE: str = "Cannot remove active profile. Please switch to another profile first."
4
+ ERROR_API_KEY_NOT_SET: str = "API key not set. Please configure the CLI with `hafnia configure`."
9
5
  ERROR_ORG_ID: str = "Failed to fetch organization ID. Verify platform URL and API key."
10
6
  ERROR_CREATE_PROFILE: str = "Failed to create profile. Profile name must be unique and not empty."
11
7
 
@@ -15,4 +11,4 @@ ERROR_EXPERIMENT_DIR: str = "Source directory does not exist"
15
11
 
16
12
  PROFILE_SWITCHED_SUCCESS: str = "Switched to profile:"
17
13
  PROFILE_REMOVED_SUCCESS: str = "Removed profile:"
18
- PROFILE_TABLE_HEADER: str = "MDI Platform Profile:"
14
+ PROFILE_TABLE_HEADER: str = "Hafnia Platform Profile:"
@@ -18,14 +18,12 @@ def data():
18
18
  @click.argument("destination")
19
19
  @click.pass_obj
20
20
  def data_get(cfg: Config, url: str, destination: click.Path) -> None:
21
- """Download resource from MDI platform"""
21
+ """Download resource from Hafnia platform"""
22
22
 
23
23
  from hafnia.platform import download_resource
24
24
 
25
25
  try:
26
- result = download_resource(
27
- resource_url=url, destination=str(destination), api_key=cfg.api_key
28
- )
26
+ result = download_resource(resource_url=url, destination=str(destination), api_key=cfg.api_key)
29
27
  except Exception:
30
28
  raise click.ClickException(consts.ERROR_GET_RESOURCE)
31
29
 
@@ -37,10 +35,8 @@ def data_get(cfg: Config, url: str, destination: click.Path) -> None:
37
35
  @click.argument("destination", default=None, required=False)
38
36
  @click.option("--force", is_flag=True, default=False, help="Force download")
39
37
  @click.pass_obj
40
- def data_download(
41
- cfg: Config, dataset_name: str, destination: Optional[click.Path], force: bool
42
- ) -> None:
43
- """Download dataset from MDI platform"""
38
+ def data_download(cfg: Config, dataset_name: str, destination: Optional[click.Path], force: bool) -> None:
39
+ """Download dataset from Hafnia platform"""
44
40
 
45
41
  from hafnia.data.factory import download_or_get_dataset_path
46
42
 
@@ -52,7 +48,7 @@ def data_download(
52
48
  endpoint=endpoint_dataset,
53
49
  api_key=api_key,
54
50
  output_dir=destination,
55
- force=force,
51
+ force_redownload=force,
56
52
  )
57
53
  except Exception:
58
54
  raise click.ClickException(consts.ERROR_GET_RESOURCE)
@@ -20,9 +20,7 @@ def experiment() -> None:
20
20
  @click.argument("dataset_name")
21
21
  @click.argument("env_name")
22
22
  @click.pass_obj
23
- def create(
24
- cfg: Config, name: str, source_dir: Path, exec_cmd: str, dataset_name: str, env_name: str
25
- ) -> None:
23
+ def create(cfg: Config, name: str, source_dir: Path, exec_cmd: str, dataset_name: str, env_name: str) -> None:
26
24
  """Create a new experiment run"""
27
25
  from hafnia.platform import (
28
26
  create_experiment,
@@ -35,25 +33,19 @@ def create(
35
33
  raise click.ClickException(consts.ERROR_EXPERIMENT_DIR)
36
34
 
37
35
  try:
38
- dataset_id = get_dataset_id(
39
- dataset_name, cfg.get_platform_endpoint("datasets"), cfg.api_key
40
- )
36
+ dataset_id = get_dataset_id(dataset_name, cfg.get_platform_endpoint("datasets"), cfg.api_key)
41
37
  except (IndexError, KeyError):
42
38
  raise click.ClickException(f"Dataset '{dataset_name}' not found.")
43
39
  except Exception:
44
40
  raise click.ClickException(f"Error retrieving dataset '{dataset_name}'.")
45
41
 
46
42
  try:
47
- recipe_id = create_recipe(
48
- source_dir, cfg.get_platform_endpoint("recipes"), cfg.api_key, cfg.organization_id
49
- )
43
+ recipe_id = create_recipe(source_dir, cfg.get_platform_endpoint("recipes"), cfg.api_key, cfg.organization_id)
50
44
  except Exception:
51
45
  raise click.ClickException(f"Failed to create recipe from '{source_dir}'")
52
46
 
53
47
  try:
54
- env_id = get_exp_environment_id(
55
- env_name, cfg.get_platform_endpoint("experiment_environments"), cfg.api_key
56
- )
48
+ env_id = get_exp_environment_id(env_name, cfg.get_platform_endpoint("experiment_environments"), cfg.api_key)
57
49
  except Exception:
58
50
  raise click.ClickException(f"Environment '{env_name}' not found")
59
51
 
@@ -28,9 +28,7 @@ def launch(task: str) -> None:
28
28
  @click.argument("ecr_repository", default="localhost")
29
29
  @click.argument("image_name", default="recipe")
30
30
  @click.pass_obj
31
- def build(
32
- cfg: Config, recipe_url: str, state_file: str, ecr_repository: str, image_name: str
33
- ) -> None:
31
+ def build(cfg: Config, recipe_url: str, state_file: str, ecr_repository: str, image_name: str) -> None:
34
32
  """Build docker image with a given recipe."""
35
33
  from hafnia.platform.builder import build_image, prepare_recipe
36
34
 
@@ -33,9 +33,7 @@ def download_or_get_dataset_path(
33
33
  dataset_path_sample = dataset_path_base / "sample"
34
34
 
35
35
  if dataset_path_sample.exists() and not force_redownload:
36
- logger.info(
37
- "Dataset found locally. Set 'force=True' or add `--force` flag with cli to re-download"
38
- )
36
+ logger.info("Dataset found locally. Set 'force=True' or add `--force` flag with cli to re-download")
39
37
  return dataset_path_sample
40
38
 
41
39
  dataset_id = get_dataset_id(dataset_name, endpoint, api_key)
@@ -44,7 +42,7 @@ def download_or_get_dataset_path(
44
42
  if force_redownload and dataset_path_sample.exists():
45
43
  # Remove old files to avoid old files conflicting with new files
46
44
  shutil.rmtree(dataset_path_sample, ignore_errors=True)
47
- status = download_resource(dataset_access_info_url, dataset_path_base, api_key)
45
+ status = download_resource(dataset_access_info_url, str(dataset_path_base), api_key)
48
46
  if status:
49
47
  return dataset_path_sample
50
48
  raise RuntimeError("Failed to download dataset")
@@ -68,7 +66,7 @@ def load_from_platform(
68
66
 
69
67
 
70
68
  def load_dataset(dataset_name: str, force_redownload: bool = False) -> Union[Dataset, DatasetDict]:
71
- """Load a dataset either from a local path or from the MDI platform."""
69
+ """Load a dataset either from a local path or from the Hafnia platform."""
72
70
 
73
71
  if utils.is_remote_job():
74
72
  path_dataset = Path(os.getenv("MDI_DATASET_DIR", "/opt/ml/input/data/training"))
@@ -0,0 +1,3 @@
1
+ from hafnia.experiment.hafnia_logger import HafniaLogger
2
+
3
+ __all__ = ["HafniaLogger"]
@@ -73,7 +73,7 @@ class Entity(BaseModel):
73
73
  )
74
74
 
75
75
 
76
- class MDILogger:
76
+ class HafniaLogger:
77
77
  EXPERIMENT_FILE = "experiment.parquet"
78
78
 
79
79
  def __init__(self, log_dir: Union[Path, str] = "./.data"):
@@ -31,9 +31,7 @@ def fetch(endpoint: str, headers: Dict, params: Optional[Dict] = None) -> Dict:
31
31
  http.clear()
32
32
 
33
33
 
34
- def post(
35
- endpoint: str, headers: Dict, data: Union[Path, Dict, bytes], multipart: bool = False
36
- ) -> Dict:
34
+ def post(endpoint: str, headers: Dict, data: Union[Path, Dict, bytes], multipart: bool = False) -> Dict:
37
35
  """Posts data to backend endpoint.
38
36
 
39
37
  Args:
@@ -76,9 +74,7 @@ def post(
76
74
 
77
75
  if response.status not in (200, 201):
78
76
  error_details = response.data.decode("utf-8")
79
- raise urllib3.exceptions.HTTPError(
80
- f"Request failed with status {response.status}: {error_details}"
81
- )
77
+ raise urllib3.exceptions.HTTPError(f"Request failed with status {response.status}: {error_details}")
82
78
 
83
79
  return json.loads(response.data.decode("utf-8"))
84
80
  finally: