hafnia 0.2.3__tar.gz → 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hafnia-0.2.3 → hafnia-0.3.0}/.github/workflows/build.yaml +2 -2
- {hafnia-0.2.3 → hafnia-0.3.0}/.github/workflows/check_release.yaml +1 -1
- {hafnia-0.2.3 → hafnia-0.3.0}/.github/workflows/ci_cd.yaml +3 -2
- {hafnia-0.2.3 → hafnia-0.3.0}/.github/workflows/lint.yaml +2 -2
- {hafnia-0.2.3 → hafnia-0.3.0}/.github/workflows/publish_docker.yaml +6 -6
- {hafnia-0.2.3 → hafnia-0.3.0}/.github/workflows/publish_pypi.yaml +1 -1
- hafnia-0.3.0/.github/workflows/tests.yaml +36 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/.gitignore +1 -1
- {hafnia-0.2.3 → hafnia-0.3.0}/.vscode/extensions.json +2 -1
- {hafnia-0.2.3 → hafnia-0.3.0}/.vscode/launch.json +30 -2
- {hafnia-0.2.3 → hafnia-0.3.0}/PKG-INFO +34 -30
- {hafnia-0.2.3 → hafnia-0.3.0}/README.md +31 -29
- {hafnia-0.2.3 → hafnia-0.3.0}/examples/example_dataset_recipe.py +27 -22
- {hafnia-0.2.3 → hafnia-0.3.0}/examples/example_hafnia_dataset.py +31 -10
- {hafnia-0.2.3 → hafnia-0.3.0}/pyproject.toml +4 -1
- {hafnia-0.2.3 → hafnia-0.3.0}/src/cli/__main__.py +13 -2
- {hafnia-0.2.3 → hafnia-0.3.0}/src/cli/config.py +2 -1
- {hafnia-0.2.3 → hafnia-0.3.0}/src/cli/consts.py +1 -1
- {hafnia-0.2.3 → hafnia-0.3.0}/src/cli/dataset_cmds.py +6 -14
- hafnia-0.3.0/src/cli/dataset_recipe_cmds.py +78 -0
- hafnia-0.3.0/src/cli/experiment_cmds.py +243 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/cli/profile_cmds.py +6 -5
- {hafnia-0.2.3 → hafnia-0.3.0}/src/cli/runc_cmds.py +5 -5
- hafnia-0.3.0/src/cli/trainer_package_cmds.py +65 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/__init__.py +2 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/data/factory.py +1 -2
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/dataset_helpers.py +0 -12
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/dataset_names.py +8 -4
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/dataset_recipe/dataset_recipe.py +119 -33
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/dataset_recipe/recipe_transforms.py +32 -4
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/dataset_recipe/recipe_types.py +1 -1
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/dataset_upload_helper.py +206 -53
- hafnia-0.3.0/src/hafnia/dataset/hafnia_dataset.py +848 -0
- hafnia-0.3.0/src/hafnia/dataset/license_types.py +63 -0
- hafnia-0.3.0/src/hafnia/dataset/operations/dataset_stats.py +272 -0
- hafnia-0.3.0/src/hafnia/dataset/operations/dataset_transformations.py +403 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/operations/table_transformations.py +39 -2
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/__init__.py +8 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/classification.py +1 -1
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/experiment/hafnia_logger.py +112 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/http.py +16 -2
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/platform/__init__.py +9 -3
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/platform/builder.py +12 -10
- hafnia-0.3.0/src/hafnia/platform/dataset_recipe.py +99 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/platform/datasets.py +67 -23
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/platform/download.py +2 -1
- hafnia-0.3.0/src/hafnia/platform/experiment.py +68 -0
- hafnia-0.3.0/src/hafnia/platform/trainer_package.py +57 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/utils.py +64 -13
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/visualizations/image_visualizations.py +3 -3
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/conftest.py +2 -0
- hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-101].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_check_dataset[caltech-256].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_check_dataset[midwest-vehicle-detection].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_check_dataset[tiny-dataset].png +0 -0
- hafnia-0.2.3/tests/data/expected_images/test_visualizations/test_draw_annotations[tiny-dataset].png → hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-tiny-dataset].png +0 -0
- hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/annotations.jsonl +3 -0
- hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/annotations.parquet +0 -0
- {hafnia-0.2.3/tests/data/micro_test_datasets/coco-2017 → hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017}/dataset_info.json +94 -2
- hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.jsonl +3 -0
- hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.parquet +0 -0
- {hafnia-0.2.3/tests/data/micro_test_datasets/tiny-dataset → hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset}/dataset_info.json +3 -1
- hafnia-0.3.0/tests/helper_testing.py +188 -0
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/integration}/test_check_example_scripts.py +1 -1
- hafnia-0.3.0/tests/integration/test_cli_integration.py +99 -0
- hafnia-0.3.0/tests/integration/test_dataset_merges.py +51 -0
- hafnia-0.3.0/tests/integration/test_dataset_recipes_with_platform.py +48 -0
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/integration}/test_samples.py +22 -4
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/dataset/dataset_recipe/test_dataset_recipes.py +20 -19
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/dataset/dataset_recipe/test_recipe_transformations.py +55 -25
- hafnia-0.3.0/tests/unit/dataset/operations/test_dataset_stats.py +56 -0
- hafnia-0.3.0/tests/unit/dataset/operations/test_dataset_transformations.py +312 -0
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/dataset/operations/test_table_transformations.py +3 -3
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/dataset/test_hafnia_dataset.py +66 -7
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/dataset/test_shape_primitives.py +1 -1
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/test_builder.py +19 -24
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/test_utils.py +17 -17
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/test_visualizations.py +3 -4
- {hafnia-0.2.3 → hafnia-0.3.0}/uv.lock +1482 -1
- hafnia-0.2.3/.github/workflows/tests.yaml +0 -25
- hafnia-0.2.3/src/cli/experiment_cmds.py +0 -60
- hafnia-0.2.3/src/cli/recipe_cmds.py +0 -45
- hafnia-0.2.3/src/hafnia/dataset/hafnia_dataset.py +0 -610
- hafnia-0.2.3/src/hafnia/dataset/operations/dataset_stats.py +0 -15
- hafnia-0.2.3/src/hafnia/dataset/operations/dataset_transformations.py +0 -82
- hafnia-0.2.3/src/hafnia/platform/experiment.py +0 -73
- hafnia-0.2.3/tests/data/expected_images/test_samples/test_check_dataset[caltech-101].png +0 -0
- hafnia-0.2.3/tests/data/micro_test_datasets/coco-2017/annotations.jsonl +0 -3
- hafnia-0.2.3/tests/data/micro_test_datasets/coco-2017/annotations.parquet +0 -0
- hafnia-0.2.3/tests/data/micro_test_datasets/tiny-dataset/annotations.jsonl +0 -3
- hafnia-0.2.3/tests/data/micro_test_datasets/tiny-dataset/annotations.parquet +0 -0
- hafnia-0.2.3/tests/dataset/operations/test_dataset_transformations.py +0 -0
- hafnia-0.2.3/tests/helper_testing.py +0 -108
- {hafnia-0.2.3 → hafnia-0.3.0}/.devcontainer/devcontainer.json +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/.devcontainer/hooks/post_create +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/.github/dependabot.yaml +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/.github/workflows/Dockerfile +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/.pre-commit-config.yaml +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/.python-version +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/.vscode/settings.json +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/LICENSE +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/docs/cli.md +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/docs/release.md +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/examples/example_logger.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/examples/example_torchvision_dataloader.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/cli/__init__.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/data/__init__.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/bbox.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/bitmask.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/point.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/polygon.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/primitive.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/segmentation.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/dataset/primitives/utils.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/experiment/__init__.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/log.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/torch_helpers.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/src/hafnia/visualizations/colors.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/__init__.py +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_check_dataset[cifar100].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_check_dataset[cifar10].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_check_dataset[coco-2017].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_check_dataset[mnist].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-101].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-256].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar100].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar10].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[coco-2017].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[midwest-vehicle-detection].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[mnist].png +0 -0
- {hafnia-0.2.3 → hafnia-0.3.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[tiny-dataset].png +0 -0
- /hafnia-0.2.3/tests/data/expected_images/test_visualizations/test_blur_anonymization[coco-2017].png → /hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-coco-2017].png +0 -0
- /hafnia-0.2.3/tests/data/expected_images/test_visualizations/test_blur_anonymization[tiny-dataset].png → /hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-tiny-dataset].png +0 -0
- /hafnia-0.2.3/tests/data/expected_images/test_visualizations/test_draw_annotations[coco-2017].png → /hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-coco-2017].png +0 -0
- /hafnia-0.2.3/tests/data/expected_images/test_visualizations/test_mask_region[coco-2017].png → /hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_mask_region[micro-coco-2017].png +0 -0
- /hafnia-0.2.3/tests/data/expected_images/test_visualizations/test_mask_region[tiny-dataset].png → /hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_mask_region[micro-tiny-dataset].png +0 -0
- /hafnia-0.2.3/tests/data/micro_test_datasets/coco-2017/data/4e95c6eb6209880a.jpg → /hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/data/3b4/3b4165c8c4f830be4e95c6eb6209880a.jpg +0 -0
- /hafnia-0.2.3/tests/data/micro_test_datasets/coco-2017/data/cf86c7a23edb55ce.jpg → /hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/data/837/837b642d8a7b3b8dcf86c7a23edb55ce.jpg +0 -0
- /hafnia-0.2.3/tests/data/micro_test_datasets/coco-2017/data/182a2c0a3ce312cf.jpg → /hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/data/dc8/dc8efc98ce6304fe182a2c0a3ce312cf.jpg +0 -0
- /hafnia-0.2.3/tests/data/micro_test_datasets/tiny-dataset/data/3251d85443622e4c.png → /hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/data/3dd/3ddec2275a02e79e3251d85443622e4c.png +0 -0
- /hafnia-0.2.3/tests/data/micro_test_datasets/tiny-dataset/data/3657ababa44af9b6.png → /hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/data/4d8/4d8450b045e60e8f3657ababa44af9b6.png +0 -0
- /hafnia-0.2.3/tests/data/micro_test_datasets/tiny-dataset/data/222bbd5721a8a86e.png → /hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/data/907/907f182da7bcedb8222bbd5721a8a86e.png +0 -0
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/dataset/dataset_recipe/test_dataset_recipe_helpers.py +0 -0
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/dataset/test_colors.py +0 -0
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/dataset/test_dataset_helpers.py +0 -0
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/test_cli.py +0 -0
- {hafnia-0.2.3/tests → hafnia-0.3.0/tests/unit}/test_hafnia_logger.py +0 -0
@@ -17,8 +17,8 @@ jobs:
     outputs:
       package-version: ${{ steps.extract-version.outputs.package_version }}
     steps:
-      - uses: actions/checkout@
-      - uses: actions/setup-python@
+      - uses: actions/checkout@v5.0.0
+      - uses: actions/setup-python@v6.0.0
         with:
           python-version-file: ${{ inputs.python-version-file }}
 

@@ -19,9 +19,9 @@ jobs:
     runs-on: ubuntu-latest
     needs: lint
     steps:
-      - uses: actions/checkout@
+      - uses: actions/checkout@v5.0.0
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@0.
+        uses: aquasecurity/trivy-action@0.33.1
         with:
           scan-type: 'fs'
           scan-ref: '.'

@@ -33,6 +33,7 @@ jobs:
   test:
     name: Run Tests
     needs: lint
+    secrets: inherit
     uses: ./.github/workflows/tests.yaml
     with:
       python-version-file: "pyproject.toml"

@@ -10,8 +10,8 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@
-      - uses: actions/setup-python@
+      - uses: actions/checkout@v5.0.0
+      - uses: actions/setup-python@v6.0.0
         with:
           python-version-file: ${{ inputs.python-version-file }}
       - uses: pre-commit/action@v3.0.1

@@ -24,14 +24,14 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@
-      - uses: actions/setup-python@
+      - uses: actions/checkout@v5.0.0
+      - uses: actions/setup-python@v6.0.0
         id: python
         with:
           python-version-file: ${{ inputs.python-version-file }}
 
       - name: Download package artifact
-        uses: actions/download-artifact@
+        uses: actions/download-artifact@v5.0.0
         with:
           name: python-package
           path: dist/

@@ -47,7 +47,7 @@ jobs:
             echo "aws_region=${{ secrets.STAGE_AWS_REGION }}" >> $GITHUB_OUTPUT
           fi
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@
+        uses: aws-actions/configure-aws-credentials@v5.0.0
         with:
           role-to-assume: arn:aws:iam::${{ steps.env-vars.outputs.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
           aws-region: ${{ steps.env-vars.outputs.aws_region }}

@@ -63,7 +63,7 @@ jobs:
         uses: docker/build-push-action@v6.18.0
         env:
           ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
-          ECR_REPOSITORY:
+          ECR_REPOSITORY: platform_sdk_runtime
         with:
           context: .
           file: .github/workflows/Dockerfile

@@ -77,4 +77,4 @@ jobs:
           cache-from: type=gha
           cache-to: type=gha,mode=max
           build-args: |
-            PYTHON_VERSION=${{ steps.python.outputs.python-version }}
+            PYTHON_VERSION=${{ steps.python.outputs.python-version }}

@@ -0,0 +1,36 @@
+name: Python Tests
+
+on:
+  workflow_dispatch:
+  workflow_call:
+    inputs:
+      python-version-file:
+        required: true
+        type: string
+jobs:
+  test:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest]
+    steps:
+      - uses: actions/checkout@v5.0.0
+      - uses: actions/setup-python@v6.0.0
+        with:
+          python-version-file: ${{ inputs.python-version-file }}
+      - name: Install uv
+        uses: astral-sh/setup-uv@v6
+        with:
+          version: 0.6.8
+      - name: Install the project
+        run: uv sync --group dev
+      - name: Mount secrets config
+        shell: bash
+        env:
+          HAFNIA_CONFIG: ${{ secrets.HAFNIA_CONFIG }}
+        run: |
+          mkdir -p ~/.hafnia
+          echo "$HAFNIA_CONFIG" | jq . > ~/.hafnia/config.json
+      - name: Run tests
+        run: uv run pytest tests
+
@@ -48,17 +48,45 @@
       ],
     },
     {
-      "name": "
+      "name": "cmd: 'hafnia dataset [X]'",
       "type": "debugpy",
       "request": "launch",
       "program": "${workspaceFolder}/src/cli/__main__.py",
       "args": [
         "dataset",
+        //"ls",
         "download",
         "mnist",
-        //"./.data",
         "--force"
       ]
+    },
+    {
+      "name": "cmd: 'hafnia experiment [X]'",
+      "type": "debugpy",
+      "request": "launch",
+      "program": "${workspaceFolder}/src/cli/__main__.py",
+      "args": [
+        "experiment",
+        "create",
+        // "--trainer-path",
+        // "${workspaceFolder}/../trainer-classification",
+        //"--trainer-id",
+        //"e47d701d-c5ed-4014-9480-434f04e9459b",
+        "--trainer-path",
+        "${workspaceFolder}/../trainer-classification",
+        "--dataset",
+        "mnist",
+      ]
+    },
+    {
+      "name": "cmd: 'hafnia train-recipe [X]'",
+      "type": "debugpy",
+      "request": "launch",
+      "program": "${workspaceFolder}/src/cli/__main__.py",
+      "args": [
+        "trainer",
+        "ls"
+      ]
     }
   ]
 }
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hafnia
-Version: 0.2.3
+Version: 0.3.0
 Summary: Python SDK for communication with Hafnia platform.
 Author-email: Milestone Systems <hafniaplatform@milestone.dk>
 License-File: LICENSE

@@ -9,6 +9,7 @@ Requires-Dist: boto3>=1.35.91
 Requires-Dist: click>=8.1.8
 Requires-Dist: emoji>=2.14.1
 Requires-Dist: flatten-dict>=0.4.2
+Requires-Dist: mlflow>=3.2.0
 Requires-Dist: more-itertools>=10.7.0
 Requires-Dist: opencv-python-headless>=4.11.0.86
 Requires-Dist: pathspec>=0.12.1

@@ -19,6 +20,7 @@ Requires-Dist: pycocotools>=2.0.10
 Requires-Dist: pydantic>=2.10.4
 Requires-Dist: rich>=13.9.4
 Requires-Dist: s5cmd>=0.2.0
+Requires-Dist: sagemaker-mlflow>=0.1.0
 Requires-Dist: seedir>=0.5.0
 Requires-Dist: tqdm>=4.67.1
 Requires-Dist: xxhash>=3.5.0

@@ -26,13 +28,13 @@ Description-Content-Type: text/markdown
 
 # Hafnia
 
-The `hafnia` python
+The `hafnia` python sdk and cli is a collection of tools to create and run model trainer packages on
 the [Hafnia Platform](https://hafnia.milestonesys.com/).
 
 The package includes the following interfaces:
 
 - `cli`: A Command Line Interface (CLI) to 1) configure/connect to Hafnia's [Training-aaS](https://hafnia.readme.io/docs/training-as-a-service) and 2) create and
-launch
+launch trainer packages.
 - `hafnia`: A python package including `HafniaDataset` to manage datasets and `HafniaLogger` to do
 experiment tracking.
 

@@ -42,19 +44,19 @@ experiment tracking.
 and *hidden* datasets. Hidden datasets refers to datasets that can be used for
 training, but are not available for download or direct access.
 
-This is a key
+This is a key for the Hafnia platform, as a hidden dataset ensures data
 privacy, and allow models to be trained compliantly and ethically by third parties (you).
 
 The `script2model` approach is a Training-aaS concept, where you package your custom training
-script as a *
+project or script as a *trainer package* and use the package to train models on the hidden datasets.
 
-To support local development of a
+To support local development of a trainer package, we have introduced a **sample dataset**
 for each dataset available in the Hafnia [data library](https://hafnia.milestonesys.com/training-aas/datasets). The sample dataset is a small
-and anonymized subset of the full dataset and available for download.
+and an anonymized subset of the full dataset and available for download.
 
 With the sample dataset, you can seamlessly switch between local development and Training-aaS.
-Locally, you can create, validate and debug your
-launched with Training-aaS, where the
+Locally, you can create, validate and debug your trainer package. The trainer package is then
+launched with Training-aaS, where the package runs on the full dataset and can be scaled to run on
 multiple GPUs and instances if needed.
 
 ## Getting started: Configuration

@@ -122,19 +124,19 @@ midwest-vehicle-detection
 You can interact with data as you want, but we also provide `HafniaDataset`
 for loading/saving, managing and interacting with the dataset.
 
-We recommend
-
+We recommend the example script [examples/example_hafnia_dataset.py](examples/example_hafnia_dataset.py)
+for a short introduction on the `HafniaDataset`.
 
 Below is a short introduction to the `HafniaDataset` class.
 
 ```python
 from hafnia.dataset.hafnia_dataset import HafniaDataset, Sample
 
-# Load dataset
+# Load dataset from path
 dataset = HafniaDataset.read_from_path(path_dataset)
 
-#
-
+# Or get dataset directly by name
+dataset = HafniaDataset.from_name("midwest-vehicle-detection")
 
 # Print dataset information
 dataset.print_stats()

@@ -199,6 +201,8 @@ DatasetInfo(
         'duration_average': 120.0,
         ...
     }
+    "format_version": "0.0.2",
+    "updated_at": "2025-09-24T21:50:20.231263"
 )
 ```
 

@@ -238,7 +242,7 @@ Sample(
     height=1080,
     width=1920,
     split='train',
-
+    tags=["sample"],
     collection_index=None,
     collection_id=None,
     remote_path='s3://mdi-production-midwest-vehicle-detection/sample/data/343403325f27e390.png',

@@ -302,10 +306,10 @@ Sample(
 )
 ```
 
-To learn more,
+To learn more, we recommend the `HafniaDataset` example script [examples/example_hafnia_dataset.py](examples/example_hafnia_dataset.py).
 
 ### Dataset Locally vs. Training-aaS
-An important feature of `
+An important feature of `HafniaDataset.from_name` is that it will return the full dataset
 when loaded with Training-aaS on the Hafnia platform.
 
 This enables seamlessly switching between running/validating a training script

@@ -316,7 +320,7 @@ Available datasets with corresponding sample datasets can be found in [data libr
 
 
 ## Getting started: Experiment Tracking with HafniaLogger
-The `HafniaLogger` is an important part of the
+The `HafniaLogger` is an important part of the trainer and enables you to track, log and
 reproduce your experiments.
 
 When integrated into your training script, the `HafniaLogger` is responsible for collecting:

@@ -422,25 +426,25 @@ train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=
 
 
 ## Example: Training-aaS
-By combining logging and dataset loading, we can now construct our model
+By combining logging and dataset loading, we can now construct our model trainer package.
 
-To demonstrate this, we have provided a
-[
+To demonstrate this, we have provided a trainer package project that serves as a template for creating and structuring trainers. The example repo is called
+[trainer-classification](https://github.com/milestone-hafnia/trainer-classification)
 
-The project also contains additional information on how to structure your
-the
+The project also contains additional information on how to structure your trainer package, use the `HafniaLogger`, loading a dataset and different approach for launching
+the trainer on the Hafnia platform.
 
 
-## Create, Build and Run `
-In order to test
+## Create, Build and Run `trainer.zip` locally
+In order to test trainer package compatibility with Hafnia cloud use the following command to build and
 start the job locally.
 
 ```bash
-# Create '
-hafnia
-
-# Build the docker image locally from a '
-hafnia runc build-local
+# Create 'trainer.zip' in the root folder of your training trainer project '../trainer/classification'
+hafnia trainer create-zip ../trainer-classification
+
+# Build the docker image locally from a 'trainer.zip' file
+hafnia runc build-local trainer.zip
 
 # Execute the docker image locally with a desired dataset
 hafnia runc launch-local --dataset mnist "python scripts/train.py"
@@ -1,12 +1,12 @@
 # Hafnia
 
-The `hafnia` python
+The `hafnia` python sdk and cli is a collection of tools to create and run model trainer packages on
 the [Hafnia Platform](https://hafnia.milestonesys.com/).
 
 The package includes the following interfaces:
 
 - `cli`: A Command Line Interface (CLI) to 1) configure/connect to Hafnia's [Training-aaS](https://hafnia.readme.io/docs/training-as-a-service) and 2) create and
-launch
+launch trainer packages.
 - `hafnia`: A python package including `HafniaDataset` to manage datasets and `HafniaLogger` to do
 experiment tracking.
 

@@ -16,19 +16,19 @@ experiment tracking.
 and *hidden* datasets. Hidden datasets refers to datasets that can be used for
 training, but are not available for download or direct access.
 
-This is a key
+This is a key for the Hafnia platform, as a hidden dataset ensures data
 privacy, and allow models to be trained compliantly and ethically by third parties (you).
 
 The `script2model` approach is a Training-aaS concept, where you package your custom training
-script as a *
+project or script as a *trainer package* and use the package to train models on the hidden datasets.
 
-To support local development of a
+To support local development of a trainer package, we have introduced a **sample dataset**
 for each dataset available in the Hafnia [data library](https://hafnia.milestonesys.com/training-aas/datasets). The sample dataset is a small
-and anonymized subset of the full dataset and available for download.
+and an anonymized subset of the full dataset and available for download.
 
 With the sample dataset, you can seamlessly switch between local development and Training-aaS.
-Locally, you can create, validate and debug your
-launched with Training-aaS, where the
+Locally, you can create, validate and debug your trainer package. The trainer package is then
+launched with Training-aaS, where the package runs on the full dataset and can be scaled to run on
 multiple GPUs and instances if needed.
 
 ## Getting started: Configuration

@@ -96,19 +96,19 @@ midwest-vehicle-detection
 You can interact with data as you want, but we also provide `HafniaDataset`
 for loading/saving, managing and interacting with the dataset.
 
-We recommend
-
+We recommend the example script [examples/example_hafnia_dataset.py](examples/example_hafnia_dataset.py)
+for a short introduction on the `HafniaDataset`.
 
 Below is a short introduction to the `HafniaDataset` class.
 
 ```python
 from hafnia.dataset.hafnia_dataset import HafniaDataset, Sample
 
-# Load dataset
+# Load dataset from path
 dataset = HafniaDataset.read_from_path(path_dataset)
 
-#
-
+# Or get dataset directly by name
+dataset = HafniaDataset.from_name("midwest-vehicle-detection")
 
 # Print dataset information
 dataset.print_stats()

@@ -173,6 +173,8 @@ DatasetInfo(
         'duration_average': 120.0,
         ...
     }
+    "format_version": "0.0.2",
+    "updated_at": "2025-09-24T21:50:20.231263"
 )
 ```
 

@@ -212,7 +214,7 @@ Sample(
     height=1080,
     width=1920,
     split='train',
-
+    tags=["sample"],
     collection_index=None,
     collection_id=None,
     remote_path='s3://mdi-production-midwest-vehicle-detection/sample/data/343403325f27e390.png',

@@ -276,10 +278,10 @@ Sample(
 )
 ```
 
-To learn more,
+To learn more, we recommend the `HafniaDataset` example script [examples/example_hafnia_dataset.py](examples/example_hafnia_dataset.py).
 
 ### Dataset Locally vs. Training-aaS
-An important feature of `
+An important feature of `HafniaDataset.from_name` is that it will return the full dataset
 when loaded with Training-aaS on the Hafnia platform.
 
 This enables seamlessly switching between running/validating a training script

@@ -290,7 +292,7 @@ Available datasets with corresponding sample datasets can be found in [data libr
 
 
 ## Getting started: Experiment Tracking with HafniaLogger
-The `HafniaLogger` is an important part of the
+The `HafniaLogger` is an important part of the trainer and enables you to track, log and
 reproduce your experiments.
 
 When integrated into your training script, the `HafniaLogger` is responsible for collecting:

@@ -396,25 +398,25 @@ train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=
 
 
 ## Example: Training-aaS
-By combining logging and dataset loading, we can now construct our model
+By combining logging and dataset loading, we can now construct our model trainer package.
 
-To demonstrate this, we have provided a
-[
+To demonstrate this, we have provided a trainer package project that serves as a template for creating and structuring trainers. The example repo is called
+[trainer-classification](https://github.com/milestone-hafnia/trainer-classification)
 
-The project also contains additional information on how to structure your
-the
+The project also contains additional information on how to structure your trainer package, use the `HafniaLogger`, loading a dataset and different approach for launching
+the trainer on the Hafnia platform.
 
 
-## Create, Build and Run `
-In order to test
+## Create, Build and Run `trainer.zip` locally
+In order to test trainer package compatibility with Hafnia cloud use the following command to build and
 start the job locally.
 
 ```bash
-# Create '
-hafnia
-
-# Build the docker image locally from a '
-hafnia runc build-local
+# Create 'trainer.zip' in the root folder of your training trainer project '../trainer/classification'
+hafnia trainer create-zip ../trainer-classification
+
+# Build the docker image locally from a 'trainer.zip' file
+hafnia runc build-local trainer.zip
 
 # Execute the docker image locally with a desired dataset
 hafnia runc launch-local --dataset mnist "python scripts/train.py"
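The README's local-run example above ends with `hafnia runc launch-local --dataset mnist "python scripts/train.py"`. That `scripts/train.py` lives in the separate trainer-classification template repository and is not part of this package diff; the sketch below is a hypothetical minimal entry point, assuming only the `HafniaDataset.from_name` and `print_stats` calls that the README itself documents.

```python
# Hypothetical minimal trainer entry point (scripts/train.py is not included in this diff).
from hafnia.dataset.hafnia_dataset import HafniaDataset


def main() -> None:
    # Locally this resolves to the downloadable, anonymized sample dataset;
    # launched with Training-aaS, the same call returns the full hidden dataset.
    dataset = HafniaDataset.from_name("mnist")
    dataset.print_stats()
    # ... model construction and the actual training loop would follow here ...


if __name__ == "__main__":
    main()
```

Because `from_name` resolves to the sample dataset locally and to the full hidden dataset under Training-aaS, the same entry point works unchanged in both environments.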
@@ -2,6 +2,7 @@ from pathlib import Path
 
 from rich import print as rprint
 
+from hafnia import utils
 from hafnia.data.factory import load_dataset
 from hafnia.dataset.dataset_recipe.dataset_recipe import DatasetRecipe
 from hafnia.dataset.dataset_recipe.recipe_transforms import (

@@ -15,10 +16,6 @@ from hafnia.dataset.hafnia_dataset import HafniaDataset
 # A DatasetRecipe is a recipe for the dataset you want to create.
 # The recipe itself is not executed - this is just a specification of the dataset you want!
 
-# A DatasetRecipe is an important concept in Hafnia as it allows you to merge multiple datasets
-# and transformations in a single recipe. This is especially useful for Training as a Service (TaaS)
-# where you need to define the dataset you want as a configuration and load it in the TaaS platform.
-
 # The 'DatasetRecipe' interface is similar to the 'HafniaDataset' interface.
 # To demonstrate, we will first create a dataset with the regular 'HafniaDataset' interface.
 # This line will get the "mnist" dataset, shuffle it, and select 20 samples.

@@ -34,30 +31,38 @@ dataset = dataset_recipe.build()
 # You can print the dataset recipe to the operations that were applied to it.
 rprint(dataset_recipe)
 
-#
-
-
-
-# This is an important feature of a 'DatasetRecipe' it only registers operations and that the recipe itself
-# - and not the dataset - can be saved as a file and loaded from file.
-# Meaning you can easily save, share, load and build the dataset later or in a different environment.
-# For TaaS, this is the only way to include multiple datasets during training.
-
+# The key for recipes is that they can be saved and loaded as a JSON.
+# This also allows the recipe to be saved, shared, loaded and used later to build a dataset
+# in a different environment.
 
-#
-
-
+# Example: Saving and loading a dataset recipe from file.
+path_recipe = Path(".data/dataset_recipes/example_recipe.json")
+json_str: str = dataset_recipe.as_json_file(path_recipe)
+dataset_recipe_again: DatasetRecipe = DatasetRecipe.from_json_file(path_recipe)
 
-#
+# Verify that the loaded recipe is identical to the original recipe.
 assert dataset_recipe_again == dataset_recipe
 
-#
+# It is also possible to generate the recipe as python code
 dataset_recipe.as_python_code()
 
-#
-
+# The recipe also allows you to combine multiple datasets and transformations that can be
+# executed in the TaaS platform. This is demonstrated below:
+if utils.is_hafnia_configured():  # First ensure you are connected to the hafnia platform
+    # Upload the dataset recipe - this will make it available for TaaS and for users of your organization
+    dataset_recipe.as_platform_recipe(recipe_name="example-mnist-recipe")
+
+    # The recipe is now available in TaaS, for different environments and other users in your organization
+    dataset_recipe_again = DatasetRecipe.from_recipe_name(name="example-mnist-recipe")
+
+    # Launch an experiment with the dataset recipe using the CLI:
+    # hafnia experiment create --dataset-recipe example-mnist-recipe --trainer-path ../trainer-classification
+
+    # Coming soon: Dataset recipes will be included in the web platform to them to be shared, managed
+    # and used in experiments.
 
-
+### More examples dataset recipes ###
+# Example: 'DatasetRecipe' by merging multiple dataset recipes
 dataset_recipe = DatasetRecipe.from_merger(
     recipes=[
         DatasetRecipe.from_name(name="mnist"),

@@ -166,4 +171,4 @@ rprint(explicit_recipe_from_implicit)
 
 # Verify that the conversion produces the same result
 assert explicit_recipe_from_implicit == explicit_recipe
-rprint("
+rprint("Conversion successful - recipes are equivalent!")