hafnia 0.2.4.tar.gz → 0.4.0.tar.gz
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/workflows/build.yaml +1 -1
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/workflows/ci_cd.yaml +2 -1
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/workflows/lint.yaml +1 -1
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/workflows/publish_docker.yaml +4 -4
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/workflows/tests.yaml +15 -3
- {hafnia-0.2.4 → hafnia-0.4.0}/.gitignore +1 -1
- hafnia-0.4.0/.trivyignore +3 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.vscode/extensions.json +2 -1
- {hafnia-0.2.4 → hafnia-0.4.0}/.vscode/launch.json +30 -2
- {hafnia-0.2.4 → hafnia-0.4.0}/PKG-INFO +40 -34
- {hafnia-0.2.4 → hafnia-0.4.0}/README.md +35 -32
- hafnia-0.4.0/examples/example_dataset_recipe.py +247 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/examples/example_hafnia_dataset.py +39 -13
- {hafnia-0.2.4 → hafnia-0.4.0}/examples/example_torchvision_dataloader.py +2 -2
- {hafnia-0.2.4 → hafnia-0.4.0}/pyproject.toml +10 -2
- {hafnia-0.2.4 → hafnia-0.4.0}/src/cli/__main__.py +16 -3
- {hafnia-0.2.4 → hafnia-0.4.0}/src/cli/config.py +45 -4
- {hafnia-0.2.4 → hafnia-0.4.0}/src/cli/consts.py +1 -1
- {hafnia-0.2.4 → hafnia-0.4.0}/src/cli/dataset_cmds.py +6 -14
- hafnia-0.4.0/src/cli/dataset_recipe_cmds.py +78 -0
- hafnia-0.4.0/src/cli/experiment_cmds.py +243 -0
- hafnia-0.4.0/src/cli/keychain.py +88 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/cli/profile_cmds.py +10 -6
- {hafnia-0.2.4 → hafnia-0.4.0}/src/cli/runc_cmds.py +5 -5
- hafnia-0.4.0/src/cli/trainer_package_cmds.py +65 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/__init__.py +2 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/data/factory.py +1 -2
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/dataset_helpers.py +9 -14
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/dataset_names.py +10 -5
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/dataset_recipe/dataset_recipe.py +165 -67
- hafnia-0.4.0/src/hafnia/dataset/dataset_recipe/recipe_transforms.py +97 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/dataset_recipe/recipe_types.py +1 -1
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/dataset_upload_helper.py +265 -56
- hafnia-0.4.0/src/hafnia/dataset/format_conversions/image_classification_from_directory.py +106 -0
- hafnia-0.4.0/src/hafnia/dataset/format_conversions/torchvision_datasets.py +281 -0
- hafnia-0.4.0/src/hafnia/dataset/hafnia_dataset.py +974 -0
- hafnia-0.4.0/src/hafnia/dataset/license_types.py +63 -0
- hafnia-0.4.0/src/hafnia/dataset/operations/dataset_stats.py +271 -0
- hafnia-0.4.0/src/hafnia/dataset/operations/dataset_transformations.py +407 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/operations/table_transformations.py +43 -5
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/__init__.py +8 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/bbox.py +25 -12
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/bitmask.py +26 -14
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/classification.py +16 -8
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/point.py +7 -3
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/polygon.py +16 -9
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/segmentation.py +10 -7
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/experiment/hafnia_logger.py +111 -8
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/http.py +16 -2
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/platform/__init__.py +9 -3
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/platform/builder.py +12 -10
- hafnia-0.4.0/src/hafnia/platform/dataset_recipe.py +104 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/platform/datasets.py +47 -9
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/platform/download.py +25 -19
- hafnia-0.4.0/src/hafnia/platform/experiment.py +68 -0
- hafnia-0.4.0/src/hafnia/platform/trainer_package.py +57 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/utils.py +81 -13
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/visualizations/image_visualizations.py +4 -4
- {hafnia-0.2.4 → hafnia-0.4.0}/tests/conftest.py +2 -0
- hafnia-0.4.0/tests/data/dataset_image_metadata_schema.yaml +344 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-101].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-256].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[cifar100].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[cifar10].png +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_check_dataset[midwest-vehicle-detection].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[mnist].png +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_check_dataset[tiny-dataset].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-101].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-256].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar100].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar10].png +0 -0
- hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[mnist].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_visualizations/test_draw_annotations[tiny-dataset].png → hafnia-0.4.0/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-tiny-dataset].png +0 -0
- hafnia-0.4.0/tests/data/micro_test_datasets/micro-coco-2017/annotations.jsonl +3 -0
- hafnia-0.4.0/tests/data/micro_test_datasets/micro-coco-2017/annotations.parquet +0 -0
- {hafnia-0.2.4/tests/data/micro_test_datasets/coco-2017 → hafnia-0.4.0/tests/data/micro_test_datasets/micro-coco-2017}/dataset_info.json +98 -3
- hafnia-0.4.0/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.jsonl +3 -0
- hafnia-0.4.0/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.parquet +0 -0
- {hafnia-0.2.4/tests/data/micro_test_datasets/tiny-dataset → hafnia-0.4.0/tests/data/micro_test_datasets/micro-tiny-dataset}/dataset_info.json +6 -1
- hafnia-0.4.0/tests/helper_testing.py +192 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/integration}/test_check_example_scripts.py +1 -1
- hafnia-0.4.0/tests/integration/test_cli_integration.py +104 -0
- hafnia-0.4.0/tests/integration/test_dataset_merges.py +59 -0
- hafnia-0.4.0/tests/integration/test_dataset_recipes_with_platform.py +74 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/integration}/test_samples.py +30 -9
- hafnia-0.4.0/tests/integration/test_torchvision_datasets.py +21 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/dataset/dataset_recipe/test_dataset_recipes.py +28 -64
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/dataset/dataset_recipe/test_recipe_transformations.py +89 -25
- hafnia-0.4.0/tests/unit/dataset/format_conversions/test_image_classification_directory.py +47 -0
- hafnia-0.4.0/tests/unit/dataset/operations/test_dataset_stats.py +56 -0
- hafnia-0.4.0/tests/unit/dataset/operations/test_dataset_transformations.py +312 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/dataset/operations/test_table_transformations.py +3 -3
- hafnia-0.4.0/tests/unit/dataset/test_hafnia_dataset.py +208 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/dataset/test_shape_primitives.py +41 -2
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/test_builder.py +19 -24
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/test_cli.py +2 -2
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/test_utils.py +17 -17
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/test_visualizations.py +3 -4
- hafnia-0.4.0/uv.lock +4350 -0
- hafnia-0.2.4/examples/example_dataset_recipe.py +0 -169
- hafnia-0.2.4/src/cli/experiment_cmds.py +0 -60
- hafnia-0.2.4/src/cli/recipe_cmds.py +0 -45
- hafnia-0.2.4/src/hafnia/dataset/dataset_recipe/recipe_transforms.py +0 -53
- hafnia-0.2.4/src/hafnia/dataset/hafnia_dataset.py +0 -610
- hafnia-0.2.4/src/hafnia/dataset/operations/dataset_stats.py +0 -15
- hafnia-0.2.4/src/hafnia/dataset/operations/dataset_transformations.py +0 -82
- hafnia-0.2.4/src/hafnia/platform/experiment.py +0 -73
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_check_dataset[caltech-101].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_check_dataset[caltech-256].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_check_dataset[cifar100].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_check_dataset[cifar10].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_check_dataset[mnist].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-101].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-256].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar100].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar10].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[mnist].png +0 -0
- hafnia-0.2.4/tests/data/micro_test_datasets/coco-2017/annotations.jsonl +0 -3
- hafnia-0.2.4/tests/data/micro_test_datasets/coco-2017/annotations.parquet +0 -0
- hafnia-0.2.4/tests/data/micro_test_datasets/tiny-dataset/annotations.jsonl +0 -3
- hafnia-0.2.4/tests/data/micro_test_datasets/tiny-dataset/annotations.parquet +0 -0
- hafnia-0.2.4/tests/dataset/operations/test_dataset_transformations.py +0 -0
- hafnia-0.2.4/tests/dataset/test_hafnia_dataset.py +0 -110
- hafnia-0.2.4/tests/helper_testing.py +0 -108
- hafnia-0.2.4/uv.lock +0 -1861
- {hafnia-0.2.4 → hafnia-0.4.0}/.devcontainer/devcontainer.json +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.devcontainer/hooks/post_create +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/dependabot.yaml +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/workflows/Dockerfile +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/workflows/check_release.yaml +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.github/workflows/publish_pypi.yaml +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.pre-commit-config.yaml +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.python-version +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/.vscode/settings.json +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/LICENSE +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/docs/cli.md +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/docs/release.md +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/examples/example_logger.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/cli/__init__.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/data/__init__.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/primitive.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/dataset/primitives/utils.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/experiment/__init__.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/log.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/torch_helpers.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/src/hafnia/visualizations/colors.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/tests/__init__.py +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_check_dataset[coco-2017].png +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[coco-2017].png +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[midwest-vehicle-detection].png +0 -0
- {hafnia-0.2.4 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[tiny-dataset].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_visualizations/test_blur_anonymization[coco-2017].png → hafnia-0.4.0/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-coco-2017].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_visualizations/test_blur_anonymization[tiny-dataset].png → hafnia-0.4.0/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-tiny-dataset].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_visualizations/test_draw_annotations[coco-2017].png → hafnia-0.4.0/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-coco-2017].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_visualizations/test_mask_region[coco-2017].png → hafnia-0.4.0/tests/data/expected_images/test_visualizations/test_mask_region[micro-coco-2017].png +0 -0
- hafnia-0.2.4/tests/data/expected_images/test_visualizations/test_mask_region[tiny-dataset].png → hafnia-0.4.0/tests/data/expected_images/test_visualizations/test_mask_region[micro-tiny-dataset].png +0 -0
- hafnia-0.2.4/tests/data/micro_test_datasets/coco-2017/data/4e95c6eb6209880a.jpg → hafnia-0.4.0/tests/data/micro_test_datasets/micro-coco-2017/data/3b4/3b4165c8c4f830be4e95c6eb6209880a.jpg +0 -0
- hafnia-0.2.4/tests/data/micro_test_datasets/coco-2017/data/cf86c7a23edb55ce.jpg → hafnia-0.4.0/tests/data/micro_test_datasets/micro-coco-2017/data/837/837b642d8a7b3b8dcf86c7a23edb55ce.jpg +0 -0
- hafnia-0.2.4/tests/data/micro_test_datasets/coco-2017/data/182a2c0a3ce312cf.jpg → hafnia-0.4.0/tests/data/micro_test_datasets/micro-coco-2017/data/dc8/dc8efc98ce6304fe182a2c0a3ce312cf.jpg +0 -0
- hafnia-0.2.4/tests/data/micro_test_datasets/tiny-dataset/data/3251d85443622e4c.png → hafnia-0.4.0/tests/data/micro_test_datasets/micro-tiny-dataset/data/3dd/3ddec2275a02e79e3251d85443622e4c.png +0 -0
- hafnia-0.2.4/tests/data/micro_test_datasets/tiny-dataset/data/3657ababa44af9b6.png → hafnia-0.4.0/tests/data/micro_test_datasets/micro-tiny-dataset/data/4d8/4d8450b045e60e8f3657ababa44af9b6.png +0 -0
- hafnia-0.2.4/tests/data/micro_test_datasets/tiny-dataset/data/222bbd5721a8a86e.png → hafnia-0.4.0/tests/data/micro_test_datasets/micro-tiny-dataset/data/907/907f182da7bcedb8222bbd5721a8a86e.png +0 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/dataset/dataset_recipe/test_dataset_recipe_helpers.py +0 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/dataset/test_colors.py +0 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/dataset/test_dataset_helpers.py +0 -0
- {hafnia-0.2.4/tests → hafnia-0.4.0/tests/unit}/test_hafnia_logger.py +0 -0
@@ -18,7 +18,7 @@ jobs:
       package-version: ${{ steps.extract-version.outputs.package_version }}
     steps:
       - uses: actions/checkout@v5.0.0
-      - uses: actions/setup-python@
+      - uses: actions/setup-python@v6.0.0
         with:
           python-version-file: ${{ inputs.python-version-file }}
 
@@ -21,7 +21,7 @@ jobs:
     steps:
       - uses: actions/checkout@v5.0.0
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@0.
+        uses: aquasecurity/trivy-action@0.33.1
         with:
           scan-type: 'fs'
           scan-ref: '.'
@@ -33,6 +33,7 @@ jobs:
   test:
     name: Run Tests
     needs: lint
+    secrets: inherit
     uses: ./.github/workflows/tests.yaml
     with:
       python-version-file: "pyproject.toml"
@@ -25,7 +25,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5.0.0
-      - uses: actions/setup-python@
+      - uses: actions/setup-python@v6.0.0
        id: python
        with:
          python-version-file: ${{ inputs.python-version-file }}
@@ -47,7 +47,7 @@ jobs:
             echo "aws_region=${{ secrets.STAGE_AWS_REGION }}" >> $GITHUB_OUTPUT
           fi
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@
+        uses: aws-actions/configure-aws-credentials@v5.1.0
         with:
           role-to-assume: arn:aws:iam::${{ steps.env-vars.outputs.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
           aws-region: ${{ steps.env-vars.outputs.aws_region }}
@@ -63,7 +63,7 @@ jobs:
         uses: docker/build-push-action@v6.18.0
         env:
           ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
-          ECR_REPOSITORY:
+          ECR_REPOSITORY: platform_sdk_runtime
         with:
           context: .
           file: .github/workflows/Dockerfile
@@ -77,4 +77,4 @@ jobs:
           cache-from: type=gha
           cache-to: type=gha,mode=max
           build-args: |
-            PYTHON_VERSION=${{ steps.python.outputs.python-version }}
+            PYTHON_VERSION=${{ steps.python.outputs.python-version }}
@@ -9,10 +9,14 @@ on:
         type: string
 jobs:
   test:
-    runs-on:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      max-parallel: 1
+      matrix:
+        os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v5.0.0
-      - uses: actions/setup-python@
+      - uses: actions/setup-python@v6.0.0
        with:
          python-version-file: ${{ inputs.python-version-file }}
       - name: Install uv
@@ -21,5 +25,13 @@ jobs:
           version: 0.6.8
       - name: Install the project
         run: uv sync --group dev
+      - name: Mount secrets config
+        shell: bash
+        env:
+          HAFNIA_CONFIG: ${{ secrets.HAFNIA_CONFIG }}
+        run: |
+          mkdir -p ~/.hafnia
+          echo "$HAFNIA_CONFIG" | jq . > ~/.hafnia/config.json
       - name: Run tests
-        run: uv run pytest tests
+        run: uv run pytest tests
+
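The new "Mount secrets config" step above writes the `HAFNIA_CONFIG` secret to `~/.hafnia/config.json`, piping it through `jq .` so that a malformed secret fails the job instead of producing a broken config file. A minimal local sketch of the same idea follows; the config keys are hypothetical placeholders, since the actual schema of the file is not part of this diff.

```python
# Minimal sketch of what the "Mount secrets config" CI step produces locally.
# The keys below are hypothetical placeholders; the real schema of
# ~/.hafnia/config.json is not shown in this diff.
import json
from pathlib import Path

config = {"api_key": "YOUR_KEY", "platform_url": "https://api.mdi.milestonesys.com"}

path = Path.home() / ".hafnia" / "config.json"
path.parent.mkdir(parents=True, exist_ok=True)  # mkdir -p ~/.hafnia
path.write_text(json.dumps(config, indent=2))   # like `jq .`: validated, pretty-printed JSON
```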
@@ -48,17 +48,45 @@
         ],
       },
       {
-        "name": "
+        "name": "cmd: 'hafnia dataset [X]'",
         "type": "debugpy",
         "request": "launch",
         "program": "${workspaceFolder}/src/cli/__main__.py",
         "args": [
           "dataset",
+          //"ls",
           "download",
           "mnist",
-          //"./.data",
           "--force"
         ]
+      },
+      {
+        "name": "cmd: 'hafnia experiment [X]'",
+        "type": "debugpy",
+        "request": "launch",
+        "program": "${workspaceFolder}/src/cli/__main__.py",
+        "args": [
+          "experiment",
+          "create",
+          // "--trainer-path",
+          // "${workspaceFolder}/../trainer-classification",
+          //"--trainer-id",
+          //"e47d701d-c5ed-4014-9480-434f04e9459b",
+          "--trainer-path",
+          "${workspaceFolder}/../trainer-classification",
+          "--dataset",
+          "mnist",
+        ]
+      },
+      {
+        "name": "cmd: 'hafnia train-recipe [X]'",
+        "type": "debugpy",
+        "request": "launch",
+        "program": "${workspaceFolder}/src/cli/__main__.py",
+        "args": [
+          "trainer",
+          "ls"
+        ]
       }
     ]
   }
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hafnia
-Version: 0.2.4
+Version: 0.4.0
 Summary: Python SDK for communication with Hafnia platform.
 Author-email: Milestone Systems <hafniaplatform@milestone.dk>
 License-File: LICENSE
@@ -9,6 +9,9 @@ Requires-Dist: boto3>=1.35.91
 Requires-Dist: click>=8.1.8
 Requires-Dist: emoji>=2.14.1
 Requires-Dist: flatten-dict>=0.4.2
+Requires-Dist: keyring>=25.6.0
+Requires-Dist: mcp==1.16.0
+Requires-Dist: mlflow>=3.4.0
 Requires-Dist: more-itertools>=10.7.0
 Requires-Dist: opencv-python-headless>=4.11.0.86
 Requires-Dist: pathspec>=0.12.1
@@ -19,20 +22,20 @@ Requires-Dist: pycocotools>=2.0.10
 Requires-Dist: pydantic>=2.10.4
 Requires-Dist: rich>=13.9.4
 Requires-Dist: s5cmd>=0.2.0
+Requires-Dist: sagemaker-mlflow>=0.1.0
 Requires-Dist: seedir>=0.5.0
-Requires-Dist: tqdm>=4.67.1
 Requires-Dist: xxhash>=3.5.0
 Description-Content-Type: text/markdown
 
 # Hafnia
 
-The `hafnia` python
+The `hafnia` python sdk and cli is a collection of tools to create and run model trainer packages on
 the [Hafnia Platform](https://hafnia.milestonesys.com/).
 
 The package includes the following interfaces:
 
 - `cli`: A Command Line Interface (CLI) to 1) configure/connect to Hafnia's [Training-aaS](https://hafnia.readme.io/docs/training-as-a-service) and 2) create and
-launch
+launch trainer packages.
 - `hafnia`: A python package including `HafniaDataset` to manage datasets and `HafniaLogger` to do
 experiment tracking.
 
@@ -42,19 +45,19 @@ experiment tracking.
 and *hidden* datasets. Hidden datasets refers to datasets that can be used for
 training, but are not available for download or direct access.
 
-This is a key
+This is a key for the Hafnia platform, as a hidden dataset ensures data
 privacy, and allow models to be trained compliantly and ethically by third parties (you).
 
 The `script2model` approach is a Training-aaS concept, where you package your custom training
-script as a *
+project or script as a *trainer package* and use the package to train models on the hidden datasets.
 
-To support local development of a
+To support local development of a trainer package, we have introduced a **sample dataset**
 for each dataset available in the Hafnia [data library](https://hafnia.milestonesys.com/training-aas/datasets). The sample dataset is a small
-and anonymized subset of the full dataset and available for download.
+and an anonymized subset of the full dataset and available for download.
 
 With the sample dataset, you can seamlessly switch between local development and Training-aaS.
-Locally, you can create, validate and debug your
-launched with Training-aaS, where the
+Locally, you can create, validate and debug your trainer package. The trainer package is then
+launched with Training-aaS, where the package runs on the full dataset and can be scaled to run on
 multiple GPUs and instances if needed.
 
 ## Getting started: Configuration
@@ -78,6 +81,7 @@ Copy the key and save it for later use.
 Hafnia API Key: # Pass your HAFNIA API key
 Hafnia Platform URL [https://api.mdi.milestonesys.com]: # Press [Enter]
 ```
+
 1. Download `mnist` from terminal to verify that your configuration is working.
 
 ```bash
@@ -89,7 +93,7 @@ With Hafnia configured on your local machine, it is now possible to download
 and explore the dataset sample with a python script:
 
 ```python
-from hafnia.data import
+from hafnia.data import get_dataset_path
 from hafnia.dataset.hafnia_dataset import HafniaDataset
 
 # To download the sample dataset use:
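Combining the import completed above with the `HafniaDataset` calls that appear elsewhere in this README diff (`read_from_path`, `print_stats`), a download-and-inspect script would plausibly look like the sketch below; only fragments are visible in the diff, so the exact signature of `get_dataset_path` is an assumption.

```python
# Sketch assembled from fragments visible in this diff; exact signatures are assumptions.
from hafnia.data import get_dataset_path
from hafnia.dataset.hafnia_dataset import HafniaDataset

# Download the sample dataset (if missing) and get its local path.
path_dataset = get_dataset_path("mnist")

# Load it and print summary statistics, as shown later in the README.
dataset = HafniaDataset.read_from_path(path_dataset)
dataset.print_stats()
```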
@@ -122,19 +126,19 @@ midwest-vehicle-detection
 You can interact with data as you want, but we also provide `HafniaDataset`
 for loading/saving, managing and interacting with the dataset.
 
-We recommend
-
+We recommend the example script [examples/example_hafnia_dataset.py](examples/example_hafnia_dataset.py)
+for a short introduction on the `HafniaDataset`.
 
 Below is a short introduction to the `HafniaDataset` class.
 
 ```python
 from hafnia.dataset.hafnia_dataset import HafniaDataset, Sample
 
-# Load dataset
+# Load dataset from path
 dataset = HafniaDataset.read_from_path(path_dataset)
 
-#
-
+# Or get dataset directly by name
+dataset = HafniaDataset.from_name("midwest-vehicle-detection")
 
 # Print dataset information
 dataset.print_stats()
@@ -199,6 +203,8 @@ DatasetInfo(
     'duration_average': 120.0,
     ...
   }
+  "format_version": "0.0.2",
+  "updated_at": "2025-09-24T21:50:20.231263"
 )
 ```
 
@@ -238,7 +244,7 @@ Sample(
   height=1080,
   width=1920,
   split='train',
-
+  tags=["sample"],
   collection_index=None,
   collection_id=None,
   remote_path='s3://mdi-production-midwest-vehicle-detection/sample/data/343403325f27e390.png',
@@ -302,10 +308,10 @@ Sample(
 )
 ```
 
-To learn more,
+To learn more, we recommend the `HafniaDataset` example script [examples/example_hafnia_dataset.py](examples/example_hafnia_dataset.py).
 
 ### Dataset Locally vs. Training-aaS
-An important feature of `
+An important feature of `HafniaDataset.from_name` is that it will return the full dataset
 when loaded with Training-aaS on the Hafnia platform.
 
 This enables seamlessly switching between running/validating a training script
@@ -316,7 +322,7 @@ Available datasets with corresponding sample datasets can be found in [data libr
 
 
 ## Getting started: Experiment Tracking with HafniaLogger
-The `HafniaLogger` is an important part of the
+The `HafniaLogger` is an important part of the trainer and enables you to track, log and
 reproduce your experiments.
 
 When integrated into your training script, the `HafniaLogger` is responsible for collecting:
@@ -356,7 +362,7 @@ logger.log_scalar("validation/loss", value=0.1, step=100)
 logger.log_metric("validation/accuracy", value=0.95, step=100)
 ```
 
-
+The tracker behaves differently when running locally or in the cloud.
 Locally, experiment data is stored in a local folder `.data/experiments/{DATE_TIME}`.
 
 In the cloud, the experiment data will be available in the Hafnia platform under
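Only the two `log_*` calls above appear verbatim in this diff. A minimal sketch of how they might sit in a training script follows; the import path is inferred from `src/hafnia/experiment/hafnia_logger.py` in the file list, and the constructor call is an assumption.

```python
# Sketch of experiment tracking; only log_scalar/log_metric appear verbatim in this diff.
from hafnia.experiment import HafniaLogger  # import path inferred from the file list above

logger = HafniaLogger()  # hypothetical: the real constructor arguments are not shown here

# The two calls below appear verbatim in the README excerpt:
logger.log_scalar("validation/loss", value=0.1, step=100)
logger.log_metric("validation/accuracy", value=0.95, step=100)
```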
@@ -380,7 +386,7 @@ and datasets available in the data library.
 
 ```python
 # Load Hugging Face dataset
-dataset_splits =
+dataset_splits = HafniaDataset.from_name("midwest-vehicle-detection")
 
 # Define transforms
 train_transforms = v2.Compose(
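The excerpt above cuts off at `v2.Compose(`. A typical torchvision v2 pipeline completing that call is sketched below; the specific transforms are placeholders rather than the ones actually used in the README.

```python
# Placeholder completion of the `v2.Compose(` call shown above; the actual
# transforms used in the README are not visible in this diff.
import torch
from torchvision.transforms import v2

train_transforms = v2.Compose(
    [
        v2.ToImage(),                           # convert inputs to tv_tensors.Image
        v2.RandomHorizontalFlip(p=0.5),         # simple augmentation
        v2.ToDtype(torch.float32, scale=True),  # uint8 [0, 255] -> float32 [0, 1]
    ]
)
```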
@@ -422,25 +428,25 @@ train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=
 
 
 ## Example: Training-aaS
-By combining logging and dataset loading, we can now construct our model
+By combining logging and dataset loading, we can now construct our model trainer package.
 
-To demonstrate this, we have provided a
-[
+To demonstrate this, we have provided a trainer package project that serves as a template for creating and structuring trainers. The example repo is called
+[trainer-classification](https://github.com/milestone-hafnia/trainer-classification)
 
-The project also contains additional information on how to structure your
-the
+The project also contains additional information on how to structure your trainer package, use the `HafniaLogger`, loading a dataset and different approach for launching
+the trainer on the Hafnia platform.
 
 
-## Create, Build and Run `
-In order to test
+## Create, Build and Run `trainer.zip` locally
+In order to test trainer package compatibility with Hafnia cloud use the following command to build and
 start the job locally.
 
 ```bash
-# Create '
-hafnia
-
-# Build the docker image locally from a '
-hafnia runc build-local
+# Create 'trainer.zip' in the root folder of your training trainer project '../trainer/classification'
+hafnia trainer create-zip ../trainer-classification
+
+# Build the docker image locally from a 'trainer.zip' file
+hafnia runc build-local trainer.zip
 
 # Execute the docker image locally with a desired dataset
 hafnia runc launch-local --dataset mnist "python scripts/train.py"
@@ -1,12 +1,12 @@
 # Hafnia
 
-The `hafnia` python
+The `hafnia` python sdk and cli is a collection of tools to create and run model trainer packages on
 the [Hafnia Platform](https://hafnia.milestonesys.com/).
 
 The package includes the following interfaces:
 
 - `cli`: A Command Line Interface (CLI) to 1) configure/connect to Hafnia's [Training-aaS](https://hafnia.readme.io/docs/training-as-a-service) and 2) create and
-launch
+launch trainer packages.
 - `hafnia`: A python package including `HafniaDataset` to manage datasets and `HafniaLogger` to do
 experiment tracking.
 
@@ -16,19 +16,19 @@ experiment tracking.
 and *hidden* datasets. Hidden datasets refers to datasets that can be used for
 training, but are not available for download or direct access.
 
-This is a key
+This is a key for the Hafnia platform, as a hidden dataset ensures data
 privacy, and allow models to be trained compliantly and ethically by third parties (you).
 
 The `script2model` approach is a Training-aaS concept, where you package your custom training
-script as a *
+project or script as a *trainer package* and use the package to train models on the hidden datasets.
 
-To support local development of a
+To support local development of a trainer package, we have introduced a **sample dataset**
 for each dataset available in the Hafnia [data library](https://hafnia.milestonesys.com/training-aas/datasets). The sample dataset is a small
-and anonymized subset of the full dataset and available for download.
+and an anonymized subset of the full dataset and available for download.
 
 With the sample dataset, you can seamlessly switch between local development and Training-aaS.
-Locally, you can create, validate and debug your
-launched with Training-aaS, where the
+Locally, you can create, validate and debug your trainer package. The trainer package is then
+launched with Training-aaS, where the package runs on the full dataset and can be scaled to run on
 multiple GPUs and instances if needed.
 
 ## Getting started: Configuration
@@ -52,6 +52,7 @@ Copy the key and save it for later use.
 Hafnia API Key: # Pass your HAFNIA API key
 Hafnia Platform URL [https://api.mdi.milestonesys.com]: # Press [Enter]
 ```
+
 1. Download `mnist` from terminal to verify that your configuration is working.
 
 ```bash
@@ -63,7 +64,7 @@ With Hafnia configured on your local machine, it is now possible to download
 and explore the dataset sample with a python script:
 
 ```python
-from hafnia.data import
+from hafnia.data import get_dataset_path
 from hafnia.dataset.hafnia_dataset import HafniaDataset
 
 # To download the sample dataset use:
@@ -96,19 +97,19 @@ midwest-vehicle-detection
 You can interact with data as you want, but we also provide `HafniaDataset`
 for loading/saving, managing and interacting with the dataset.
 
-We recommend
-
+We recommend the example script [examples/example_hafnia_dataset.py](examples/example_hafnia_dataset.py)
+for a short introduction on the `HafniaDataset`.
 
 Below is a short introduction to the `HafniaDataset` class.
 
 ```python
 from hafnia.dataset.hafnia_dataset import HafniaDataset, Sample
 
-# Load dataset
+# Load dataset from path
 dataset = HafniaDataset.read_from_path(path_dataset)
 
-#
-
+# Or get dataset directly by name
+dataset = HafniaDataset.from_name("midwest-vehicle-detection")
 
 # Print dataset information
 dataset.print_stats()
@@ -173,6 +174,8 @@ DatasetInfo(
     'duration_average': 120.0,
     ...
   }
+  "format_version": "0.0.2",
+  "updated_at": "2025-09-24T21:50:20.231263"
 )
 ```
 
@@ -212,7 +215,7 @@ Sample(
   height=1080,
   width=1920,
   split='train',
-
+  tags=["sample"],
   collection_index=None,
   collection_id=None,
   remote_path='s3://mdi-production-midwest-vehicle-detection/sample/data/343403325f27e390.png',
@@ -276,10 +279,10 @@ Sample(
 )
 ```
 
-To learn more,
+To learn more, we recommend the `HafniaDataset` example script [examples/example_hafnia_dataset.py](examples/example_hafnia_dataset.py).
 
 ### Dataset Locally vs. Training-aaS
-An important feature of `
+An important feature of `HafniaDataset.from_name` is that it will return the full dataset
 when loaded with Training-aaS on the Hafnia platform.
 
 This enables seamlessly switching between running/validating a training script
@@ -290,7 +293,7 @@ Available datasets with corresponding sample datasets can be found in [data libr
 
 
 ## Getting started: Experiment Tracking with HafniaLogger
-The `HafniaLogger` is an important part of the
+The `HafniaLogger` is an important part of the trainer and enables you to track, log and
 reproduce your experiments.
 
 When integrated into your training script, the `HafniaLogger` is responsible for collecting:
@@ -330,7 +333,7 @@ logger.log_scalar("validation/loss", value=0.1, step=100)
 logger.log_metric("validation/accuracy", value=0.95, step=100)
 ```
 
-
+The tracker behaves differently when running locally or in the cloud.
 Locally, experiment data is stored in a local folder `.data/experiments/{DATE_TIME}`.
 
 In the cloud, the experiment data will be available in the Hafnia platform under
@@ -354,7 +357,7 @@ and datasets available in the data library.
 
 ```python
 # Load Hugging Face dataset
-dataset_splits =
+dataset_splits = HafniaDataset.from_name("midwest-vehicle-detection")
 
 # Define transforms
 train_transforms = v2.Compose(
@@ -396,25 +399,25 @@ train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=
 
 
 ## Example: Training-aaS
-By combining logging and dataset loading, we can now construct our model
+By combining logging and dataset loading, we can now construct our model trainer package.
 
-To demonstrate this, we have provided a
-[
+To demonstrate this, we have provided a trainer package project that serves as a template for creating and structuring trainers. The example repo is called
+[trainer-classification](https://github.com/milestone-hafnia/trainer-classification)
 
-The project also contains additional information on how to structure your
-the
+The project also contains additional information on how to structure your trainer package, use the `HafniaLogger`, loading a dataset and different approach for launching
+the trainer on the Hafnia platform.
 
 
-## Create, Build and Run `
-In order to test
+## Create, Build and Run `trainer.zip` locally
+In order to test trainer package compatibility with Hafnia cloud use the following command to build and
 start the job locally.
 
 ```bash
-# Create '
-hafnia
-
-# Build the docker image locally from a '
-hafnia runc build-local
+# Create 'trainer.zip' in the root folder of your training trainer project '../trainer/classification'
+hafnia trainer create-zip ../trainer-classification
+
+# Build the docker image locally from a 'trainer.zip' file
+hafnia runc build-local trainer.zip
 
 # Execute the docker image locally with a desired dataset
 hafnia runc launch-local --dataset mnist "python scripts/train.py"