hafnia 0.3.0.tar.gz → 0.4.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. {hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/build.yaml +2 -2
  2. {hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/check_release.yaml +1 -1
  3. {hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/publish_docker.yaml +2 -2
  4. {hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/publish_pypi.yaml +1 -1
  5. {hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/tests.yaml +2 -1
  6. hafnia-0.4.1/.trivyignore +3 -0
  7. {hafnia-0.3.0 → hafnia-0.4.1}/PKG-INFO +11 -9
  8. {hafnia-0.3.0 → hafnia-0.4.1}/README.md +7 -6
  9. {hafnia-0.3.0 → hafnia-0.4.1}/examples/example_dataset_recipe.py +105 -30
  10. {hafnia-0.3.0 → hafnia-0.4.1}/examples/example_hafnia_dataset.py +24 -9
  11. {hafnia-0.3.0 → hafnia-0.4.1}/examples/example_torchvision_dataloader.py +2 -2
  12. {hafnia-0.3.0 → hafnia-0.4.1}/pyproject.toml +10 -5
  13. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/__main__.py +3 -1
  14. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/config.py +43 -3
  15. hafnia-0.4.1/src/cli/keychain.py +88 -0
  16. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/profile_cmds.py +5 -2
  17. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/__init__.py +1 -1
  18. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/dataset_helpers.py +9 -2
  19. hafnia-0.4.1/src/hafnia/dataset/dataset_names.py +190 -0
  20. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/dataset_recipe/dataset_recipe.py +49 -37
  21. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/dataset_recipe/recipe_transforms.py +18 -2
  22. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/dataset_upload_helper.py +83 -22
  23. hafnia-0.4.1/src/hafnia/dataset/format_conversions/format_image_classification_folder.py +110 -0
  24. hafnia-0.4.1/src/hafnia/dataset/format_conversions/format_yolo.py +164 -0
  25. hafnia-0.4.1/src/hafnia/dataset/format_conversions/torchvision_datasets.py +287 -0
  26. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/hafnia_dataset.py +396 -96
  27. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/operations/dataset_stats.py +84 -73
  28. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/operations/dataset_transformations.py +116 -47
  29. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/operations/table_transformations.py +135 -17
  30. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/bbox.py +25 -14
  31. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/bitmask.py +22 -15
  32. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/classification.py +16 -8
  33. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/point.py +7 -3
  34. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/polygon.py +15 -10
  35. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/primitive.py +1 -1
  36. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/segmentation.py +12 -9
  37. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/experiment/hafnia_logger.py +0 -9
  38. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/platform/dataset_recipe.py +7 -2
  39. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/platform/datasets.py +5 -9
  40. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/platform/download.py +24 -90
  41. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/torch_helpers.py +12 -12
  42. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/utils.py +17 -0
  43. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/visualizations/image_visualizations.py +3 -1
  44. {hafnia-0.3.0 → hafnia-0.4.1}/tests/conftest.py +3 -7
  45. hafnia-0.4.1/tests/data/dataset_formats/format_yolo/data/000000000139.jpg +0 -0
  46. hafnia-0.4.1/tests/data/dataset_formats/format_yolo/data/000000000139.txt +20 -0
  47. hafnia-0.4.1/tests/data/dataset_formats/format_yolo/data/000000000285.jpg +0 -0
  48. hafnia-0.4.1/tests/data/dataset_formats/format_yolo/data/000000000285.txt +1 -0
  49. hafnia-0.4.1/tests/data/dataset_formats/format_yolo/data/000000000632.jpg +0 -0
  50. hafnia-0.4.1/tests/data/dataset_formats/format_yolo/data/000000000632.txt +18 -0
  51. hafnia-0.4.1/tests/data/dataset_formats/format_yolo/images.txt +3 -0
  52. hafnia-0.4.1/tests/data/dataset_formats/format_yolo/obj.names +80 -0
  53. hafnia-0.4.1/tests/data/dataset_image_metadata_schema.yaml +336 -0
  54. hafnia-0.4.1/tests/data/expected_images/test_dataset_transformations/test_video_storage_format_read_image.png +0 -0
  55. hafnia-0.4.1/tests/data/expected_images/test_format_yolo/test_format_yolo_import_export_tiny_dataset.png +0 -0
  56. hafnia-0.4.1/tests/data/expected_images/test_format_yolo/test_import_yolo_format_visualized.png +0 -0
  57. hafnia-0.4.1/tests/data/expected_images/test_samples/test_check_dataset[caltech-101].png +0 -0
  58. hafnia-0.4.1/tests/data/expected_images/test_samples/test_check_dataset[caltech-256].png +0 -0
  59. hafnia-0.4.1/tests/data/expected_images/test_samples/test_check_dataset[cifar100].png +0 -0
  60. hafnia-0.4.1/tests/data/expected_images/test_samples/test_check_dataset[cifar10].png +0 -0
  61. hafnia-0.4.1/tests/data/expected_images/test_samples/test_check_dataset[midwest-vehicle-detection].png +0 -0
  62. hafnia-0.4.1/tests/data/expected_images/test_samples/test_check_dataset[mnist].png +0 -0
  63. {hafnia-0.3.0 → hafnia-0.4.1}/tests/data/expected_images/test_samples/test_check_dataset[tiny-dataset].png +0 -0
  64. hafnia-0.4.1/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-101].png +0 -0
  65. hafnia-0.4.1/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-256].png +0 -0
  66. hafnia-0.4.1/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar100].png +0 -0
  67. hafnia-0.4.1/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar10].png +0 -0
  68. hafnia-0.4.1/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[midwest-vehicle-detection].png +0 -0
  69. hafnia-0.4.1/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[mnist].png +0 -0
  70. {hafnia-0.3.0 → hafnia-0.4.1}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[tiny-dataset].png +0 -0
  71. hafnia-0.4.1/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-coco-2017].png +0 -0
  72. hafnia-0.4.1/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-tiny-dataset].png +0 -0
  73. hafnia-0.4.1/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-coco-2017].png +0 -0
  74. hafnia-0.4.1/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-tiny-dataset].png +0 -0
  75. hafnia-0.4.1/tests/data/expected_images/test_visualizations/test_mask_region[micro-coco-2017].png +0 -0
  76. hafnia-0.4.1/tests/data/expected_images/test_visualizations/test_mask_region[micro-tiny-dataset].png +0 -0
  77. hafnia-0.4.1/tests/data/micro_test_datasets/micro-coco-2017/annotations.jsonl +3 -0
  78. hafnia-0.4.1/tests/data/micro_test_datasets/micro-coco-2017/annotations.parquet +0 -0
  79. hafnia-0.4.1/tests/data/micro_test_datasets/micro-coco-2017/data/657/657dff54d5175e2ae9f4b9629cf57646.jpg +0 -0
  80. hafnia-0.4.1/tests/data/micro_test_datasets/micro-coco-2017/data/825/825fa2d2d9416694b8e81a47ca38f580.jpg +0 -0
  81. hafnia-0.4.1/tests/data/micro_test_datasets/micro-coco-2017/data/aa3/aa3cc40b5cde88e5bd189c0b3e6c223c.jpg +0 -0
  82. {hafnia-0.3.0 → hafnia-0.4.1}/tests/data/micro_test_datasets/micro-coco-2017/dataset_info.json +10 -9
  83. hafnia-0.4.1/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.jsonl +3 -0
  84. hafnia-0.4.1/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.parquet +0 -0
  85. hafnia-0.4.1/tests/data/micro_test_datasets/micro-tiny-dataset/data/2da/2da1d8dbf2b60bdab8dff1d7f5c2dfb5.png +0 -0
  86. {hafnia-0.3.0 → hafnia-0.4.1}/tests/data/micro_test_datasets/micro-tiny-dataset/dataset_info.json +8 -42
  87. {hafnia-0.3.0 → hafnia-0.4.1}/tests/helper_testing.py +106 -5
  88. {hafnia-0.3.0 → hafnia-0.4.1}/tests/integration/test_cli_integration.py +11 -6
  89. {hafnia-0.3.0 → hafnia-0.4.1}/tests/integration/test_dataset_merges.py +17 -6
  90. {hafnia-0.3.0 → hafnia-0.4.1}/tests/integration/test_dataset_recipes_with_platform.py +28 -2
  91. {hafnia-0.3.0 → hafnia-0.4.1}/tests/integration/test_samples.py +9 -13
  92. hafnia-0.4.1/tests/integration/test_torchvision_datasets.py +21 -0
  93. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/dataset_recipe/test_dataset_recipes.py +9 -46
  94. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/dataset_recipe/test_recipe_transformations.py +43 -9
  95. hafnia-0.4.1/tests/unit/dataset/format_conversions/test_format_yolo.py +85 -0
  96. hafnia-0.4.1/tests/unit/dataset/format_conversions/test_image_classification_directory.py +47 -0
  97. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/operations/test_dataset_stats.py +8 -8
  98. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/operations/test_dataset_transformations.py +89 -9
  99. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/operations/test_table_transformations.py +9 -3
  100. hafnia-0.4.1/tests/unit/dataset/test_dataset_names.py +13 -0
  101. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/test_hafnia_dataset.py +43 -4
  102. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/test_shape_primitives.py +42 -3
  103. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/test_cli.py +2 -2
  104. hafnia-0.4.1/uv.lock +4106 -0
  105. hafnia-0.3.0/src/hafnia/dataset/dataset_names.py +0 -76
  106. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-101].png +0 -0
  107. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-256].png +0 -0
  108. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[cifar100].png +0 -0
  109. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[cifar10].png +0 -0
  110. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[midwest-vehicle-detection].png +0 -0
  111. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[mnist].png +0 -0
  112. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-101].png +0 -0
  113. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-256].png +0 -0
  114. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar100].png +0 -0
  115. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar10].png +0 -0
  116. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[midwest-vehicle-detection].png +0 -0
  117. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[mnist].png +0 -0
  118. hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-coco-2017].png +0 -0
  119. hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-tiny-dataset].png +0 -0
  120. hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-coco-2017].png +0 -0
  121. hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-tiny-dataset].png +0 -0
  122. hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_mask_region[micro-coco-2017].png +0 -0
  123. hafnia-0.3.0/tests/data/expected_images/test_visualizations/test_mask_region[micro-tiny-dataset].png +0 -0
  124. hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/annotations.jsonl +0 -3
  125. hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/annotations.parquet +0 -0
  126. hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/data/3b4/3b4165c8c4f830be4e95c6eb6209880a.jpg +0 -0
  127. hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/data/837/837b642d8a7b3b8dcf86c7a23edb55ce.jpg +0 -0
  128. hafnia-0.3.0/tests/data/micro_test_datasets/micro-coco-2017/data/dc8/dc8efc98ce6304fe182a2c0a3ce312cf.jpg +0 -0
  129. hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.jsonl +0 -3
  130. hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.parquet +0 -0
  131. hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/data/907/907f182da7bcedb8222bbd5721a8a86e.png +0 -0
  132. hafnia-0.3.0/uv.lock +0 -3308
  133. {hafnia-0.3.0 → hafnia-0.4.1}/.devcontainer/devcontainer.json +0 -0
  134. {hafnia-0.3.0 → hafnia-0.4.1}/.devcontainer/hooks/post_create +0 -0
  135. {hafnia-0.3.0 → hafnia-0.4.1}/.github/dependabot.yaml +0 -0
  136. {hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/Dockerfile +0 -0
  137. {hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/ci_cd.yaml +0 -0
  138. {hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/lint.yaml +0 -0
  139. {hafnia-0.3.0 → hafnia-0.4.1}/.gitignore +0 -0
  140. {hafnia-0.3.0 → hafnia-0.4.1}/.pre-commit-config.yaml +0 -0
  141. {hafnia-0.3.0 → hafnia-0.4.1}/.python-version +0 -0
  142. {hafnia-0.3.0 → hafnia-0.4.1}/.vscode/extensions.json +0 -0
  143. {hafnia-0.3.0 → hafnia-0.4.1}/.vscode/launch.json +0 -0
  144. {hafnia-0.3.0 → hafnia-0.4.1}/.vscode/settings.json +0 -0
  145. {hafnia-0.3.0 → hafnia-0.4.1}/LICENSE +0 -0
  146. {hafnia-0.3.0 → hafnia-0.4.1}/docs/cli.md +0 -0
  147. {hafnia-0.3.0 → hafnia-0.4.1}/docs/release.md +0 -0
  148. {hafnia-0.3.0 → hafnia-0.4.1}/examples/example_logger.py +0 -0
  149. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/__init__.py +0 -0
  150. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/consts.py +0 -0
  151. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/dataset_cmds.py +0 -0
  152. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/dataset_recipe_cmds.py +0 -0
  153. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/experiment_cmds.py +0 -0
  154. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/runc_cmds.py +0 -0
  155. {hafnia-0.3.0 → hafnia-0.4.1}/src/cli/trainer_package_cmds.py +0 -0
  156. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/data/__init__.py +0 -0
  157. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/data/factory.py +0 -0
  158. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/dataset_recipe/recipe_types.py +0 -0
  159. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/license_types.py +0 -0
  160. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/__init__.py +0 -0
  161. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/dataset/primitives/utils.py +0 -0
  162. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/experiment/__init__.py +0 -0
  163. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/http.py +0 -0
  164. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/log.py +0 -0
  165. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/platform/__init__.py +0 -0
  166. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/platform/builder.py +0 -0
  167. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/platform/experiment.py +0 -0
  168. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/platform/trainer_package.py +0 -0
  169. {hafnia-0.3.0 → hafnia-0.4.1}/src/hafnia/visualizations/colors.py +0 -0
  170. {hafnia-0.3.0 → hafnia-0.4.1}/tests/__init__.py +0 -0
  171. {hafnia-0.3.0 → hafnia-0.4.1}/tests/data/expected_images/test_samples/test_check_dataset[coco-2017].png +0 -0
  172. {hafnia-0.3.0 → hafnia-0.4.1}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[coco-2017].png +0 -0
  173. {hafnia-0.3.0 → hafnia-0.4.1}/tests/data/micro_test_datasets/micro-tiny-dataset/data/3dd/3ddec2275a02e79e3251d85443622e4c.png +0 -0
  174. {hafnia-0.3.0 → hafnia-0.4.1}/tests/data/micro_test_datasets/micro-tiny-dataset/data/4d8/4d8450b045e60e8f3657ababa44af9b6.png +0 -0
  175. {hafnia-0.3.0 → hafnia-0.4.1}/tests/integration/test_check_example_scripts.py +0 -0
  176. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/dataset_recipe/test_dataset_recipe_helpers.py +0 -0
  177. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/test_colors.py +0 -0
  178. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/dataset/test_dataset_helpers.py +0 -0
  179. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/test_builder.py +0 -0
  180. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/test_hafnia_logger.py +0 -0
  181. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/test_utils.py +0 -0
  182. {hafnia-0.3.0 → hafnia-0.4.1}/tests/unit/test_visualizations.py +0 -0

{hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/build.yaml
@@ -29,7 +29,7 @@ jobs:
  echo "package_version=$VERSION" >> $GITHUB_OUTPUT

  - name: Install uv
- uses: astral-sh/setup-uv@v6
+ uses: astral-sh/setup-uv@v7
  with:
  version: 0.6.8

@@ -45,7 +45,7 @@ jobs:
  run: uv build

  - name: Upload package artifact
- uses: actions/upload-artifact@v4.6.2
+ uses: actions/upload-artifact@v5.0.0
  with:
  name: python-package
  path: dist/

{hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/check_release.yaml
@@ -20,7 +20,7 @@ jobs:
  make_release: ${{ steps.check_release.outputs.make_release }}
  steps:
  - name: Download package artifact
- uses: actions/download-artifact@v5.0.0
+ uses: actions/download-artifact@v6.0.0
  with:
  name: python-package
  path: dist/

{hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/publish_docker.yaml
@@ -31,7 +31,7 @@ jobs:
  python-version-file: ${{ inputs.python-version-file }}

  - name: Download package artifact
- uses: actions/download-artifact@v5.0.0
+ uses: actions/download-artifact@v6.0.0
  with:
  name: python-package
  path: dist/
@@ -47,7 +47,7 @@ jobs:
  echo "aws_region=${{ secrets.STAGE_AWS_REGION }}" >> $GITHUB_OUTPUT
  fi
  - name: Configure AWS credentials
- uses: aws-actions/configure-aws-credentials@v5.0.0
+ uses: aws-actions/configure-aws-credentials@v5.1.0
  with:
  role-to-assume: arn:aws:iam::${{ steps.env-vars.outputs.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
  aws-region: ${{ steps.env-vars.outputs.aws_region }}

{hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/publish_pypi.yaml
@@ -17,7 +17,7 @@ jobs:
  contents: read
  steps:
  - name: Download package artifact
- uses: actions/download-artifact@v5.0.0
+ uses: actions/download-artifact@v6.0.0
  with:
  name: python-package
  path: dist/

{hafnia-0.3.0 → hafnia-0.4.1}/.github/workflows/tests.yaml
@@ -11,6 +11,7 @@ jobs:
  test:
  runs-on: ${{ matrix.os }}
  strategy:
+ max-parallel: 1
  matrix:
  os: [ubuntu-latest, windows-latest]
  steps:
@@ -19,7 +20,7 @@ jobs:
  with:
  python-version-file: ${{ inputs.python-version-file }}
  - name: Install uv
- uses: astral-sh/setup-uv@v6
+ uses: astral-sh/setup-uv@v7
  with:
  version: 0.6.8
  - name: Install the project

hafnia-0.4.1/.trivyignore
@@ -0,0 +1,3 @@
+ # Ignore 'CVE-2024-37059' issue https://avd.aquasec.com/nvd/2024/cve-2024-37059/
+ # The vulnerability does not apply to our platform as models are not loaded on our platform.
+ CVE-2024-37059

{hafnia-0.3.0 → hafnia-0.4.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hafnia
- Version: 0.3.0
+ Version: 0.4.1
  Summary: Python SDK for communication with Hafnia platform.
  Author-email: Milestone Systems <hafniaplatform@milestone.dk>
  License-File: LICENSE
@@ -9,7 +9,9 @@ Requires-Dist: boto3>=1.35.91
  Requires-Dist: click>=8.1.8
  Requires-Dist: emoji>=2.14.1
  Requires-Dist: flatten-dict>=0.4.2
- Requires-Dist: mlflow>=3.2.0
+ Requires-Dist: keyring>=25.6.0
+ Requires-Dist: mcp==1.16.0
+ Requires-Dist: mlflow>=3.4.0
  Requires-Dist: more-itertools>=10.7.0
  Requires-Dist: opencv-python-headless>=4.11.0.86
  Requires-Dist: pathspec>=0.12.1
@@ -22,7 +24,6 @@ Requires-Dist: rich>=13.9.4
  Requires-Dist: s5cmd>=0.2.0
  Requires-Dist: sagemaker-mlflow>=0.1.0
  Requires-Dist: seedir>=0.5.0
- Requires-Dist: tqdm>=4.67.1
  Requires-Dist: xxhash>=3.5.0
  Description-Content-Type: text/markdown

@@ -80,6 +81,7 @@ Copy the key and save it for later use.
  Hafnia API Key: # Pass your HAFNIA API key
  Hafnia Platform URL [https://api.mdi.milestonesys.com]: # Press [Enter]
  ```
+
  1. Download `mnist` from terminal to verify that your configuration is working.

  ```bash
@@ -91,7 +93,7 @@ With Hafnia configured on your local machine, it is now possible to download
  and explore the dataset sample with a python script:

  ```python
- from hafnia.data import load_dataset, get_dataset_path
+ from hafnia.data import get_dataset_path
  from hafnia.dataset.hafnia_dataset import HafniaDataset

  # To download the sample dataset use:
@@ -156,7 +158,7 @@ and `dataset.samples` with annotations as a polars DataFrame
  print(dataset.samples.head(2))
  shape: (2, 14)
  ┌──────────────┬─────────────────────────────────┬────────┬───────┬───┬─────────────────────────────────┬──────────┬──────────┬─────────────────────────────────┐
- │ sample_index ┆ file_name ┆ height ┆ width ┆ … ┆ objects ┆ bitmasks ┆ polygons ┆ meta │
+ │ sample_index ┆ file_name ┆ height ┆ width ┆ … ┆ bboxes ┆ bitmasks ┆ polygons ┆ meta │
  │ --- ┆ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │
  │ u32 ┆ str ┆ i64 ┆ i64 ┆ ┆ list[struct[11]] ┆ null ┆ null ┆ struct[5] │
  ╞══════════════╪═════════════════════════════════╪════════╪═══════╪═══╪═════════════════════════════════╪══════════╪══════════╪═════════════════════════════════╡
@@ -216,7 +218,7 @@ sample_dict = dataset[0]

  for sample_dict in dataset:
  sample = Sample(**sample_dict)
- print(sample.sample_id, sample.objects)
+ print(sample.sample_id, sample.bboxes)
  break
  ```
  Not that it is possible to create a `Sample` object from the sample dictionary.
@@ -360,7 +362,7 @@ logger.log_scalar("validation/loss", value=0.1, step=100)
  logger.log_metric("validation/accuracy", value=0.95, step=100)
  ```

- Similar to `load_dataset`, the tracker behaves differently when running locally or in the cloud.
+ The tracker behaves differently when running locally or in the cloud.
  Locally, experiment data is stored in a local folder `.data/experiments/{DATE_TIME}`.

  In the cloud, the experiment data will be available in the Hafnia platform under
@@ -384,7 +386,7 @@ and datasets available in the data library.

  ```python
  # Load Hugging Face dataset
- dataset_splits = load_dataset("midwest-vehicle-detection")
+ dataset_splits = HafniaDataset.from_name("midwest-vehicle-detection")

  # Define transforms
  train_transforms = v2.Compose(
@@ -419,7 +421,7 @@ pil_image.save("visualized_labels.png")

  # Create DataLoaders - using TorchVisionCollateFn
  collate_fn = torch_helpers.TorchVisionCollateFn(
- skip_stacking=["objects.bbox", "objects.class_idx"]
+ skip_stacking=["bboxes.bbox", "bboxes.class_idx"]
  )
  train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
  ```

{hafnia-0.3.0 → hafnia-0.4.1}/README.md
@@ -52,6 +52,7 @@ Copy the key and save it for later use.
  Hafnia API Key: # Pass your HAFNIA API key
  Hafnia Platform URL [https://api.mdi.milestonesys.com]: # Press [Enter]
  ```
+
  1. Download `mnist` from terminal to verify that your configuration is working.

  ```bash
@@ -63,7 +64,7 @@ With Hafnia configured on your local machine, it is now possible to download
  and explore the dataset sample with a python script:

  ```python
- from hafnia.data import load_dataset, get_dataset_path
+ from hafnia.data import get_dataset_path
  from hafnia.dataset.hafnia_dataset import HafniaDataset

  # To download the sample dataset use:
@@ -128,7 +129,7 @@ and `dataset.samples` with annotations as a polars DataFrame
  print(dataset.samples.head(2))
  shape: (2, 14)
  ┌──────────────┬─────────────────────────────────┬────────┬───────┬───┬─────────────────────────────────┬──────────┬──────────┬─────────────────────────────────┐
- │ sample_index ┆ file_name ┆ height ┆ width ┆ … ┆ objects ┆ bitmasks ┆ polygons ┆ meta │
+ │ sample_index ┆ file_name ┆ height ┆ width ┆ … ┆ bboxes ┆ bitmasks ┆ polygons ┆ meta │
  │ --- ┆ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │
  │ u32 ┆ str ┆ i64 ┆ i64 ┆ ┆ list[struct[11]] ┆ null ┆ null ┆ struct[5] │
  ╞══════════════╪═════════════════════════════════╪════════╪═══════╪═══╪═════════════════════════════════╪══════════╪══════════╪═════════════════════════════════╡
@@ -188,7 +189,7 @@ sample_dict = dataset[0]

  for sample_dict in dataset:
  sample = Sample(**sample_dict)
- print(sample.sample_id, sample.objects)
+ print(sample.sample_id, sample.bboxes)
  break
  ```
  Not that it is possible to create a `Sample` object from the sample dictionary.
@@ -332,7 +333,7 @@ logger.log_scalar("validation/loss", value=0.1, step=100)
  logger.log_metric("validation/accuracy", value=0.95, step=100)
  ```

- Similar to `load_dataset`, the tracker behaves differently when running locally or in the cloud.
+ The tracker behaves differently when running locally or in the cloud.
  Locally, experiment data is stored in a local folder `.data/experiments/{DATE_TIME}`.

  In the cloud, the experiment data will be available in the Hafnia platform under
@@ -356,7 +357,7 @@ and datasets available in the data library.

  ```python
  # Load Hugging Face dataset
- dataset_splits = load_dataset("midwest-vehicle-detection")
+ dataset_splits = HafniaDataset.from_name("midwest-vehicle-detection")

  # Define transforms
  train_transforms = v2.Compose(
@@ -391,7 +392,7 @@ pil_image.save("visualized_labels.png")

  # Create DataLoaders - using TorchVisionCollateFn
  collate_fn = torch_helpers.TorchVisionCollateFn(
- skip_stacking=["objects.bbox", "objects.class_idx"]
+ skip_stacking=["bboxes.bbox", "bboxes.class_idx"]
  )
  train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
  ```
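
The README hunks above replace the old `load_dataset` helper with `HafniaDataset.from_name` and rename the `objects` field/column to `bboxes`. A minimal sketch of the updated access pattern, assuming the `midwest-vehicle-detection` dataset is reachable through your Hafnia configuration; variable names here are illustrative only:

```python
from hafnia import torch_helpers
from hafnia.dataset.hafnia_dataset import HafniaDataset, Sample

# Load a dataset by name (previously done via `load_dataset`)
dataset = HafniaDataset.from_name("midwest-vehicle-detection")

# Bounding boxes are now exposed as `sample.bboxes` instead of `sample.objects`
for sample_dict in dataset:
    sample = Sample(**sample_dict)
    print(sample.sample_id, sample.bboxes)
    break

# Collate-function column names follow the same rename, using the `bboxes.` prefix
collate_fn = torch_helpers.TorchVisionCollateFn(skip_stacking=["bboxes.bbox", "bboxes.class_idx"])
```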

{hafnia-0.3.0 → hafnia-0.4.1}/examples/example_dataset_recipe.py
@@ -3,7 +3,7 @@ from pathlib import Path
  from rich import print as rprint

  from hafnia import utils
- from hafnia.data.factory import load_dataset
+ from hafnia.dataset.dataset_names import OPS_REMOVE_CLASS
  from hafnia.dataset.dataset_recipe.dataset_recipe import DatasetRecipe
  from hafnia.dataset.dataset_recipe.recipe_transforms import (
  SelectSamples,
@@ -50,7 +50,7 @@ dataset_recipe.as_python_code()
  # executed in the TaaS platform. This is demonstrated below:
  if utils.is_hafnia_configured(): # First ensure you are connected to the hafnia platform
  # Upload the dataset recipe - this will make it available for TaaS and for users of your organization
- dataset_recipe.as_platform_recipe(recipe_name="example-mnist-recipe")
+ dataset_recipe.as_platform_recipe(recipe_name="example-mnist-recipe", overwrite=True)

  # The recipe is now available in TaaS, for different environments and other users in your organization
  dataset_recipe_again = DatasetRecipe.from_recipe_name(name="example-mnist-recipe")
@@ -95,53 +95,128 @@ rprint(dataset_recipe) # as a python object
  print(dataset_recipe.as_json_str()) # as a JSON string


- # Example: Using the 'load_dataset' function
- merged_dataset: HafniaDataset = load_dataset(dataset_recipe)
- # You get a few extra things when using `load_dataset`.
- # 1) You get the dataset directly - you don't have to call `build()` on the recipe.
- # 2) The dataset is cached if it already exists, so you don't have to
- # download or rebuild the dataset on the second run.
- # 3) You can use an implicit form of the recipe. One example of this is that you just specify
- # the dataset name `load_dataset("mnist")` or path `load_dataset(Path(".data/datasets/mnist"))`
-
-
+ ### Real-world Example: Merge datasets to create a Person+Vehicle dataset ###
+ # 1) The first step is to use the regular 'HafniaDataset' interface to investigate and understand the datasets
+
+ # 1a) Explore 'coco-2017'
+ coco = HafniaDataset.from_name("coco-2017")
+ coco.print_stats() # Print dataset statistics
+ coco_class_names = coco.info.get_task_by_primitive("Bbox").class_names # Get the class names for the bbox task
+ # You will notice coco has 80 classes including 'person' and various vehicle classes such as 'car', 'bus', 'truck', etc.
+ # but also many unrelated classes such as 'toaster', 'hair drier', etc.
+
+ # 1b) Explore 'midwest-vehicle-detection'
+ midwest = HafniaDataset.from_name("midwest-vehicle-detection")
+ midwest.print_stats() # Print dataset statistics
+ midwest_class_names = midwest.info.get_task_by_primitive("Bbox").class_names
+ # You will also notice midwest has similar classes, but they are named differently, e.g. 'Persons',
+ # 'Vehicle.Car', 'Vehicle.Bicycle', etc.
+
+ # 2) We will now use the 'HafniaDataset' interface to verify operations (class remapping, merging, filtering)
+
+ # 2a) Remap class names to have the same class names across datasets
+ mappings_coco = {
+ "person": "Person",
+ "bicycle": "Vehicle",
+ "car": "Vehicle",
+ "motorcycle": "Vehicle",
+ "bus": "Vehicle",
+ "train": "Vehicle",
+ "truck": "Vehicle",
+ }
+ mapping_midwest = {
+ "Person": "Person",
+ "Vehicle*": "Vehicle", # Wildcard mapping. Selects class names starting with 'Vehicle.' e.g. 'Vehicle.Bicycle', "Vehicle.Car', etc.
+ "Vehicle.Trailer": OPS_REMOVE_CLASS, # Use this to remove a class
+ }
+ coco_remapped = coco.class_mapper(class_mapping=mappings_coco, method="remove_undefined", task_name="object_detection")
+ midwest_remapped = midwest.class_mapper(class_mapping=mapping_midwest, task_name="object_detection")
+
+ # 2b) Merge datasets
+ merged_dataset_all_images = HafniaDataset.from_merge(dataset0=coco_remapped, dataset1=midwest_remapped)
+
+ # 2c) Remove images without 'Person' or 'Vehicle' annotations
+ merged_dataset = merged_dataset_all_images.select_samples_by_class_name(
+ name=["Person", "Vehicle"], task_name="object_detection"
+ )
+ merged_dataset.print_stats()
+
+ # 3) Once you have verified operations using the 'HafniaDataset' interface, you can convert
+ # the operations to a single 'DatasetRecipe'
+ merged_recipe = DatasetRecipe.from_merge(
+ recipe0=DatasetRecipe.from_name("coco-2017").class_mapper(
+ class_mapping=mappings_coco, method="remove_undefined", task_name="object_detection"
+ ),
+ recipe1=DatasetRecipe.from_name("midwest-vehicle-detection").class_mapper(
+ class_mapping=mapping_midwest, task_name="object_detection"
+ ),
+ ).select_samples_by_class_name(name=["Person", "Vehicle"], task_name="object_detection")
+
+ # 3a) Verify again on the sample datasets, that the recipe works and can build as a dataset
+ merged_dataset = merged_recipe.build()
+ merged_dataset.print_stats()
+
+ # 3b) Optionally: Save the recipe to file
+ path_recipe = Path(".data/dataset_recipes/example-merged-person-vehicle-recipe.json")
+ merged_recipe.as_json_file(path_recipe)
+ if utils.is_hafnia_configured():
+ # 3c) Upload dataset recipe to Training-aaS platform
+ recipe_response = merged_recipe.as_platform_recipe(recipe_name="person-vehicle-detection", overwrite=True)
+ print(f"Recipe Name: '{recipe_response['name']}', Recipe id: '{recipe_response['id']}'")
+
+ # 4) The recipe is now available in TaaS for you and other users in your organization
+ # 4a) View recipes from your terminal with 'hafnia dataset-recipe ls'
+ # 4b) (Coming soon) Or go to 'Dataset Recipes' in the TaaS web platform: https://hafnia.milestonesys.com/training-aas/dataset-recipes
+
+ # 5) Launch an experiment with the dataset:
+ # 5a) Using the CLI:
+ # 'hafnia experiment create --dataset-recipe person-vehicle-detection --trainer-path ../trainer-classification'
+ # 5b) (Coming soon) Or through the TaaS web platform: https://hafnia.milestonesys.com/training-aas/experiments
+
+ # 6) Monitor and manage your experiments
+ # 6a) View experiments using the web platform https://staging02.mdi.milestonesys.com/training-aas/experiments
+ # 6b) Or use the CLI: 'hafnia experiment ls'
  ### DatasetRecipe Implicit Form ###
  # Below we demonstrate the difference between implicit and explicit forms of dataset recipes.
  # Example: Get dataset by name with implicit and explicit forms
- dataset = load_dataset("mnist") # Implicit form
- dataset = load_dataset(DatasetRecipe.from_name(name="mnist")) # Explicit form
+ recipe_implicit_form = "mnist"
+ recipe_explicit_form = DatasetRecipe.from_name(name="mnist")
+
+ # The implicit form can now be loaded and built as a dataset
+ dataset_implicit = DatasetRecipe.from_implicit_form(recipe_implicit_form).build()
+ # Or directly as a dataset
+ dataset_implicit = HafniaDataset.from_recipe(recipe_implicit_form)
+

  # Example: Get dataset from path with implicit and explicit forms:
- dataset = load_dataset(Path(".data/datasets/mnist")) # Implicit form
- dataset = load_dataset(DatasetRecipe.from_path(path_folder=Path(".data/datasets/mnist"))) # Explicit form
+ recipe_implicit_form = Path(".data/datasets/mnist")
+ recipe_explicit_form = DatasetRecipe.from_path(path_folder=Path(".data/datasets/mnist"))

  # Example: Merge datasets with implicit and explicit forms
- dataset = load_dataset(("mnist", "mnist")) # Implicit form
- dataset = load_dataset( # Explicit form
- DatasetRecipe.from_merger(
- recipes=[
- DatasetRecipe.from_name(name="mnist"),
- DatasetRecipe.from_name(name="mnist"),
- ]
- )
+ recipe_implicit_form = ("mnist", "mnist")
+ recipe_explicit_form = DatasetRecipe.from_merger(
+ recipes=[
+ DatasetRecipe.from_name(name="mnist"),
+ DatasetRecipe.from_name(name="mnist"),
+ ]
  )

  # Example: Define a dataset with transformations using implicit and explicit forms
- dataset = load_dataset(["mnist", SelectSamples(n_samples=20), Shuffle()]) # Implicit form
- dataset = load_dataset(DatasetRecipe.from_name(name="mnist").select_samples(n_samples=20).shuffle()) # Explicit form
+ recipe_implicit_form = ["mnist", SelectSamples(n_samples=20), Shuffle()]
+ recipe_explicit_form = DatasetRecipe.from_name(name="mnist").select_samples(n_samples=20).shuffle()


  # Example: Complex nested example with implicit vs explicit forms
  # Implicit form of a complex dataset recipe
  split_ratio = {"train": 0.8, "val": 0.1, "test": 0.1}
- implicit_recipe = (
+ recipe_implicit_complex = (
  ("mnist", "mnist"),
  [Path(".data/datasets/mnist"), SelectSamples(n_samples=30), SplitsByRatios(split_ratios=split_ratio)],
  ["mnist", SelectSamples(n_samples=20), Shuffle()],
  )

  # Explicit form of the same complex dataset recipe
- explicit_recipe = DatasetRecipe.from_merger(
+ recipe_explicit_complex = DatasetRecipe.from_merger(
  recipes=[
  DatasetRecipe.from_merger(
  recipes=[
@@ -165,10 +240,10 @@ explicit_recipe = DatasetRecipe.from_merger(


  # To convert from implicit to explicit recipe form, you can use the `from_implicit_form` method.
- explicit_recipe_from_implicit = DatasetRecipe.from_implicit_form(implicit_recipe)
+ explicit_recipe_from_implicit = DatasetRecipe.from_implicit_form(recipe_implicit_complex)
  rprint("Converted explicit recipe:")
  rprint(explicit_recipe_from_implicit)

  # Verify that the conversion produces the same result
- assert explicit_recipe_from_implicit == explicit_recipe
+ assert explicit_recipe_from_implicit == recipe_explicit_complex
  rprint("Conversion successful - recipes are equivalent!")

{hafnia-0.3.0 → hafnia-0.4.1}/examples/example_hafnia_dataset.py
@@ -5,7 +5,6 @@ import numpy as np
  from PIL import Image
  from rich import print as rprint

- from hafnia.data import load_dataset
  from hafnia.dataset.dataset_names import SplitName
  from hafnia.dataset.hafnia_dataset import DatasetInfo, HafniaDataset, Sample, TaskInfo
  from hafnia.dataset.primitives.bbox import Bbox
@@ -19,7 +18,7 @@ from hafnia.dataset.primitives.polygon import Polygon
  # And configure it with your Hafnia account:
  # hafnia configure

- # Load dataset
+ # Load sample dataset
  dataset = HafniaDataset.from_name("mnist")

  # Dataset information is stored in 'dataset.info'
@@ -34,8 +33,8 @@ dataset.print_class_distribution()
  dataset.print_stats() # Print verbose dataset statistics

  # Get dataset stats
- dataset.class_counts_all() # Get class counts for all tasks
- dataset.class_counts_for_task(primitive=Classification) # Get class counts for a specific task
+ dataset.calculate_class_counts() # Get class counts for all tasks
+ dataset.calculate_task_class_counts(primitive=Classification) # Get class counts for a specific task

  # Create a dataset split for training
  dataset_train = dataset.create_split_dataset("train")
@@ -53,6 +52,12 @@ new_dataset_splits = dataset.splits_by_ratios(split_ratios)
  # Get only samples with specific class names
  dataset_ones = dataset.select_samples_by_class_name(name="1 - one", primitive=Classification)

+ # Get access to a few full and public dataset through Hafnia (no login required)
+ # Available datasets: "mnist", "caltech-101", "caltech-256", "cifar10", "cifar100"
+ public_dataset = HafniaDataset.from_name_public_dataset("mnist", n_samples=100)
+ public_dataset.print_stats()
+
+
  # Rename class names with mapping
  class_mapping_strict = {
  "0 - zero": "even", # "0 - zero" will be renamed to "even". "even" appear first and get class index 0
@@ -70,7 +75,7 @@ dataset_mapped = dataset.class_mapper(class_mapping=class_mapping_strict)
  dataset_mapped.print_class_distribution()

  # Support Chaining Operations (load, shuffle, select samples)
- dataset = load_dataset("midwest-vehicle-detection").shuffle(seed=42).select_samples(n_samples=10)
+ dataset = HafniaDataset.from_name("midwest-vehicle-detection").shuffle(seed=42).select_samples(n_samples=10)


  # Write dataset to disk
@@ -81,9 +86,19 @@ dataset.write(path_dataset)
  # Load dataset from disk
  dataset_again = HafniaDataset.from_path(path_dataset)

+ ## Dataset importers and exporters ##
+ dataset_coco = HafniaDataset.from_name("coco-2017").select_samples(n_samples=5, seed=42)
+ path_yolo_format = Path(".data/tmp/yolo_dataset")
+
+ # Export dataset to YOLO format
+ dataset_coco.to_yolo_format(path_export_yolo_dataset=path_yolo_format)
+
+ # Import dataset from YOLO format
+ dataset_coco_imported = HafniaDataset.from_yolo_format(path_yolo_format)

+ ## Custom dataset operations and statistics ##
  # Want custom dataset transformations or statistics? Use the polars table (dataset.samples) directly
- n_objects = dataset.samples["objects"].list.len().sum()
+ n_objects = dataset.samples["bboxes"].list.len().sum()
  n_objects = dataset.samples[Bbox.column_name()].list.len().sum() # Use Bbox.column_name() to avoid magic variables
  n_classifications = dataset.samples[Classification.column_name()].list.len().sum()

@@ -101,7 +116,7 @@ for sample_dict in dataset_train:
  # Unpack dict into a Sample-object! Important for data validation, useability, IDE completion and mypy hints
  sample: Sample = Sample(**sample_dict)

- objects: List[Bbox] = sample.objects # Use 'sample.objects' access bounding boxes as a list of Bbox objects
+ bboxes: List[Bbox] = sample.bboxes # Use 'sample.bboxes' access bounding boxes as a list of Bbox objects
  bitmasks: List[Bitmask] = sample.bitmasks # Use 'sample.bitmasks' to access bitmasks as a list of Bitmask objects
  polygons: List[Polygon] = sample.polygons # Use 'sample.polygons' to access polygons as a list of Polygon objects
  classifications: List[Classification] = sample.classifications # As a list of Classification objects
@@ -124,12 +139,12 @@ for i_fake_sample in range(5):
  bboxes = [Bbox(top_left_x=0.1, top_left_y=0.20, width=0.1, height=0.2, class_name="car")]
  classifications = [Classification(class_name="vehicle", class_idx=0)]
  sample = Sample(
- file_name=f"path/to/image_{i_fake_sample:05}.jpg",
+ file_path=f"path/to/image_{i_fake_sample:05}.jpg",
  height=480,
  width=640,
  split="train",
  tags=["sample"],
- objects=bboxes,
+ bboxes=bboxes,
  classifications=classifications,
  )
  fake_samples.append(sample)
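
The new YOLO import/export helpers introduced above lend themselves to a quick round-trip check. A small sketch, assuming access to `coco-2017` and a writable `.data/tmp` directory; the path and the sample-count assertion are illustrative only:

```python
from pathlib import Path

from hafnia.dataset.hafnia_dataset import HafniaDataset

dataset = HafniaDataset.from_name("coco-2017").select_samples(n_samples=5, seed=42)

# Export to YOLO format and read it back in
path_yolo = Path(".data/tmp/yolo_roundtrip")
dataset.to_yolo_format(path_export_yolo_dataset=path_yolo)
reimported = HafniaDataset.from_yolo_format(path_yolo)

# The number of samples is expected to survive the round trip
assert len(reimported.samples) == len(dataset.samples)
```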

{hafnia-0.3.0 → hafnia-0.4.1}/examples/example_torchvision_dataloader.py
@@ -7,12 +7,12 @@ from torch.utils.data import DataLoader
  from torchvision.transforms import v2

  from hafnia import torch_helpers
- from hafnia.data import load_dataset
+ from hafnia.dataset.hafnia_dataset import HafniaDataset

  if __name__ == "__main__":
  torch.manual_seed(1)
  # Load Hugging Face dataset
- dataset = load_dataset("midwest-vehicle-detection")
+ dataset = HafniaDataset.from_name("midwest-vehicle-detection")

  # Define transforms
  train_transforms = v2.Compose(

{hafnia-0.3.0 → hafnia-0.4.1}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "hafnia"
- version = "0.3.0"
+ version = "0.4.1"
  description = "Python SDK for communication with Hafnia platform."
  readme = "README.md"
  authors = [
@@ -13,7 +13,7 @@ dependencies = [
  "click>=8.1.8",
  "emoji>=2.14.1",
  "flatten-dict>=0.4.2",
- "mlflow>=3.2.0",
+ "keyring>=25.6.0",
  "more-itertools>=10.7.0",
  "opencv-python-headless>=4.11.0.86",
  "pathspec>=0.12.1",
@@ -24,10 +24,11 @@ dependencies = [
  "pydantic>=2.10.4",
  "rich>=13.9.4",
  "s5cmd>=0.2.0",
- "sagemaker-mlflow>=0.1.0",
  "seedir>=0.5.0",
- "tqdm>=4.67.1",
  "xxhash>=3.5.0",
+ "mlflow>=3.4.0",
+ "sagemaker-mlflow>=0.1.0",
+ "mcp==1.16.0",
  ]

  [dependency-groups]
@@ -37,7 +38,7 @@ dev = [
  "torch>=2.6.0",
  "torchvision>=0.21.0",
  "flatten-dict>=0.4.2",
- "pytest-cov>=7.0.0",
+ "pytest-cov>=7.0.0",
  ]

  test = ["pytest>=8.3.4", "pre-commit>=4.2.0", "ruff>=0.9.1"]
@@ -71,5 +72,9 @@ disallow_incomplete_defs = false
  disallow_untyped_calls = false
  warn_unused_ignores = false

+ [[tool.mypy.overrides]]
+ module = "yaml"
+ ignore_missing_imports = true
+
  [tool.pytest.ini_options]
  markers = ["slow: marks tests as slow (deselect with '-m \"not slow\"')"]

{hafnia-0.3.0 → hafnia-0.4.1}/src/cli/config.py
@@ -37,7 +37,9 @@ def configure(cfg: Config) -> None:

  platform_url = click.prompt("Hafnia Platform URL", type=str, default=consts.DEFAULT_API_URL)

- cfg_profile = ConfigSchema(api_key=api_key, platform_url=platform_url)
+ use_keychain = click.confirm("Store API key in system keychain?", default=False)
+
+ cfg_profile = ConfigSchema(platform_url=platform_url, api_key=api_key, use_keychain=use_keychain)
  cfg.add_profile(profile_name, cfg_profile, set_active=True)
  cfg.save_config()
  profile_cmds.profile_show(cfg)
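
The updated `configure` flow above adds an opt-in `use_keychain` flag, backed by the new `keyring` dependency and the new `src/cli/keychain.py` module (its contents are not shown in this diff). Purely as a hypothetical illustration of how an API key could be kept in the system keychain with the `keyring` package; the service name and helper functions below are made up and may not match the actual module:

```python
import keyring

SERVICE_NAME = "hafnia"  # hypothetical service identifier, not taken from the package


def store_api_key(profile_name: str, api_key: str) -> None:
    # Save the key in the OS keychain instead of the plain-text config file
    keyring.set_password(SERVICE_NAME, profile_name, api_key)


def load_api_key(profile_name: str) -> str | None:
    # Returns None if no key has been stored for this profile
    return keyring.get_password(SERVICE_NAME, profile_name)


def delete_api_key(profile_name: str) -> None:
    # Raises keyring.errors.PasswordDeleteError if no key exists for the profile
    keyring.delete_password(SERVICE_NAME, profile_name)
```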