hafnia 0.3.0__tar.gz → 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (152)
  1. {hafnia-0.3.0 → hafnia-0.4.0}/.github/workflows/publish_docker.yaml +1 -1
  2. {hafnia-0.3.0 → hafnia-0.4.0}/.github/workflows/tests.yaml +1 -0
  3. hafnia-0.4.0/.trivyignore +3 -0
  4. {hafnia-0.3.0 → hafnia-0.4.0}/PKG-INFO +8 -6
  5. {hafnia-0.3.0 → hafnia-0.4.0}/README.md +4 -3
  6. {hafnia-0.3.0 → hafnia-0.4.0}/examples/example_dataset_recipe.py +103 -30
  7. {hafnia-0.3.0 → hafnia-0.4.0}/examples/example_hafnia_dataset.py +9 -4
  8. {hafnia-0.3.0 → hafnia-0.4.0}/examples/example_torchvision_dataloader.py +2 -2
  9. {hafnia-0.3.0 → hafnia-0.4.0}/pyproject.toml +10 -5
  10. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/__main__.py +3 -1
  11. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/config.py +43 -3
  12. hafnia-0.4.0/src/cli/keychain.py +88 -0
  13. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/profile_cmds.py +5 -2
  14. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/__init__.py +1 -1
  15. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/dataset_helpers.py +9 -2
  16. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/dataset_names.py +2 -1
  17. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/dataset_recipe/dataset_recipe.py +49 -37
  18. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/dataset_recipe/recipe_transforms.py +18 -2
  19. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/dataset_upload_helper.py +60 -4
  20. hafnia-0.4.0/src/hafnia/dataset/format_conversions/image_classification_from_directory.py +106 -0
  21. hafnia-0.4.0/src/hafnia/dataset/format_conversions/torchvision_datasets.py +281 -0
  22. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/hafnia_dataset.py +176 -50
  23. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/operations/dataset_stats.py +2 -3
  24. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/operations/dataset_transformations.py +19 -15
  25. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/operations/table_transformations.py +4 -3
  26. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/bbox.py +25 -12
  27. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/bitmask.py +26 -14
  28. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/classification.py +16 -8
  29. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/point.py +7 -3
  30. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/polygon.py +16 -9
  31. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/segmentation.py +10 -7
  32. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/experiment/hafnia_logger.py +0 -9
  33. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/platform/dataset_recipe.py +7 -2
  34. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/platform/datasets.py +3 -3
  35. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/platform/download.py +23 -18
  36. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/utils.py +17 -0
  37. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/visualizations/image_visualizations.py +1 -1
  38. hafnia-0.4.0/tests/data/dataset_image_metadata_schema.yaml +344 -0
  39. hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-101].png +0 -0
  40. hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-256].png +0 -0
  41. hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[cifar100].png +0 -0
  42. hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[cifar10].png +0 -0
  43. hafnia-0.4.0/tests/data/expected_images/test_samples/test_check_dataset[mnist].png +0 -0
  44. hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-101].png +0 -0
  45. hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-256].png +0 -0
  46. hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar100].png +0 -0
  47. hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar10].png +0 -0
  48. hafnia-0.4.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[mnist].png +0 -0
  49. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-coco-2017/annotations.jsonl +3 -3
  50. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-coco-2017/annotations.parquet +0 -0
  51. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-coco-2017/dataset_info.json +6 -3
  52. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.jsonl +3 -3
  53. hafnia-0.4.0/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.parquet +0 -0
  54. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-tiny-dataset/dataset_info.json +5 -2
  55. {hafnia-0.3.0 → hafnia-0.4.0}/tests/helper_testing.py +5 -1
  56. {hafnia-0.3.0 → hafnia-0.4.0}/tests/integration/test_cli_integration.py +11 -6
  57. {hafnia-0.3.0 → hafnia-0.4.0}/tests/integration/test_dataset_merges.py +9 -1
  58. {hafnia-0.3.0 → hafnia-0.4.0}/tests/integration/test_dataset_recipes_with_platform.py +28 -2
  59. {hafnia-0.3.0 → hafnia-0.4.0}/tests/integration/test_samples.py +8 -5
  60. hafnia-0.4.0/tests/integration/test_torchvision_datasets.py +21 -0
  61. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/dataset_recipe/test_dataset_recipes.py +9 -46
  62. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/dataset_recipe/test_recipe_transformations.py +37 -3
  63. hafnia-0.4.0/tests/unit/dataset/format_conversions/test_image_classification_directory.py +47 -0
  64. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/test_hafnia_dataset.py +42 -3
  65. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/test_shape_primitives.py +41 -2
  66. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/test_cli.py +2 -2
  67. hafnia-0.4.0/uv.lock +4350 -0
  68. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-101].png +0 -0
  69. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[caltech-256].png +0 -0
  70. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[cifar100].png +0 -0
  71. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[cifar10].png +0 -0
  72. hafnia-0.3.0/tests/data/expected_images/test_samples/test_check_dataset[mnist].png +0 -0
  73. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-101].png +0 -0
  74. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[caltech-256].png +0 -0
  75. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar100].png +0 -0
  76. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[cifar10].png +0 -0
  77. hafnia-0.3.0/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[mnist].png +0 -0
  78. hafnia-0.3.0/tests/data/micro_test_datasets/micro-tiny-dataset/annotations.parquet +0 -0
  79. hafnia-0.3.0/uv.lock +0 -3308
  80. {hafnia-0.3.0 → hafnia-0.4.0}/.devcontainer/devcontainer.json +0 -0
  81. {hafnia-0.3.0 → hafnia-0.4.0}/.devcontainer/hooks/post_create +0 -0
  82. {hafnia-0.3.0 → hafnia-0.4.0}/.github/dependabot.yaml +0 -0
  83. {hafnia-0.3.0 → hafnia-0.4.0}/.github/workflows/Dockerfile +0 -0
  84. {hafnia-0.3.0 → hafnia-0.4.0}/.github/workflows/build.yaml +0 -0
  85. {hafnia-0.3.0 → hafnia-0.4.0}/.github/workflows/check_release.yaml +0 -0
  86. {hafnia-0.3.0 → hafnia-0.4.0}/.github/workflows/ci_cd.yaml +0 -0
  87. {hafnia-0.3.0 → hafnia-0.4.0}/.github/workflows/lint.yaml +0 -0
  88. {hafnia-0.3.0 → hafnia-0.4.0}/.github/workflows/publish_pypi.yaml +0 -0
  89. {hafnia-0.3.0 → hafnia-0.4.0}/.gitignore +0 -0
  90. {hafnia-0.3.0 → hafnia-0.4.0}/.pre-commit-config.yaml +0 -0
  91. {hafnia-0.3.0 → hafnia-0.4.0}/.python-version +0 -0
  92. {hafnia-0.3.0 → hafnia-0.4.0}/.vscode/extensions.json +0 -0
  93. {hafnia-0.3.0 → hafnia-0.4.0}/.vscode/launch.json +0 -0
  94. {hafnia-0.3.0 → hafnia-0.4.0}/.vscode/settings.json +0 -0
  95. {hafnia-0.3.0 → hafnia-0.4.0}/LICENSE +0 -0
  96. {hafnia-0.3.0 → hafnia-0.4.0}/docs/cli.md +0 -0
  97. {hafnia-0.3.0 → hafnia-0.4.0}/docs/release.md +0 -0
  98. {hafnia-0.3.0 → hafnia-0.4.0}/examples/example_logger.py +0 -0
  99. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/__init__.py +0 -0
  100. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/consts.py +0 -0
  101. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/dataset_cmds.py +0 -0
  102. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/dataset_recipe_cmds.py +0 -0
  103. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/experiment_cmds.py +0 -0
  104. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/runc_cmds.py +0 -0
  105. {hafnia-0.3.0 → hafnia-0.4.0}/src/cli/trainer_package_cmds.py +0 -0
  106. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/data/__init__.py +0 -0
  107. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/data/factory.py +0 -0
  108. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/dataset_recipe/recipe_types.py +0 -0
  109. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/license_types.py +0 -0
  110. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/__init__.py +0 -0
  111. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/primitive.py +0 -0
  112. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/dataset/primitives/utils.py +0 -0
  113. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/experiment/__init__.py +0 -0
  114. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/http.py +0 -0
  115. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/log.py +0 -0
  116. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/platform/__init__.py +0 -0
  117. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/platform/builder.py +0 -0
  118. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/platform/experiment.py +0 -0
  119. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/platform/trainer_package.py +0 -0
  120. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/torch_helpers.py +0 -0
  121. {hafnia-0.3.0 → hafnia-0.4.0}/src/hafnia/visualizations/colors.py +0 -0
  122. {hafnia-0.3.0 → hafnia-0.4.0}/tests/__init__.py +0 -0
  123. {hafnia-0.3.0 → hafnia-0.4.0}/tests/conftest.py +0 -0
  124. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_check_dataset[coco-2017].png +0 -0
  125. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_check_dataset[midwest-vehicle-detection].png +0 -0
  126. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_check_dataset[tiny-dataset].png +0 -0
  127. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[coco-2017].png +0 -0
  128. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[midwest-vehicle-detection].png +0 -0
  129. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_samples/test_dataset_draw_image_and_target[tiny-dataset].png +0 -0
  130. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-coco-2017].png +0 -0
  131. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_visualizations/test_blur_anonymization[micro-tiny-dataset].png +0 -0
  132. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-coco-2017].png +0 -0
  133. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_visualizations/test_draw_annotations[micro-tiny-dataset].png +0 -0
  134. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_visualizations/test_mask_region[micro-coco-2017].png +0 -0
  135. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/expected_images/test_visualizations/test_mask_region[micro-tiny-dataset].png +0 -0
  136. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-coco-2017/data/3b4/3b4165c8c4f830be4e95c6eb6209880a.jpg +0 -0
  137. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-coco-2017/data/837/837b642d8a7b3b8dcf86c7a23edb55ce.jpg +0 -0
  138. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-coco-2017/data/dc8/dc8efc98ce6304fe182a2c0a3ce312cf.jpg +0 -0
  139. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-tiny-dataset/data/3dd/3ddec2275a02e79e3251d85443622e4c.png +0 -0
  140. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-tiny-dataset/data/4d8/4d8450b045e60e8f3657ababa44af9b6.png +0 -0
  141. {hafnia-0.3.0 → hafnia-0.4.0}/tests/data/micro_test_datasets/micro-tiny-dataset/data/907/907f182da7bcedb8222bbd5721a8a86e.png +0 -0
  142. {hafnia-0.3.0 → hafnia-0.4.0}/tests/integration/test_check_example_scripts.py +0 -0
  143. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/dataset_recipe/test_dataset_recipe_helpers.py +0 -0
  144. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/operations/test_dataset_stats.py +0 -0
  145. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/operations/test_dataset_transformations.py +0 -0
  146. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/operations/test_table_transformations.py +0 -0
  147. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/test_colors.py +0 -0
  148. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/dataset/test_dataset_helpers.py +0 -0
  149. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/test_builder.py +0 -0
  150. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/test_hafnia_logger.py +0 -0
  151. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/test_utils.py +0 -0
  152. {hafnia-0.3.0 → hafnia-0.4.0}/tests/unit/test_visualizations.py +0 -0
.github/workflows/publish_docker.yaml
@@ -47,7 +47,7 @@ jobs:
  echo "aws_region=${{ secrets.STAGE_AWS_REGION }}" >> $GITHUB_OUTPUT
  fi
  - name: Configure AWS credentials
- uses: aws-actions/configure-aws-credentials@v5.0.0
+ uses: aws-actions/configure-aws-credentials@v5.1.0
  with:
  role-to-assume: arn:aws:iam::${{ steps.env-vars.outputs.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
  aws-region: ${{ steps.env-vars.outputs.aws_region }}

.github/workflows/tests.yaml
@@ -11,6 +11,7 @@ jobs:
  test:
  runs-on: ${{ matrix.os }}
  strategy:
+ max-parallel: 1
  matrix:
  os: [ubuntu-latest, windows-latest]
  steps:

.trivyignore
@@ -0,0 +1,3 @@
+ # Ignore 'CVE-2024-37059' issue https://avd.aquasec.com/nvd/2024/cve-2024-37059/
+ # The vulnerability does not apply to our platform as models are not loaded on our platform.
+ CVE-2024-37059

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hafnia
- Version: 0.3.0
+ Version: 0.4.0
  Summary: Python SDK for communication with Hafnia platform.
  Author-email: Milestone Systems <hafniaplatform@milestone.dk>
  License-File: LICENSE
@@ -9,7 +9,9 @@ Requires-Dist: boto3>=1.35.91
  Requires-Dist: click>=8.1.8
  Requires-Dist: emoji>=2.14.1
  Requires-Dist: flatten-dict>=0.4.2
- Requires-Dist: mlflow>=3.2.0
+ Requires-Dist: keyring>=25.6.0
+ Requires-Dist: mcp==1.16.0
+ Requires-Dist: mlflow>=3.4.0
  Requires-Dist: more-itertools>=10.7.0
  Requires-Dist: opencv-python-headless>=4.11.0.86
  Requires-Dist: pathspec>=0.12.1
@@ -22,7 +24,6 @@ Requires-Dist: rich>=13.9.4
  Requires-Dist: s5cmd>=0.2.0
  Requires-Dist: sagemaker-mlflow>=0.1.0
  Requires-Dist: seedir>=0.5.0
- Requires-Dist: tqdm>=4.67.1
  Requires-Dist: xxhash>=3.5.0
  Description-Content-Type: text/markdown

@@ -80,6 +81,7 @@ Copy the key and save it for later use.
  Hafnia API Key: # Pass your HAFNIA API key
  Hafnia Platform URL [https://api.mdi.milestonesys.com]: # Press [Enter]
  ```
+
  1. Download `mnist` from terminal to verify that your configuration is working.

  ```bash
@@ -91,7 +93,7 @@ With Hafnia configured on your local machine, it is now possible to download
  and explore the dataset sample with a python script:

  ```python
- from hafnia.data import load_dataset, get_dataset_path
+ from hafnia.data import get_dataset_path
  from hafnia.dataset.hafnia_dataset import HafniaDataset

  # To download the sample dataset use:
@@ -360,7 +362,7 @@ logger.log_scalar("validation/loss", value=0.1, step=100)
  logger.log_metric("validation/accuracy", value=0.95, step=100)
  ```

- Similar to `load_dataset`, the tracker behaves differently when running locally or in the cloud.
+ The tracker behaves differently when running locally or in the cloud.
  Locally, experiment data is stored in a local folder `.data/experiments/{DATE_TIME}`.

  In the cloud, the experiment data will be available in the Hafnia platform under
@@ -384,7 +386,7 @@ and datasets available in the data library.

  ```python
  # Load Hugging Face dataset
- dataset_splits = load_dataset("midwest-vehicle-detection")
+ dataset_splits = HafniaDataset.from_name("midwest-vehicle-detection")

  # Define transforms
  train_transforms = v2.Compose(

README.md
@@ -52,6 +52,7 @@ Copy the key and save it for later use.
  Hafnia API Key: # Pass your HAFNIA API key
  Hafnia Platform URL [https://api.mdi.milestonesys.com]: # Press [Enter]
  ```
+
  1. Download `mnist` from terminal to verify that your configuration is working.

  ```bash
@@ -63,7 +64,7 @@ With Hafnia configured on your local machine, it is now possible to download
  and explore the dataset sample with a python script:

  ```python
- from hafnia.data import load_dataset, get_dataset_path
+ from hafnia.data import get_dataset_path
  from hafnia.dataset.hafnia_dataset import HafniaDataset

  # To download the sample dataset use:
@@ -332,7 +333,7 @@ logger.log_scalar("validation/loss", value=0.1, step=100)
  logger.log_metric("validation/accuracy", value=0.95, step=100)
  ```

- Similar to `load_dataset`, the tracker behaves differently when running locally or in the cloud.
+ The tracker behaves differently when running locally or in the cloud.
  Locally, experiment data is stored in a local folder `.data/experiments/{DATE_TIME}`.

  In the cloud, the experiment data will be available in the Hafnia platform under
@@ -356,7 +357,7 @@ and datasets available in the data library.

  ```python
  # Load Hugging Face dataset
- dataset_splits = load_dataset("midwest-vehicle-detection")
+ dataset_splits = HafniaDataset.from_name("midwest-vehicle-detection")

  # Define transforms
  train_transforms = v2.Compose(

examples/example_dataset_recipe.py
@@ -3,7 +3,7 @@ from pathlib import Path
  from rich import print as rprint

  from hafnia import utils
- from hafnia.data.factory import load_dataset
+ from hafnia.dataset.dataset_names import OPS_REMOVE_CLASS
  from hafnia.dataset.dataset_recipe.dataset_recipe import DatasetRecipe
  from hafnia.dataset.dataset_recipe.recipe_transforms import (
  SelectSamples,
@@ -50,7 +50,7 @@ dataset_recipe.as_python_code()
  # executed in the TaaS platform. This is demonstrated below:
  if utils.is_hafnia_configured(): # First ensure you are connected to the hafnia platform
  # Upload the dataset recipe - this will make it available for TaaS and for users of your organization
- dataset_recipe.as_platform_recipe(recipe_name="example-mnist-recipe")
+ dataset_recipe.as_platform_recipe(recipe_name="example-mnist-recipe", overwrite=True)

  # The recipe is now available in TaaS, for different environments and other users in your organization
  dataset_recipe_again = DatasetRecipe.from_recipe_name(name="example-mnist-recipe")
@@ -95,53 +95,126 @@ rprint(dataset_recipe) # as a python object
  print(dataset_recipe.as_json_str()) # as a JSON string


- # Example: Using the 'load_dataset' function
- merged_dataset: HafniaDataset = load_dataset(dataset_recipe)
- # You get a few extra things when using `load_dataset`.
- # 1) You get the dataset directly - you don't have to call `build()` on the recipe.
- # 2) The dataset is cached if it already exists, so you don't have to
- # download or rebuild the dataset on the second run.
- # 3) You can use an implicit form of the recipe. One example of this is that you just specify
- # the dataset name `load_dataset("mnist")` or path `load_dataset(Path(".data/datasets/mnist"))`
-
-
+ ### Real-world Example: Merge datasets to create a Person+Vehicle dataset ###
+ # 1) The first step is to use the regular 'HafniaDataset' interface to investigate and understand the datasets
+
+ # 1a) Explore 'coco-2017'
+ coco = HafniaDataset.from_name("coco-2017")
+ coco.print_stats() # Print dataset statistics
+ coco_class_names = coco.info.get_task_by_primitive("Bbox").class_names # Get the class names for the bbox task
+ # You will notice coco has 80 classes including 'person' and various vehicle classes such as 'car', 'bus', 'truck', etc.
+ # but also many unrelated classes such as 'toaster', 'hair drier', etc.
+
+ # 1b) Explore 'midwest-vehicle-detection'
+ midwest = HafniaDataset.from_name("midwest-vehicle-detection")
+ midwest.print_stats() # Print dataset statistics
+ midwest_class_names = midwest.info.get_task_by_primitive("Bbox").class_names
+ # You will also notice midwest has similar classes, but they are named differently, e.g. 'Persons',
+ # 'Vehicle.Car', 'Vehicle.Bicycle', etc.
+
+ # 2) We will now use the 'HafniaDataset' interface to verify operations (class remapping, merging, filtering)
+
+ # 2a) Remap class names to have the same class names across datasets
+ mappings_coco = {
+ "person": "Person",
+ "bicycle": "Vehicle",
+ "car": "Vehicle",
+ "motorcycle": "Vehicle",
+ "bus": "Vehicle",
+ "train": "Vehicle",
+ "truck": "Vehicle",
+ }
+ mapping_midwest = {
+ "Person": "Person",
+ "Vehicle*": "Vehicle", # Wildcard mapping. Selects class names starting with 'Vehicle.' e.g. 'Vehicle.Bicycle', "Vehicle.Car', etc.
+ "Vehicle.Trailer": OPS_REMOVE_CLASS, # Use this to remove a class
+ }
+ coco_remapped = coco.class_mapper(class_mapping=mappings_coco, method="remove_undefined", task_name="bboxes")
+ midwest_remapped = midwest.class_mapper(class_mapping=mapping_midwest, task_name="bboxes")
+
+ # 2b) Merge datasets
+ merged_dataset_all_images = HafniaDataset.from_merge(dataset0=coco_remapped, dataset1=midwest_remapped)
+
+ # 2c) Remove images without 'Person' or 'Vehicle' annotations
+ merged_dataset = merged_dataset_all_images.select_samples_by_class_name(name=["Person", "Vehicle"], task_name="bboxes")
+ merged_dataset.print_stats()
+
+ # 3) Once you have verified operations using the 'HafniaDataset' interface, you can convert
+ # the operations to a single 'DatasetRecipe'
+ merged_recipe = DatasetRecipe.from_merge(
+ recipe0=DatasetRecipe.from_name("coco-2017").class_mapper(
+ class_mapping=mappings_coco, method="remove_undefined", task_name="bboxes"
+ ),
+ recipe1=DatasetRecipe.from_name("midwest-vehicle-detection").class_mapper(
+ class_mapping=mapping_midwest, task_name="bboxes"
+ ),
+ ).select_samples_by_class_name(name=["Person", "Vehicle"], task_name="bboxes")
+
+ # 3a) Verify again on the sample datasets, that the recipe works and can build as a dataset
+ merged_dataset = merged_recipe.build()
+ merged_dataset.print_stats()
+
+ # 3b) Optionally: Save the recipe to file
+ path_recipe = Path(".data/dataset_recipes/example-merged-person-vehicle-recipe.json")
+ merged_recipe.as_json_file(path_recipe)
+ if utils.is_hafnia_configured():
+ # 3c) Upload dataset recipe to Training-aaS platform
+ recipe_response = merged_recipe.as_platform_recipe(recipe_name="person-vehicle-detection", overwrite=True)
+ print(f"Recipe Name: '{recipe_response['name']}', Recipe id: '{recipe_response['id']}'")
+
+ # 4) The recipe is now available in TaaS for you and other users in your organization
+ # 4a) View recipes from your terminal with 'hafnia dataset-recipe ls'
+ # 4b) (Coming soon) Or go to 'Dataset Recipes' in the TaaS web platform: https://hafnia.milestonesys.com/training-aas/dataset-recipes
+
+ # 5) Launch an experiment with the dataset:
+ # 5a) Using the CLI:
+ # 'hafnia experiment create --dataset-recipe person-vehicle-detection --trainer-path ../trainer-classification'
+ # 5b) (Coming soon) Or through the TaaS web platform: https://hafnia.milestonesys.com/training-aas/experiments
+
+ # 6) Monitor and manage your experiments
+ # 6a) View experiments using the web platform https://staging02.mdi.milestonesys.com/training-aas/experiments
+ # 6b) Or use the CLI: 'hafnia experiment ls'
  ### DatasetRecipe Implicit Form ###
  # Below we demonstrate the difference between implicit and explicit forms of dataset recipes.
  # Example: Get dataset by name with implicit and explicit forms
- dataset = load_dataset("mnist") # Implicit form
- dataset = load_dataset(DatasetRecipe.from_name(name="mnist")) # Explicit form
+ recipe_implicit_form = "mnist"
+ recipe_explicit_form = DatasetRecipe.from_name(name="mnist")
+
+ # The implicit form can now be loaded and built as a dataset
+ dataset_implicit = DatasetRecipe.from_implicit_form(recipe_implicit_form).build()
+ # Or directly as a dataset
+ dataset_implicit = HafniaDataset.from_recipe(recipe_implicit_form)
+

  # Example: Get dataset from path with implicit and explicit forms:
- dataset = load_dataset(Path(".data/datasets/mnist")) # Implicit form
- dataset = load_dataset(DatasetRecipe.from_path(path_folder=Path(".data/datasets/mnist"))) # Explicit form
+ recipe_implicit_form = Path(".data/datasets/mnist")
+ recipe_explicit_form = DatasetRecipe.from_path(path_folder=Path(".data/datasets/mnist"))

  # Example: Merge datasets with implicit and explicit forms
- dataset = load_dataset(("mnist", "mnist")) # Implicit form
- dataset = load_dataset( # Explicit form
- DatasetRecipe.from_merger(
- recipes=[
- DatasetRecipe.from_name(name="mnist"),
- DatasetRecipe.from_name(name="mnist"),
- ]
- )
+ recipe_implicit_form = ("mnist", "mnist")
+ recipe_explicit_form = DatasetRecipe.from_merger(
+ recipes=[
+ DatasetRecipe.from_name(name="mnist"),
+ DatasetRecipe.from_name(name="mnist"),
+ ]
  )

  # Example: Define a dataset with transformations using implicit and explicit forms
- dataset = load_dataset(["mnist", SelectSamples(n_samples=20), Shuffle()]) # Implicit form
- dataset = load_dataset(DatasetRecipe.from_name(name="mnist").select_samples(n_samples=20).shuffle()) # Explicit form
+ recipe_implicit_form = ["mnist", SelectSamples(n_samples=20), Shuffle()]
+ recipe_explicit_form = DatasetRecipe.from_name(name="mnist").select_samples(n_samples=20).shuffle()


  # Example: Complex nested example with implicit vs explicit forms
  # Implicit form of a complex dataset recipe
  split_ratio = {"train": 0.8, "val": 0.1, "test": 0.1}
- implicit_recipe = (
+ recipe_implicit_complex = (
  ("mnist", "mnist"),
  [Path(".data/datasets/mnist"), SelectSamples(n_samples=30), SplitsByRatios(split_ratios=split_ratio)],
  ["mnist", SelectSamples(n_samples=20), Shuffle()],
  )

  # Explicit form of the same complex dataset recipe
- explicit_recipe = DatasetRecipe.from_merger(
+ recipe_explicit_complex = DatasetRecipe.from_merger(
  recipes=[
  DatasetRecipe.from_merger(
  recipes=[
@@ -165,10 +238,10 @@ explicit_recipe = DatasetRecipe.from_merger(


  # To convert from implicit to explicit recipe form, you can use the `from_implicit_form` method.
- explicit_recipe_from_implicit = DatasetRecipe.from_implicit_form(implicit_recipe)
+ explicit_recipe_from_implicit = DatasetRecipe.from_implicit_form(recipe_implicit_complex)
  rprint("Converted explicit recipe:")
  rprint(explicit_recipe_from_implicit)

  # Verify that the conversion produces the same result
- assert explicit_recipe_from_implicit == explicit_recipe
+ assert explicit_recipe_from_implicit == recipe_explicit_complex
  rprint("Conversion successful - recipes are equivalent!")

examples/example_hafnia_dataset.py
@@ -5,7 +5,6 @@ import numpy as np
  from PIL import Image
  from rich import print as rprint

- from hafnia.data import load_dataset
  from hafnia.dataset.dataset_names import SplitName
  from hafnia.dataset.hafnia_dataset import DatasetInfo, HafniaDataset, Sample, TaskInfo
  from hafnia.dataset.primitives.bbox import Bbox
@@ -19,7 +18,7 @@ from hafnia.dataset.primitives.polygon import Polygon
  # And configure it with your Hafnia account:
  # hafnia configure

- # Load dataset
+ # Load sample dataset
  dataset = HafniaDataset.from_name("mnist")

  # Dataset information is stored in 'dataset.info'
@@ -53,6 +52,12 @@ new_dataset_splits = dataset.splits_by_ratios(split_ratios)
  # Get only samples with specific class names
  dataset_ones = dataset.select_samples_by_class_name(name="1 - one", primitive=Classification)

+ # Get access to a few full and public dataset through Hafnia (no login required)
+ # Available datasets: "mnist", "caltech-101", "caltech-256", "cifar10", "cifar100"
+ public_dataset = HafniaDataset.from_name_public_dataset("mnist", n_samples=100)
+ public_dataset.print_stats()
+
+
  # Rename class names with mapping
  class_mapping_strict = {
  "0 - zero": "even", # "0 - zero" will be renamed to "even". "even" appear first and get class index 0
@@ -70,7 +75,7 @@ dataset_mapped = dataset.class_mapper(class_mapping=class_mapping_strict)
  dataset_mapped.print_class_distribution()

  # Support Chaining Operations (load, shuffle, select samples)
- dataset = load_dataset("midwest-vehicle-detection").shuffle(seed=42).select_samples(n_samples=10)
+ dataset = HafniaDataset.from_name("midwest-vehicle-detection").shuffle(seed=42).select_samples(n_samples=10)


  # Write dataset to disk
@@ -124,7 +129,7 @@ for i_fake_sample in range(5):
  bboxes = [Bbox(top_left_x=0.1, top_left_y=0.20, width=0.1, height=0.2, class_name="car")]
  classifications = [Classification(class_name="vehicle", class_idx=0)]
  sample = Sample(
- file_name=f"path/to/image_{i_fake_sample:05}.jpg",
+ file_path=f"path/to/image_{i_fake_sample:05}.jpg",
  height=480,
  width=640,
  split="train",

examples/example_torchvision_dataloader.py
@@ -7,12 +7,12 @@ from torch.utils.data import DataLoader
  from torchvision.transforms import v2

  from hafnia import torch_helpers
- from hafnia.data import load_dataset
+ from hafnia.dataset.hafnia_dataset import HafniaDataset

  if __name__ == "__main__":
  torch.manual_seed(1)
  # Load Hugging Face dataset
- dataset = load_dataset("midwest-vehicle-detection")
+ dataset = HafniaDataset.from_name("midwest-vehicle-detection")

  # Define transforms
  train_transforms = v2.Compose(

pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "hafnia"
- version = "0.3.0"
+ version = "0.4.0"
  description = "Python SDK for communication with Hafnia platform."
  readme = "README.md"
  authors = [
@@ -13,7 +13,7 @@ dependencies = [
  "click>=8.1.8",
  "emoji>=2.14.1",
  "flatten-dict>=0.4.2",
- "mlflow>=3.2.0",
+ "keyring>=25.6.0",
  "more-itertools>=10.7.0",
  "opencv-python-headless>=4.11.0.86",
  "pathspec>=0.12.1",
@@ -24,10 +24,11 @@ dependencies = [
  "pydantic>=2.10.4",
  "rich>=13.9.4",
  "s5cmd>=0.2.0",
- "sagemaker-mlflow>=0.1.0",
  "seedir>=0.5.0",
- "tqdm>=4.67.1",
  "xxhash>=3.5.0",
+ "mlflow>=3.4.0",
+ "sagemaker-mlflow>=0.1.0",
+ "mcp==1.16.0",
  ]

  [dependency-groups]
@@ -37,7 +38,7 @@ dev = [
  "torch>=2.6.0",
  "torchvision>=0.21.0",
  "flatten-dict>=0.4.2",
- "pytest-cov>=7.0.0",
+ "pytest-cov>=7.0.0",
  ]

  test = ["pytest>=8.3.4", "pre-commit>=4.2.0", "ruff>=0.9.1"]
@@ -71,5 +72,9 @@ disallow_incomplete_defs = false
  disallow_untyped_calls = false
  warn_unused_ignores = false

+ [[tool.mypy.overrides]]
+ module = "yaml"
+ ignore_missing_imports = true
+
  [tool.pytest.ini_options]
  markers = ["slow: marks tests as slow (deselect with '-m \"not slow\"')"]

src/cli/__main__.py
@@ -37,7 +37,9 @@ def configure(cfg: Config) -> None:

  platform_url = click.prompt("Hafnia Platform URL", type=str, default=consts.DEFAULT_API_URL)

- cfg_profile = ConfigSchema(api_key=api_key, platform_url=platform_url)
+ use_keychain = click.confirm("Store API key in system keychain?", default=False)
+
+ cfg_profile = ConfigSchema(platform_url=platform_url, api_key=api_key, use_keychain=use_keychain)
  cfg.add_profile(profile_name, cfg_profile, set_active=True)
  cfg.save_config()
  profile_cmds.profile_show(cfg)

src/cli/config.py
@@ -6,6 +6,7 @@ from typing import Dict, List, Optional
  from pydantic import BaseModel, field_validator

  import cli.consts as consts
+ import cli.keychain as keychain
  from hafnia.log import sys_logger, user_logger

  PLATFORM_API_MAPPING = {
@@ -19,9 +20,18 @@ PLATFORM_API_MAPPING = {
  }


+ class SecretStr(str):
+ def __repr__(self):
+ return "********"
+
+ def __str__(self):
+ return "********"
+
+
  class ConfigSchema(BaseModel):
  platform_url: str = ""
  api_key: Optional[str] = None
+ use_keychain: bool = False

  @field_validator("api_key")
  def validate_api_key(cls, value: Optional[str]) -> Optional[str]:
@@ -35,7 +45,7 @@ class ConfigSchema(BaseModel):
  sys_logger.warning("API key is missing the 'ApiKey ' prefix. Prefix is being added automatically.")
  value = f"ApiKey {value}"

- return value
+ return SecretStr(value) # Keeps the API key masked in logs and repr


  class ConfigFileSchema(BaseModel):
@@ -70,13 +80,32 @@ class Config:

  @property
  def api_key(self) -> str:
+ # Check keychain first if enabled
+ if self.config.use_keychain:
+ keychain_key = keychain.get_api_key(self.active_profile)
+ if keychain_key is not None:
+ return keychain_key
+
+ # Fall back to config file
  if self.config.api_key is not None:
  return self.config.api_key
+
  raise ValueError(consts.ERROR_API_KEY_NOT_SET)

  @api_key.setter
  def api_key(self, value: str) -> None:
- self.config.api_key = value
+ # Store in keychain if enabled
+ if self.config.use_keychain:
+ if keychain.store_api_key(self.active_profile, value):
+ # Successfully stored in keychain, don't store in config
+ self.config.api_key = None
+ else:
+ # Keychain storage failed, fall back to config file
+ sys_logger.warning("Failed to store in keychain, falling back to config file")
+ self.config.api_key = value
+ else:
+ # Not using keychain, store in config file
+ self.config.api_key = value

  @property
  def platform_url(self) -> str:
@@ -152,8 +181,19 @@ class Config:
  raise ValueError("Failed to parse configuration file")

  def save_config(self) -> None:
+ # Create a copy to avoid modifying the original data
+ config_to_save = self.config_data.model_dump()
+
+ # Store API key in keychain if enabled, and don't write to file
+ for profile_name, profile_data in config_to_save["profiles"].items():
+ if profile_data.get("use_keychain", False):
+ api_key = profile_data.get("api_key")
+ if api_key:
+ keychain.store_api_key(profile_name, api_key)
+ profile_data["api_key"] = None
+
  with open(self.config_path, "w") as f:
- json.dump(self.config_data.model_dump(), f, indent=4)
+ json.dump(config_to_save, f, indent=4)

  def remove_profile(self, profile_name: str) -> None:
  if profile_name not in self.config_data.profiles:

src/cli/keychain.py
@@ -0,0 +1,88 @@
+ """Keychain storage for API keys using the system keychain."""
+
+ from typing import Optional
+
+ from hafnia.log import sys_logger
+
+ # Keyring is optional - gracefully degrade if not available
+ try:
+ import keyring
+
+ KEYRING_AVAILABLE = True
+ except ImportError:
+ KEYRING_AVAILABLE = False
+ sys_logger.debug("keyring library not available, keychain storage disabled")
+
+ KEYRING_SERVICE_NAME = "hafnia-cli"
+
+
+ def store_api_key(profile_name: str, api_key: str) -> bool:
+ """
+ Store an API key in the system keychain.
+
+ Args:
+ profile_name: The profile name to associate with the key
+ api_key: The API key to store
+
+ Returns:
+ True if successfully stored, False otherwise
+ """
+ if not KEYRING_AVAILABLE:
+ sys_logger.warning("Keyring library not available, cannot store API key in keychain")
+ return False
+
+ try:
+ keyring.set_password(KEYRING_SERVICE_NAME, profile_name, api_key)
+ sys_logger.debug(f"Stored API key for profile '{profile_name}' in keychain")
+ return True
+ except Exception as e:
+ sys_logger.warning(f"Failed to store API key in keychain: {e}")
+ return False
+
+
+ def get_api_key(profile_name: str) -> Optional[str]:
+ """
+ Retrieve an API key from the system keychain.
+
+ Args:
+ profile_name: The profile name to retrieve the key for
+
+ Returns:
+ The API key if found, None otherwise
+ """
+ if not KEYRING_AVAILABLE:
+ return None
+
+ try:
+ api_key = keyring.get_password(KEYRING_SERVICE_NAME, profile_name)
+ if api_key:
+ sys_logger.debug(f"Retrieved API key for profile '{profile_name}' from keychain")
+ return api_key
+ except Exception as e:
+ sys_logger.warning(f"Failed to retrieve API key from keychain: {e}")
+ return None
+
+
+ def delete_api_key(profile_name: str) -> bool:
+ """
+ Delete an API key from the system keychain.
+
+ Args:
+ profile_name: The profile name to delete the key for
+
+ Returns:
+ True if successfully deleted or didn't exist, False on error
+ """
+ if not KEYRING_AVAILABLE:
+ return False
+
+ try:
+ keyring.delete_password(KEYRING_SERVICE_NAME, profile_name)
+ sys_logger.debug(f"Deleted API key for profile '{profile_name}' from keychain")
+ return True
+ except keyring.errors.PasswordDeleteError:
+ # Key didn't exist, which is fine
+ return True
+ except Exception as e:
+ sys_logger.warning(f"Failed to delete API key from keychain: {e}")
+ return False

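For orientation, the sketch below exercises the new helpers end to end. It is a minimal, hypothetical example: it assumes the module resolves as `cli.keychain` (the import path used in the config.py hunk above), and the profile name and key value are illustrative only.

```python
# Hedged usage sketch of the keychain helpers added in src/cli/keychain.py.
# Assumes the module is importable as `cli.keychain`; profile and key are illustrative.
from cli import keychain

profile = "default"        # hypothetical profile name
api_key = "ApiKey abc123"  # hypothetical key value

if keychain.store_api_key(profile, api_key):
    # Keys are stored under the "hafnia-cli" keyring service, one entry per profile.
    assert keychain.get_api_key(profile) == api_key
    keychain.delete_api_key(profile)
else:
    # store_api_key() returns False when the optional `keyring` package is missing
    # or the OS keychain rejects the write; the CLI then keeps the key in the config file.
    print("Keychain unavailable; API key stays in the config file")
```
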
src/cli/profile_cmds.py
@@ -50,10 +50,13 @@ def cmd_profile_use(cfg: Config, profile_name: str) -> None:
  @click.option(
  "--activate/--no-activate", help="Activate the created profile after creation", default=True, show_default=True
  )
+ @click.option(
+ "--use-keychain", is_flag=True, help="Store API key in system keychain instead of config file", default=False
+ )
  @click.pass_obj
- def cmd_profile_create(cfg: Config, name: str, api_url: str, api_key: str, activate: bool) -> None:
+ def cmd_profile_create(cfg: Config, name: str, api_url: str, api_key: str, activate: bool, use_keychain: bool) -> None:
  """Create a new profile."""
- cfg_profile = ConfigSchema(platform_url=api_url, api_key=api_key)
+ cfg_profile = ConfigSchema(platform_url=api_url, api_key=api_key, use_keychain=use_keychain)

  cfg.add_profile(profile_name=name, profile=cfg_profile, set_active=activate)
  profile_show(cfg)

src/hafnia/__init__.py
@@ -3,4 +3,4 @@ from importlib.metadata import version
  __package_name__ = "hafnia"
  __version__ = version(__package_name__)

- __dataset_format_version__ = "0.0.2" # Hafnia dataset format version
+ __dataset_format_version__ = "0.1.0" # Hafnia dataset format version

src/hafnia/dataset/dataset_helpers.py
@@ -38,12 +38,19 @@ def hash_from_bytes(data: bytes) -> str:

  def save_image_with_hash_name(image: np.ndarray, path_folder: Path) -> Path:
  pil_image = Image.fromarray(image)
+ path_image = save_pil_image_with_hash_name(pil_image, path_folder)
+ return path_image
+
+
+ def save_pil_image_with_hash_name(image: Image.Image, path_folder: Path, allow_skip: bool = True) -> Path:
  buffer = io.BytesIO()
- pil_image.save(buffer, format="PNG")
+ image.save(buffer, format="PNG")
  hash_value = hash_from_bytes(buffer.getvalue())
  path_image = Path(path_folder) / relative_path_from_hash(hash=hash_value, suffix=".png")
+ if allow_skip and path_image.exists():
+ return path_image
  path_image.parent.mkdir(parents=True, exist_ok=True)
- pil_image.save(path_image)
+ image.save(path_image)
  return path_image

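A small sketch of the new `save_pil_image_with_hash_name` helper shown above: files are content addressed, so saving identical pixel data resolves to the same hash-named path, and `allow_skip=True` (the default) returns early instead of rewriting the file. The import path is inferred from the file location and the target folder is illustrative.

```python
# Hedged sketch of the content-addressed image writer added above.
# Import path inferred from src/hafnia/dataset/dataset_helpers.py; folder is illustrative.
from pathlib import Path

from PIL import Image

from hafnia.dataset.dataset_helpers import save_pil_image_with_hash_name

image = Image.new("RGB", (64, 64), color=(255, 0, 0))  # synthetic test image
path_first = save_pil_image_with_hash_name(image, Path(".data/images"))
path_second = save_pil_image_with_hash_name(image, Path(".data/images"))
# Identical bytes hash to the same relative path, and the second call skips the write.
assert path_first == path_second
```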