hafnia 0.1.26__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. cli/__main__.py +2 -2
  2. cli/dataset_cmds.py +60 -0
  3. cli/runc_cmds.py +1 -1
  4. hafnia/data/__init__.py +2 -2
  5. hafnia/data/factory.py +9 -56
  6. hafnia/dataset/dataset_helpers.py +91 -0
  7. hafnia/dataset/dataset_names.py +71 -0
  8. hafnia/dataset/dataset_transformation.py +187 -0
  9. hafnia/dataset/dataset_upload_helper.py +468 -0
  10. hafnia/dataset/hafnia_dataset.py +453 -0
  11. hafnia/dataset/primitives/__init__.py +16 -0
  12. hafnia/dataset/primitives/bbox.py +137 -0
  13. hafnia/dataset/primitives/bitmask.py +182 -0
  14. hafnia/dataset/primitives/classification.py +56 -0
  15. hafnia/dataset/primitives/point.py +25 -0
  16. hafnia/dataset/primitives/polygon.py +100 -0
  17. hafnia/dataset/primitives/primitive.py +44 -0
  18. hafnia/dataset/primitives/segmentation.py +51 -0
  19. hafnia/dataset/primitives/utils.py +51 -0
  20. hafnia/dataset/table_transformations.py +183 -0
  21. hafnia/experiment/hafnia_logger.py +2 -2
  22. hafnia/helper_testing.py +63 -0
  23. hafnia/http.py +5 -3
  24. hafnia/platform/__init__.py +2 -2
  25. hafnia/platform/builder.py +25 -19
  26. hafnia/platform/datasets.py +184 -0
  27. hafnia/platform/download.py +85 -23
  28. hafnia/torch_helpers.py +180 -95
  29. hafnia/utils.py +1 -1
  30. hafnia/visualizations/colors.py +267 -0
  31. hafnia/visualizations/image_visualizations.py +202 -0
  32. {hafnia-0.1.26.dist-info → hafnia-0.2.0.dist-info}/METADATA +212 -99
  33. hafnia-0.2.0.dist-info/RECORD +46 -0
  34. cli/data_cmds.py +0 -53
  35. hafnia-0.1.26.dist-info/RECORD +0 -27
  36. {hafnia-0.1.26.dist-info → hafnia-0.2.0.dist-info}/WHEEL +0 -0
  37. {hafnia-0.1.26.dist-info → hafnia-0.2.0.dist-info}/entry_points.txt +0 -0
  38. {hafnia-0.1.26.dist-info → hafnia-0.2.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,267 @@
1
+ from typing import List, Tuple
2
+
3
+
4
def get_n_colors(index: int) -> List[Tuple[int, int, int]]:
    """Return the first `index` colors of the palette, cycling when exhausted.

    Args:
        index: Number of colors to return. (The parameter name is kept for
            backward compatibility; it is a count, not a position.)

    Returns:
        A list of `index` (R, G, B) tuples taken from ``COLORS`` in order,
        wrapping around modulo ``len(COLORS)``.
    """
    n_colors = len(COLORS)
    # Use a distinct loop variable: the original comprehension shadowed the
    # `index` parameter, which worked only by accident and read confusingly.
    return [COLORS[i % n_colors] for i in range(index)]


# Fixed palette of 256 visually distinct (R, G, B) colors used for
# annotation/visualization drawing. Order matters: `get_n_colors` hands the
# colors out sequentially, so the most distinguishable colors come first.
COLORS = [
    (210, 24, 32), (24, 105, 255), (0, 138, 0), (243, 109, 255),
    (113, 0, 121), (170, 251, 0), (0, 190, 194), (255, 162, 53),
    (93, 61, 4), (8, 0, 138), (0, 93, 93), (154, 125, 130),
    (162, 174, 255), (150, 182, 117), (158, 40, 255), (77, 0, 20),
    (255, 174, 190), (206, 0, 146), (0, 255, 182), (0, 45, 0),
    (158, 117, 0), (61, 53, 65), (243, 235, 146), (101, 97, 138),
    (138, 61, 77), (89, 4, 186), (85, 138, 113), (178, 190, 194),
    (255, 93, 130), (28, 198, 0), (146, 247, 255), (45, 134, 166),
    (57, 93, 40), (235, 206, 255), (255, 93, 0), (166, 97, 170),
    (134, 0, 0), (53, 0, 89), (0, 81, 142), (158, 73, 16),
    (206, 190, 0), (0, 40, 40), (0, 178, 255), (202, 166, 134),
    (190, 154, 194), (45, 32, 12), (117, 101, 69), (130, 121, 223),
    (0, 194, 138), (186, 231, 194), (134, 142, 166), (202, 113, 89),
    (130, 154, 0), (45, 0, 255), (210, 4, 247), (255, 215, 190),
    (146, 206, 247), (186, 93, 125), (255, 65, 194), (190, 134, 255),
    (146, 142, 101), (166, 4, 170), (134, 227, 117), (73, 0, 61),
    (251, 239, 12), (105, 85, 93), (89, 49, 45), (105, 53, 255),
    (182, 4, 77), (93, 109, 113), (65, 69, 53), (101, 113, 0),
    (121, 0, 73), (28, 49, 81), (121, 65, 158), (255, 146, 113),
    (255, 166, 243), (186, 158, 65), (130, 170, 154), (215, 121, 0),
    (73, 61, 113), (81, 162, 85), (231, 130, 182), (210, 227, 251),
    (0, 73, 49), (109, 219, 194), (61, 77, 93), (97, 53, 85),
    (0, 113, 81), (93, 24, 0), (154, 93, 81), (85, 142, 219),
    (202, 202, 154), (53, 24, 32), (57, 61, 0), (0, 154, 150),
    (235, 16, 109), (138, 69, 121), (117, 170, 194), (202, 146, 154),
    (210, 186, 198), (154, 206, 0), (69, 109, 170), (117, 89, 0),
    (206, 77, 12), (0, 223, 251), (255, 61, 65), (255, 202, 73),
    (45, 49, 146), (134, 105, 134), (158, 130, 190), (206, 174, 255),
    (121, 69, 45), (198, 251, 130), (93, 117, 73), (182, 69, 73),
    (255, 223, 239), (162, 0, 113), (77, 77, 166), (166, 170, 202),
    (113, 28, 40), (40, 121, 121), (8, 73, 0), (0, 105, 134),
    (166, 117, 73), (251, 182, 130), (85, 24, 125), (0, 255, 89),
    (0, 65, 77), (109, 142, 146), (170, 36, 0), (190, 210, 109),
    (138, 97, 186), (210, 65, 190), (73, 97, 81), (206, 243, 239),
    (97, 194, 97), (20, 138, 77), (0, 255, 231), (0, 105, 0),
    (178, 121, 158), (170, 178, 158), (186, 85, 255), (198, 121, 206),
    (32, 49, 32), (125, 4, 219), (194, 198, 247), (138, 198, 206),
    (231, 235, 206), (40, 28, 57), (158, 255, 174), (130, 206, 154),
    (49, 166, 12), (0, 162, 117), (219, 146, 85), (61, 20, 4),
    (255, 138, 154), (130, 134, 53), (105, 77, 113), (182, 97, 0),
    (125, 45, 0), (162, 178, 57), (49, 4, 125), (166, 61, 202),
    (154, 32, 45), (4, 223, 134), (117, 125, 109), (138, 150, 210),
    (8, 162, 202), (247, 109, 93), (16, 85, 202), (219, 182, 101),
    (146, 89, 109), (162, 255, 227), (89, 85, 40), (113, 121, 170),
    (215, 89, 101), (73, 32, 81), (223, 77, 146), (0, 0, 202),
    (93, 101, 210), (223, 166, 0), (178, 73, 146), (182, 138, 117),
    (97, 77, 61), (166, 150, 162), (85, 28, 53), (49, 65, 65),
    (117, 117, 134), (146, 158, 162), (117, 154, 113), (255, 130, 32),
    (134, 85, 255), (154, 198, 182), (223, 150, 243), (202, 223, 49),
    (142, 93, 40), (53, 190, 227), (113, 166, 255), (89, 138, 49),
    (255, 194, 235), (170, 61, 105), (73, 97, 125), (73, 53, 28),
    (69, 178, 158), (28, 36, 49), (247, 49, 239), (117, 0, 166),
    (231, 182, 170), (130, 105, 101), (227, 162, 202), (32, 36, 0),
    (121, 182, 16), (158, 142, 255), (210, 117, 138), (202, 182, 219),
    (174, 154, 223), (255, 113, 219), (210, 247, 178), (198, 215, 206),
    (255, 210, 138), (93, 223, 53), (93, 121, 146), (162, 142, 0),
    (174, 223, 239), (113, 77, 194), (125, 69, 0), (101, 146, 182),
    (93, 121, 255), (81, 73, 89), (150, 158, 81), (206, 105, 174),
    (101, 53, 117), (219, 210, 227), (182, 174, 117), (81, 89, 0),
    (182, 89, 57), (85, 4, 235), (61, 117, 45), (146, 130, 154),
    (130, 36, 105), (186, 134, 57), (138, 178, 227), (109, 178, 130),
    (150, 65, 53), (109, 65, 73), (138, 117, 61), (178, 113, 117),
    (146, 28, 73), (223, 109, 49), (0, 227, 223), (146, 4, 202),
    (49, 40, 89), (0, 125, 210), (162, 109, 255), (130, 89, 146),
]
@@ -0,0 +1,202 @@
1
+ import shutil
2
+ from pathlib import Path
3
+ from typing import Dict, List, Optional, Tuple, Type, Union
4
+
5
+ import cv2
6
+ import numpy as np
7
+ import numpy.typing as npt
8
+ from PIL import Image
9
+
10
+ from hafnia.dataset.hafnia_dataset import HafniaDataset, Sample
11
+ from hafnia.dataset.primitives import (
12
+ Bbox,
13
+ Bitmask,
14
+ Classification,
15
+ Polygon,
16
+ Segmentation,
17
+ )
18
+ from hafnia.dataset.primitives.primitive import Primitive
19
+
20
+
21
def draw_anonymize_by_blurring(
    image: np.ndarray,
    primitives: List[Primitive],
    inplace: bool = False,
    class_names: Union[List[str], str] = "all",
    anonymization_settings: Optional[Dict[Type[Primitive], Dict]] = None,
) -> np.ndarray:
    """Blur the image region of each selected primitive for anonymization.

    Args:
        image: Image to anonymize (HxWxC numpy array).
        primitives: Annotations whose regions should be blurred.
        inplace: If False (default), operate on a copy of `image`.
        class_names: "all" to blur every primitive, or a list of class names
            to blur only primitives whose `class_name` matches.
        anonymization_settings: Optional per-primitive-type keyword arguments
            forwarded to each primitive's `anonymize_by_blurring`.

    Returns:
        The anonymized image (the same array as `image` when `inplace=True`).

    Raises:
        ValueError: If `class_names` is neither "all" nor a list.
    """
    if not inplace:
        image = image.copy()

    anonymization_settings = anonymization_settings or {}
    if isinstance(class_names, list):
        primitives = [primitive for primitive in primitives if primitive.class_name in class_names]
    elif not (isinstance(class_names, str) and class_names == "all"):
        # Anything that is not "all" or a list of names is a caller error.
        raise ValueError(f"Invalid class_names type: {type(class_names)}. Expected 'all' or a list of class names.")
    for primitive in primitives:
        # The copy (if requested) already happened above, so blur in place.
        settings = anonymization_settings.get(type(primitive), {})
        image = primitive.anonymize_by_blurring(image, inplace=True, **settings)
    return image
42
+
43
+
44
def draw_masks(image: np.ndarray, primitives: List[Primitive], inplace: bool = False) -> np.ndarray:
    """Apply every primitive's mask onto the image.

    When `inplace` is False (default) a copy of `image` is masked and
    returned; otherwise `image` itself is modified and returned.
    """
    target = image if inplace else image.copy()
    for annotation in primitives:
        # Each primitive knows how to mask out its own region.
        annotation.mask(target, inplace=True)
    return target
51
+
52
+
53
def draw_annotations(
    image: np.ndarray,
    primitives: List[Primitive],
    inplace: bool = False,
    draw_settings: Optional[Dict[Type[Primitive], Dict]] = None,
) -> np.ndarray:
    """Draw all primitives onto the image in a fixed z-order.

    Area-covering primitives (segmentations, bitmasks) are drawn first so
    that boxes, polygons and classification labels stay visible on top.
    Optional per-primitive-type keyword arguments can be supplied via
    `draw_settings` and are forwarded to each primitive's `draw` call.
    """
    if not inplace:
        image = image.copy()
    settings_by_type = draw_settings or {}
    # NOTE(review): primitives whose type is not listed here make
    # list.index raise ValueError — presumably all types are covered.
    z_order = [Segmentation, Bitmask, Bbox, Polygon, Classification]
    for annotation in sorted(primitives, key=lambda p: z_order.index(type(p))):
        image = annotation.draw(image, **settings_by_type.get(type(annotation), {}))
    return image
68
+
69
+
70
def concatenate_below(img0: np.ndarray, below_img: np.ndarray) -> np.ndarray:
    """Stack `below_img` underneath `img0`, rescaling it to `img0`'s width."""
    scale = img0.shape[1] / below_img.shape[1]
    resized_height = int(below_img.shape[0] * scale)
    resized = cv2.resize(below_img, (img0.shape[1], resized_height))
    # Promote grayscale inputs to 3 channels so the concatenation shapes agree.
    if img0.ndim == 2:
        img0 = cv2.cvtColor(img0, cv2.COLOR_GRAY2BGR)
    if resized.ndim == 2:
        resized = cv2.cvtColor(resized, cv2.COLOR_GRAY2BGR)
    return np.concatenate([img0, resized], axis=0)
80
+
81
+
82
def concatenate_below_resize_by_padding(img0: np.ndarray, below_img: np.ndarray) -> np.ndarray:
    """Stack `below_img` underneath `img0` without rescaling either image.

    The narrower of the two images is horizontally centered in a
    zero-filled (black) canvas of the wider image's width before the two
    are concatenated vertically. Grayscale inputs are promoted to
    3 channels so the shapes agree.
    """
    max_width = max(img0.shape[1], below_img.shape[1])

    if len(img0.shape) == 2:
        img0 = cv2.cvtColor(img0, cv2.COLOR_GRAY2RGB)
    if len(below_img.shape) == 2:
        below_img = cv2.cvtColor(below_img, cv2.COLOR_GRAY2RGB)
    img0_padded = resize_width_by_padding(img0, new_width=max_width)
    below_img_padded = resize_width_by_padding(below_img, new_width=max_width)

    return np.concatenate([img0_padded, below_img_padded], axis=0)


def resize_width_by_padding(img0: np.ndarray, new_width: int) -> np.ndarray:
    """Center `img0` horizontally in a zero-filled image of width `new_width`.

    Assumes `new_width >= img0.shape[1]`; the extra width is split evenly
    (left margin gets the floor half) and the padding is black/zero.
    """
    padded_shape = list(img0.shape)
    padded_shape[1] = new_width
    img0_padded = np.zeros(padded_shape, dtype=img0.dtype)
    extra_width = new_width - img0.shape[1]
    left_margin = extra_width // 2
    img0_padded[:, left_margin : left_margin + img0.shape[1]] = img0
    return img0_padded
103
+
104
+
105
def append_text_below_frame(frame: np.ndarray, text: str) -> np.ndarray:
    """Render `text` as an image strip and attach it below `frame`."""
    # Font height tracks the frame: 10% of the frame height, clamped to [7, 50] px.
    font_size_px = min(max(int(frame.shape[0] * 0.1), 7), 50)
    text_region = create_text_img(text, font_size_px=font_size_px)
    return concatenate_below_resize_by_padding(frame, text_region)
113
+
114
+
115
def create_text_img(
    text_strings: Union[List[str], str],
    font_size_px: int,
    text_width: Optional[int] = None,
    color: Tuple[int, int, int] = (255, 255, 255),
    bg_color: Tuple[int, int, int] = (0, 0, 0),
) -> npt.NDArray[np.uint8]:
    """Render one or more lines of text as a stacked uint8 color image.

    Args:
        text_strings: A single string or a list of strings; each string is
            rendered as one row of the output image.
        font_size_px: Desired glyph height in pixels.
        text_width: Fixed output width in pixels; defaults to the widest
            rendered line plus a baseline-sized margin on each side.
        color: Text color triple (channel order as used by the caller).
        bg_color: Background color triple.

    Returns:
        uint8 image of shape (n_lines * line_height, text_width, 3).

    Raises:
        ValueError: If `text_strings` is an empty list.
    """
    font_face = cv2.FONT_HERSHEY_SIMPLEX
    thickness = 2
    if font_size_px < 15:
        thickness = 1  # A thinner stroke keeps small glyphs legible.
    line_type = cv2.LINE_AA
    if isinstance(text_strings, str):
        text_strings = [text_strings]
    if not text_strings:
        # The original implementation crashed below with an opaque
        # NameError on empty input; fail explicitly instead.
        raise ValueError("text_strings must contain at least one string.")
    font_scale = cv2.getFontScaleFromHeight(fontFace=font_face, pixelHeight=font_size_px, thickness=thickness)

    # Measure every line. For a fixed font all lines share the same height
    # and baseline, so only the maximum width needs tracking.
    text_w_max = 0
    text_h = 0
    baseline = 0
    for text in text_strings:
        (text_w, text_h), baseline = cv2.getTextSize(
            text=text, fontFace=font_face, fontScale=font_scale, thickness=thickness
        )
        text_w_max = max(text_w_max, text_w)

    text_width = text_width or text_w_max + baseline * 2
    text_height = text_h + baseline * 2

    y_pos = text_h + baseline  # putText's origin is the text baseline, not the top-left corner.
    text_imgs = []
    for text in text_strings:
        shape_color_image = (text_height, text_width, 3)
        img = np.full(shape_color_image, bg_color, dtype=np.uint8)
        text_img = cv2.putText(
            img=img,
            text=text,
            org=(baseline, y_pos),
            fontFace=font_face,
            fontScale=font_scale,
            color=color,
            thickness=thickness,
            lineType=line_type,
        )
        text_imgs.append(text_img)
    return np.vstack(text_imgs)
160
+
161
+
162
def concatenate_right(img0: np.ndarray, below_img: np.ndarray) -> np.ndarray:
    """Attach `below_img` to the right of `img0`, rescaling it to `img0`'s height."""
    scale = img0.shape[0] / below_img.shape[0]
    resized_width = int(below_img.shape[1] * scale)
    resized = cv2.resize(below_img, (resized_width, img0.shape[0]))
    return np.concatenate([img0, resized], axis=1)
169
+
170
+
171
def save_dataset_sample_set_visualizations(
    path_dataset: Path,
    path_output_folder: Path,
    max_samples: int = 10,
    draw_settings: Optional[Dict[Type[Primitive], Dict]] = None,
    anonymize_settings: Optional[Dict[Type[Primitive], Dict]] = None,
) -> List[Path]:
    """Render annotated images for up to `max_samples` shuffled dataset samples.

    The output folder is recreated from scratch on every call. Samples are
    drawn from the dataset shuffled with a fixed seed (42), optionally
    anonymized by blurring, then annotated and saved; the saved image paths
    are returned.
    """
    dataset = HafniaDataset.read_from_path(path_dataset)
    # Start from a clean output folder on every run.
    shutil.rmtree(path_output_folder, ignore_errors=True)
    path_output_folder.mkdir(parents=True)

    settings = draw_settings or {}

    paths: List[Path] = []
    for sample_dict in dataset.shuffle(seed=42):
        sample = Sample(**sample_dict)
        image = sample.read_image()
        annotations = sample.get_annotations()

        if anonymize_settings:
            image = draw_anonymize_by_blurring(image, annotations, anonymization_settings=anonymize_settings)
        image = draw_annotations(image, annotations, draw_settings=settings)

        path_image = path_output_folder / Path(sample.file_name).name
        Image.fromarray(image).save(path_image)
        paths.append(path_image)

        if len(paths) >= max_samples:
            break  # Enough samples rendered.
    return paths