hafnia 0.1.27__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. cli/__main__.py +2 -2
  2. cli/dataset_cmds.py +60 -0
  3. cli/runc_cmds.py +1 -1
  4. hafnia/data/__init__.py +2 -2
  5. hafnia/data/factory.py +9 -56
  6. hafnia/dataset/dataset_helpers.py +91 -0
  7. hafnia/dataset/dataset_names.py +71 -0
  8. hafnia/dataset/dataset_transformation.py +187 -0
  9. hafnia/dataset/dataset_upload_helper.py +468 -0
  10. hafnia/dataset/hafnia_dataset.py +453 -0
  11. hafnia/dataset/primitives/__init__.py +16 -0
  12. hafnia/dataset/primitives/bbox.py +137 -0
  13. hafnia/dataset/primitives/bitmask.py +182 -0
  14. hafnia/dataset/primitives/classification.py +56 -0
  15. hafnia/dataset/primitives/point.py +25 -0
  16. hafnia/dataset/primitives/polygon.py +100 -0
  17. hafnia/dataset/primitives/primitive.py +44 -0
  18. hafnia/dataset/primitives/segmentation.py +51 -0
  19. hafnia/dataset/primitives/utils.py +51 -0
  20. hafnia/dataset/table_transformations.py +183 -0
  21. hafnia/experiment/hafnia_logger.py +2 -2
  22. hafnia/helper_testing.py +63 -0
  23. hafnia/http.py +5 -3
  24. hafnia/platform/__init__.py +2 -2
  25. hafnia/platform/datasets.py +184 -0
  26. hafnia/platform/download.py +85 -23
  27. hafnia/torch_helpers.py +180 -95
  28. hafnia/utils.py +1 -1
  29. hafnia/visualizations/colors.py +267 -0
  30. hafnia/visualizations/image_visualizations.py +202 -0
  31. {hafnia-0.1.27.dist-info → hafnia-0.2.0.dist-info}/METADATA +212 -99
  32. hafnia-0.2.0.dist-info/RECORD +46 -0
  33. cli/data_cmds.py +0 -53
  34. hafnia-0.1.27.dist-info/RECORD +0 -27
  35. {hafnia-0.1.27.dist-info → hafnia-0.2.0.dist-info}/WHEEL +0 -0
  36. {hafnia-0.1.27.dist-info → hafnia-0.2.0.dist-info}/entry_points.txt +0 -0
  37. {hafnia-0.1.27.dist-info → hafnia-0.2.0.dist-info}/licenses/LICENSE +0 -0
hafnia/torch_helpers.py CHANGED
@@ -1,80 +1,126 @@
- from typing import List, Optional
+ from typing import Dict, List, Optional, Tuple, Type, Union

- import datasets
+ import numpy as np
  import torch
  import torchvision
- from flatten_dict import flatten
+ from flatten_dict import flatten, unflatten
  from PIL import Image, ImageDraw, ImageFont
  from torchvision import tv_tensors
  from torchvision import utils as tv_utils
  from torchvision.transforms import v2

+ from hafnia.dataset.dataset_names import FieldName
+ from hafnia.dataset.hafnia_dataset import HafniaDataset, Sample
+ from hafnia.dataset.primitives import (
+     PRIMITIVE_COLUMN_NAMES,
+     class_color_by_name,
+ )
+ from hafnia.dataset.primitives.bbox import Bbox
+ from hafnia.dataset.primitives.bitmask import Bitmask
+ from hafnia.dataset.primitives.classification import Classification
+ from hafnia.dataset.primitives.primitive import Primitive
+ from hafnia.dataset.primitives.segmentation import Segmentation
+ from hafnia.log import user_logger
+
+
+ def get_primitives_per_task_name_for_primitive(
+     sample: Sample, PrimitiveType: Type[Primitive], split_by_task_name: bool = True
+ ) -> Dict[str, List[Primitive]]:
+     if not hasattr(sample, PrimitiveType.column_name()):
+         return {}
+
+     primitives = getattr(sample, PrimitiveType.column_name())
+     if primitives is None:
+         return {}
+
+     primitives_by_task_name: Dict[str, List[Primitive]] = {}
+     for primitive in primitives:
+         if primitive.task_name not in primitives_by_task_name:
+             primitives_by_task_name[primitive.task_name] = []
+         primitives_by_task_name[primitive.task_name].append(primitive)
+     return primitives_by_task_name
+

  class TorchvisionDataset(torch.utils.data.Dataset):
      def __init__(
          self,
-         hf_dataset: datasets.Dataset,
+         dataset: HafniaDataset,
          transforms=None,
-         classification_tasks: Optional[List] = None,
-         object_tasks: Optional[List] = None,
-         segmentation_tasks: Optional[List] = None,
          keep_metadata: bool = False,
      ):
-         self.dataset = hf_dataset
+         self.dataset = dataset
+
          self.transforms = transforms
-         self.object_tasks = object_tasks or ["objects"]
-         self.segmentation_tasks = segmentation_tasks or ["segmentation"]
-         self.classification_tasks = classification_tasks or ["classification"]
          self.keep_metadata = keep_metadata

-     def __getitem__(self, idx):
-         sample = self.dataset[idx]
-
-         # For now, we expect a dataset to always have an image field
-         image = tv_tensors.Image(sample.pop("image"))
-
-         img_shape = image.shape[-2:]
-         target = {}
+     def __getitem__(self, idx: int) -> Tuple[torch.Tensor, Dict]:
+         sample_dict = self.dataset[idx]
+         sample = Sample(**sample_dict)
+         image = tv_tensors.Image(sample.read_image_pillow())
+         h, w = image.shape[-2:]
+         target_flat = {}
+         mask_tasks: Dict[str, List[Segmentation]] = get_primitives_per_task_name_for_primitive(sample, Segmentation)
+         for task_name, masks in mask_tasks.items():
+             raise NotImplementedError("Segmentation tasks are not yet implemented")
+             # target[f"{mask.task_name}.mask"] = tv_tensors.Mask(mask.mask)
+
+         class_tasks: Dict[str, List] = get_primitives_per_task_name_for_primitive(sample, Classification)
+         for task_name, classifications in class_tasks.items():
+             assert len(classifications) == 1, "Expected exactly one classification task per sample"
+             target_flat[f"{Classification.column_name()}.{task_name}"] = {
+                 FieldName.CLASS_IDX: classifications[0].class_idx,
+                 FieldName.CLASS_NAME: classifications[0].class_name,
+             }
+
+         bbox_tasks: Dict[str, List[Bbox]] = get_primitives_per_task_name_for_primitive(sample, Bbox)
+         for task_name, bboxes in bbox_tasks.items():
+             bboxes_list = [bbox.to_coco(image_height=h, image_width=w) for bbox in bboxes]
+             bboxes_tensor = torch.as_tensor(bboxes_list).reshape(-1, 4)
+             target_flat[f"{Bbox.column_name()}.{task_name}"] = {
+                 FieldName.CLASS_IDX: [bbox.class_idx for bbox in bboxes],
+                 FieldName.CLASS_NAME: [bbox.class_name for bbox in bboxes],
+                 "bbox": tv_tensors.BoundingBoxes(bboxes_tensor, format="XYWH", canvas_size=(h, w)),
+             }
+
+         bitmask_tasks: Dict[str, List[Bitmask]] = get_primitives_per_task_name_for_primitive(sample, Bitmask)
+         for task_name, bitmasks in bitmask_tasks.items():
+             bitmasks_np = np.array([bitmask.to_mask(img_height=h, img_width=w) for bitmask in bitmasks])
+             target_flat[f"{Bitmask.column_name()}.{task_name}"] = {
+                 FieldName.CLASS_IDX: [bitmask.class_idx for bitmask in bitmasks],
+                 FieldName.CLASS_NAME: [bitmask.class_name for bitmask in bitmasks],
+                 "mask": tv_tensors.Mask(bitmasks_np),
+             }

-         for segmentation_task in self.segmentation_tasks:
-             if segmentation_task in sample:
-                 target[f"{segmentation_task}.mask"] = tv_tensors.Mask(sample[segmentation_task].pop("mask"))
-
-         for classification_task in self.classification_tasks:
-             if classification_task in sample:
-                 target[f"{classification_task}.class_idx"] = sample[classification_task].pop("class_idx")
-
-         for object_task in self.object_tasks:
-             if object_task in sample:
-                 bboxes_list = sample[object_task].pop("bbox")
-                 bboxes = tv_tensors.BoundingBoxes(bboxes_list, format="XYWH", canvas_size=img_shape)
-                 if bboxes.numel() == 0:
-                     bboxes = bboxes.reshape(-1, 4)
-                 target[f"{object_task}.bbox"] = bboxes
-                 target[f"{object_task}.class_idx"] = torch.tensor(sample[object_task].pop("class_idx"))
+         if self.transforms:
+             image, target_flat = self.transforms(image, target_flat)

          if self.keep_metadata:
-             target.update(flatten(sample, reducer="dot"))
-
-         if self.transforms:
-             image, target = self.transforms(image, target)
+             sample_dict = sample_dict.copy()
+             drop_columns = PRIMITIVE_COLUMN_NAMES
+             for column in drop_columns:
+                 if column in sample_dict:
+                     sample_dict.pop(column)

+         target = flatten(target_flat, reducer="dot")
          return image, target

      def __len__(self):
          return len(self.dataset)


- def draw_image_classification(visualize_image: torch.Tensor, text_label: str) -> torch.Tensor:
+ def draw_image_classification(visualize_image: torch.Tensor, text_labels: Union[str, List[str]]) -> torch.Tensor:
+     if isinstance(text_labels, str):
+         text_labels = [text_labels]
+     text = "\n".join(text_labels)
      max_dim = max(visualize_image.shape[-2:])
-     font_size = max(int(max_dim * 0.1), 10)  # Minimum font size of 10
+     font_size = max(int(max_dim * 0.06), 10)  # Minimum font size of 10
      txt_font = ImageFont.load_default(font_size)
      dummie_draw = ImageDraw.Draw(Image.new("RGB", (10, 10)))
-     _, _, w, h = dummie_draw.textbbox((0, 0), text=text_label, font=txt_font)  # type: ignore[arg-type]
+     _, _, w, h = dummie_draw.textbbox((0, 0), text=text, font=txt_font)  # type: ignore[arg-type]

      text_image = Image.new("RGB", (int(w), int(h)))
      draw = ImageDraw.Draw(text_image)
-     draw.text((0, 0), text=text_label, font=txt_font)  # type: ignore[arg-type]
+     draw.text((0, 0), text=text, font=txt_font)  # type: ignore[arg-type]
      text_tensor = v2.functional.to_image(text_image)

      height = text_tensor.shape[-2] + visualize_image.shape[-2]
@@ -94,77 +140,116 @@ def draw_image_classification(visualize_image: torch.Tensor, text_label: str) ->
  def draw_image_and_targets(
      image: torch.Tensor,
      targets,
-     detection_tasks: Optional[List[str]] = None,
-     segmentation_tasks: Optional[List[str]] = None,
-     classification_tasks: Optional[List[str]] = None,
  ) -> torch.Tensor:
-     detection_tasks = detection_tasks or ["objects"]
-     segmentation_tasks = segmentation_tasks or ["segmentation"]
-     classification_tasks = classification_tasks or ["classification"]
-
      visualize_image = image.clone()
      if visualize_image.is_floating_point():
          visualize_image = image - torch.min(image)
          visualize_image = visualize_image / visualize_image.max()

      visualize_image = v2.functional.to_dtype(visualize_image, torch.uint8, scale=True)
-
-     for object_task in detection_tasks:
-         bbox_field = f"{object_task}.bbox"
-         if bbox_field in targets:
-             hugging_face_format = "xywh"
-             bbox = torchvision.ops.box_convert(targets[bbox_field], in_fmt=hugging_face_format, out_fmt="xyxy")
-             class_names_field = f"{object_task}.class_name"
-             class_names = targets.get(class_names_field, None)
-             visualize_image = tv_utils.draw_bounding_boxes(visualize_image, bbox, labels=class_names, width=2)
-
-     for segmentation_task in segmentation_tasks:
-         mask_field = f"{segmentation_task}.mask"
-         if mask_field in targets:
-             mask = targets[mask_field].squeeze(0)
-             masks_list = [mask == value for value in mask.unique()]
-             masks = torch.stack(masks_list, dim=0).to(torch.bool)
-             visualize_image = tv_utils.draw_segmentation_masks(visualize_image, masks=masks, alpha=0.5)
-
-     for classification_task in classification_tasks:
-         classification_field = f"{classification_task}.class_idx"
-         if classification_field in targets:
-             text_label = f"[{targets[classification_field]}]"
-             classification_name_field = f"{classification_task}.class_name"
-             if classification_name_field in targets:
-                 text_label = text_label + f" {targets[classification_name_field]}"
-             visualize_image = draw_image_classification(visualize_image, text_label)
+     targets = unflatten(targets, splitter="dot")  # Nested dictionary format
+     # NOTE: Order of drawing is important so visualizations are not overlapping in an undesired way
+     if Segmentation.column_name() in targets:
+         primitive_annotations = targets[Segmentation.column_name()]
+         for task_name, task_annotations in primitive_annotations.items():
+             raise NotImplementedError("Segmentation tasks are not yet implemented")
+             # mask = targets[mask_field].squeeze(0)
+             # masks_list = [mask == value for value in mask.unique()]
+             # masks = torch.stack(masks_list, dim=0).to(torch.bool)
+             # visualize_image = tv_utils.draw_segmentation_masks(visualize_image, masks=masks, alpha=0.5)
+
+     if Bitmask.column_name() in targets:
+         primitive_annotations = targets[Bitmask.column_name()]
+         for task_name, task_annotations in primitive_annotations.items():
+             colors = [class_color_by_name(class_name) for class_name in task_annotations[FieldName.CLASS_NAME]]
+             visualize_image = tv_utils.draw_segmentation_masks(
+                 image=visualize_image,
+                 masks=task_annotations["mask"],
+                 colors=colors,
+             )
+
+     if Bbox.column_name() in targets:
+         primitive_annotations = targets[Bbox.column_name()]
+         for task_name, task_annotations in primitive_annotations.items():
+             bboxes = torchvision.ops.box_convert(task_annotations["bbox"], in_fmt="xywh", out_fmt="xyxy")
+             colors = [class_color_by_name(class_name) for class_name in task_annotations[FieldName.CLASS_NAME]]
+             visualize_image = tv_utils.draw_bounding_boxes(
+                 image=visualize_image,
+                 boxes=bboxes,
+                 labels=task_annotations[FieldName.CLASS_NAME],
+                 width=2,
+                 colors=colors,
+             )
+
+     # Important that classification is drawn last as it will change image dimensions
+     if Classification.column_name() in targets:
+         primitive_annotations = targets[Classification.column_name()]
+         text_labels = []
+         for task_name, task_annotations in primitive_annotations.items():
+             if task_name == Classification.default_task_name():
+                 text_label = task_annotations[FieldName.CLASS_NAME]
+             else:
+                 text_label = f"{task_name}: {task_annotations[FieldName.CLASS_NAME]}"
+             text_labels.append(text_label)
+         visualize_image = draw_image_classification(visualize_image, text_labels)
      return visualize_image


  class TorchVisionCollateFn:
      def __init__(self, skip_stacking: Optional[List] = None):
          if skip_stacking is None:
-             skip_stacking = []
-         self.skip_stacking_list = skip_stacking
+             skip_stacking = [f"{Bbox.column_name()}.*", f"{Bitmask.column_name()}.*"]
+
+         self.wild_card_skip_stacking = []
+         self.skip_stacking_list = []
+         for skip_name in skip_stacking:
+             if skip_name.endswith("*"):
+                 self.wild_card_skip_stacking.append(skip_name[:-1])  # Remove the trailing '*'
+             else:
+                 self.skip_stacking_list.append(skip_name)
+
+     def skip_key_name(self, key_name: str) -> bool:
+         if key_name in self.skip_stacking_list:
+             return True
+         if any(key_name.startswith(wild_card) for wild_card in self.wild_card_skip_stacking):
+             return True
+         return False

      def __call__(self, batch):
          images, targets = tuple(zip(*batch, strict=False))
          if "image" not in self.skip_stacking_list:
              images = torch.stack(images)

-         targets_modified = {k: [d[k] for d in targets] for k in targets[0]}
+         keys_min = set(targets[0])
+         keys_max = set(targets[0])
+         for target in targets:
+             keys_min = keys_min.intersection(target)
+             keys_max = keys_max.union(target)
+
+         if keys_min != keys_max:
+             user_logger.warning(
+                 "Not all images in the batch contain the same targets. To solve for missing targets "
+                 f"the following keys {keys_max - keys_min} are dropped from the batch "
+             )
+
+         targets_modified = {k: [d[k] for d in targets] for k in keys_min}
          for key_name, item_values in targets_modified.items():
-             if key_name not in self.skip_stacking_list:
-                 first_element = item_values[0]
-                 if isinstance(first_element, torch.Tensor):
-                     item_values = torch.stack(item_values)
-                 elif isinstance(first_element, (int, float)):
-                     item_values = torch.tensor(item_values)
-                 elif isinstance(first_element, (str, list)):
-                     # Skip stacking for certain types such as strings and lists
-                     pass
-                 if isinstance(first_element, tv_tensors.Mask):
-                     item_values = tv_tensors.Mask(item_values)
-                 elif isinstance(first_element, tv_tensors.Image):
-                     item_values = tv_tensors.Image(item_values)
-                 elif isinstance(first_element, tv_tensors.BoundingBoxes):
-                     item_values = tv_tensors.BoundingBoxes(item_values)
-                 targets_modified[key_name] = item_values
+             if self.skip_key_name(key_name):
+                 continue
+             first_element = item_values[0]
+             if isinstance(first_element, torch.Tensor):
+                 item_values = torch.stack(item_values)
+             elif isinstance(first_element, (int, float)):
+                 item_values = torch.tensor(item_values)
+             elif isinstance(first_element, (str, list)):
+                 # Skip stacking for certain types such as strings and lists
+                 pass
+             if isinstance(first_element, tv_tensors.Mask):
+                 item_values = tv_tensors.Mask(item_values)
+             elif isinstance(first_element, tv_tensors.Image):
+                 item_values = tv_tensors.Image(item_values)
+             elif isinstance(first_element, tv_tensors.BoundingBoxes):
+                 item_values = tv_tensors.BoundingBoxes(item_values)
+             targets_modified[key_name] = item_values

          return images, targets_modified
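
A minimal usage sketch of the reworked TorchvisionDataset and TorchVisionCollateFn (not part of the diff): HafniaDataset.from_path and the dataset path are hypothetical stand-ins for however a HafniaDataset is constructed in 0.2.0, and the exact target key names depend on the primitives' column_name() values, which live in files not shown here.

# Usage sketch, assuming a HafniaDataset instance can be loaded locally.
import torch
from torchvision.transforms import v2

from hafnia.dataset.hafnia_dataset import HafniaDataset
from hafnia.torch_helpers import TorchVisionCollateFn, TorchvisionDataset, draw_image_and_targets

dataset = HafniaDataset.from_path("./.data/datasets/<name>")  # hypothetical loader
torch_dataset = TorchvisionDataset(
    dataset,
    transforms=v2.Compose([v2.ToDtype(torch.float32, scale=True)]),
)

# A single sample: an image tensor plus a dot-flattened target dict with keys like
# "<primitive-column>.<task-name>.<field>".
image, target = torch_dataset[0]
overlay = draw_image_and_targets(image, target)

# Batching: the collate fn stacks tensors per key and, by default, skips stacking
# of the bbox/bitmask keys, which vary in length per image.
loader = torch.utils.data.DataLoader(torch_dataset, batch_size=4, collate_fn=TorchVisionCollateFn())
images, targets = next(iter(loader))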
hafnia/utils.py CHANGED
@@ -14,7 +14,7 @@ from rich import print as rprint
  from hafnia.log import sys_logger, user_logger

  PATH_DATA = Path("./.data")
- PATH_DATASET = PATH_DATA / "datasets"
+ PATH_DATASETS = PATH_DATA / "datasets"
  PATH_RECIPES = PATH_DATA / "recipes"
  FILENAME_HAFNIAIGNORE = ".hafniaignore"
  DEFAULT_IGNORE_SPECIFICATION = [
hafnia/visualizations/colors.py ADDED
@@ -0,0 +1,267 @@
+ from typing import List, Tuple
+
+
+ def get_n_colors(index: int) -> List[Tuple[int, int, int]]:
+     n_colors = len(COLORS)
+     colors = [COLORS[index % n_colors] for index in range(index)]
+     return colors
+
+
+ COLORS = [
+     (210, 24, 32),
+     (24, 105, 255),
+     (0, 138, 0),
+     (243, 109, 255),
+     (113, 0, 121),
+     (170, 251, 0),
+     (0, 190, 194),
+     (255, 162, 53),
+     (93, 61, 4),
+     (8, 0, 138),
+     (0, 93, 93),
+     (154, 125, 130),
+     (162, 174, 255),
+     (150, 182, 117),
+     (158, 40, 255),
+     (77, 0, 20),
+     (255, 174, 190),
+     (206, 0, 146),
+     (0, 255, 182),
+     (0, 45, 0),
+     (158, 117, 0),
+     (61, 53, 65),
+     (243, 235, 146),
+     (101, 97, 138),
+     (138, 61, 77),
+     (89, 4, 186),
+     (85, 138, 113),
+     (178, 190, 194),
+     (255, 93, 130),
+     (28, 198, 0),
+     (146, 247, 255),
+     (45, 134, 166),
+     (57, 93, 40),
+     (235, 206, 255),
+     (255, 93, 0),
+     (166, 97, 170),
+     (134, 0, 0),
+     (53, 0, 89),
+     (0, 81, 142),
+     (158, 73, 16),
+     (206, 190, 0),
+     (0, 40, 40),
+     (0, 178, 255),
+     (202, 166, 134),
+     (190, 154, 194),
+     (45, 32, 12),
+     (117, 101, 69),
+     (130, 121, 223),
+     (0, 194, 138),
+     (186, 231, 194),
+     (134, 142, 166),
+     (202, 113, 89),
+     (130, 154, 0),
+     (45, 0, 255),
+     (210, 4, 247),
+     (255, 215, 190),
+     (146, 206, 247),
+     (186, 93, 125),
+     (255, 65, 194),
+     (190, 134, 255),
+     (146, 142, 101),
+     (166, 4, 170),
+     (134, 227, 117),
+     (73, 0, 61),
+     (251, 239, 12),
+     (105, 85, 93),
+     (89, 49, 45),
+     (105, 53, 255),
+     (182, 4, 77),
+     (93, 109, 113),
+     (65, 69, 53),
+     (101, 113, 0),
+     (121, 0, 73),
+     (28, 49, 81),
+     (121, 65, 158),
+     (255, 146, 113),
+     (255, 166, 243),
+     (186, 158, 65),
+     (130, 170, 154),
+     (215, 121, 0),
+     (73, 61, 113),
+     (81, 162, 85),
+     (231, 130, 182),
+     (210, 227, 251),
+     (0, 73, 49),
+     (109, 219, 194),
+     (61, 77, 93),
+     (97, 53, 85),
+     (0, 113, 81),
+     (93, 24, 0),
+     (154, 93, 81),
+     (85, 142, 219),
+     (202, 202, 154),
+     (53, 24, 32),
+     (57, 61, 0),
+     (0, 154, 150),
+     (235, 16, 109),
+     (138, 69, 121),
+     (117, 170, 194),
+     (202, 146, 154),
+     (210, 186, 198),
+     (154, 206, 0),
+     (69, 109, 170),
+     (117, 89, 0),
+     (206, 77, 12),
+     (0, 223, 251),
+     (255, 61, 65),
+     (255, 202, 73),
+     (45, 49, 146),
+     (134, 105, 134),
+     (158, 130, 190),
+     (206, 174, 255),
+     (121, 69, 45),
+     (198, 251, 130),
+     (93, 117, 73),
+     (182, 69, 73),
+     (255, 223, 239),
+     (162, 0, 113),
+     (77, 77, 166),
+     (166, 170, 202),
+     (113, 28, 40),
+     (40, 121, 121),
+     (8, 73, 0),
+     (0, 105, 134),
+     (166, 117, 73),
+     (251, 182, 130),
+     (85, 24, 125),
+     (0, 255, 89),
+     (0, 65, 77),
+     (109, 142, 146),
+     (170, 36, 0),
+     (190, 210, 109),
+     (138, 97, 186),
+     (210, 65, 190),
+     (73, 97, 81),
+     (206, 243, 239),
+     (97, 194, 97),
+     (20, 138, 77),
+     (0, 255, 231),
+     (0, 105, 0),
+     (178, 121, 158),
+     (170, 178, 158),
+     (186, 85, 255),
+     (198, 121, 206),
+     (32, 49, 32),
+     (125, 4, 219),
+     (194, 198, 247),
+     (138, 198, 206),
+     (231, 235, 206),
+     (40, 28, 57),
+     (158, 255, 174),
+     (130, 206, 154),
+     (49, 166, 12),
+     (0, 162, 117),
+     (219, 146, 85),
+     (61, 20, 4),
+     (255, 138, 154),
+     (130, 134, 53),
+     (105, 77, 113),
+     (182, 97, 0),
+     (125, 45, 0),
+     (162, 178, 57),
+     (49, 4, 125),
+     (166, 61, 202),
+     (154, 32, 45),
+     (4, 223, 134),
+     (117, 125, 109),
+     (138, 150, 210),
+     (8, 162, 202),
+     (247, 109, 93),
+     (16, 85, 202),
+     (219, 182, 101),
+     (146, 89, 109),
+     (162, 255, 227),
+     (89, 85, 40),
+     (113, 121, 170),
+     (215, 89, 101),
+     (73, 32, 81),
+     (223, 77, 146),
+     (0, 0, 202),
+     (93, 101, 210),
+     (223, 166, 0),
+     (178, 73, 146),
+     (182, 138, 117),
+     (97, 77, 61),
+     (166, 150, 162),
+     (85, 28, 53),
+     (49, 65, 65),
+     (117, 117, 134),
+     (146, 158, 162),
+     (117, 154, 113),
+     (255, 130, 32),
+     (134, 85, 255),
+     (154, 198, 182),
+     (223, 150, 243),
+     (202, 223, 49),
+     (142, 93, 40),
+     (53, 190, 227),
+     (113, 166, 255),
+     (89, 138, 49),
+     (255, 194, 235),
+     (170, 61, 105),
+     (73, 97, 125),
+     (73, 53, 28),
+     (69, 178, 158),
+     (28, 36, 49),
+     (247, 49, 239),
+     (117, 0, 166),
+     (231, 182, 170),
+     (130, 105, 101),
+     (227, 162, 202),
+     (32, 36, 0),
+     (121, 182, 16),
+     (158, 142, 255),
+     (210, 117, 138),
+     (202, 182, 219),
+     (174, 154, 223),
+     (255, 113, 219),
+     (210, 247, 178),
+     (198, 215, 206),
+     (255, 210, 138),
+     (93, 223, 53),
+     (93, 121, 146),
+     (162, 142, 0),
+     (174, 223, 239),
+     (113, 77, 194),
+     (125, 69, 0),
+     (101, 146, 182),
+     (93, 121, 255),
+     (81, 73, 89),
+     (150, 158, 81),
+     (206, 105, 174),
+     (101, 53, 117),
+     (219, 210, 227),
+     (182, 174, 117),
+     (81, 89, 0),
+     (182, 89, 57),
+     (85, 4, 235),
+     (61, 117, 45),
+     (146, 130, 154),
+     (130, 36, 105),
+     (186, 134, 57),
+     (138, 178, 227),
+     (109, 178, 130),
+     (150, 65, 53),
+     (109, 65, 73),
+     (138, 117, 61),
+     (178, 113, 117),
+     (146, 28, 73),
+     (223, 109, 49),
+     (0, 227, 223),
+     (146, 4, 202),
+     (49, 40, 89),
+     (0, 125, 210),
+     (162, 109, 255),
+     (130, 89, 146),
+ ]