dgenerate-ultralytics-headless 8.3.134 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
  2. dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
  3. dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
  4. dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
  5. dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
  6. dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
  7. tests/__init__.py +22 -0
  8. tests/conftest.py +83 -0
  9. tests/test_cli.py +138 -0
  10. tests/test_cuda.py +215 -0
  11. tests/test_engine.py +131 -0
  12. tests/test_exports.py +236 -0
  13. tests/test_integrations.py +154 -0
  14. tests/test_python.py +694 -0
  15. tests/test_solutions.py +187 -0
  16. ultralytics/__init__.py +30 -0
  17. ultralytics/assets/bus.jpg +0 -0
  18. ultralytics/assets/zidane.jpg +0 -0
  19. ultralytics/cfg/__init__.py +1023 -0
  20. ultralytics/cfg/datasets/Argoverse.yaml +77 -0
  21. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  22. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  23. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  24. ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
  25. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  26. ultralytics/cfg/datasets/Objects365.yaml +443 -0
  27. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  28. ultralytics/cfg/datasets/VOC.yaml +106 -0
  29. ultralytics/cfg/datasets/VisDrone.yaml +77 -0
  30. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  31. ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
  32. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  33. ultralytics/cfg/datasets/coco-pose.yaml +42 -0
  34. ultralytics/cfg/datasets/coco.yaml +118 -0
  35. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  36. ultralytics/cfg/datasets/coco128.yaml +101 -0
  37. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  38. ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
  39. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  40. ultralytics/cfg/datasets/coco8.yaml +101 -0
  41. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  42. ultralytics/cfg/datasets/dog-pose.yaml +24 -0
  43. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  44. ultralytics/cfg/datasets/dota8.yaml +35 -0
  45. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
  46. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  47. ultralytics/cfg/datasets/medical-pills.yaml +22 -0
  48. ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
  49. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  50. ultralytics/cfg/datasets/signature.yaml +21 -0
  51. ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
  52. ultralytics/cfg/datasets/xView.yaml +155 -0
  53. ultralytics/cfg/default.yaml +127 -0
  54. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  55. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  56. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  57. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  58. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  59. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  60. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  61. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  62. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  63. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  64. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  65. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  66. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  68. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  69. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  70. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  71. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  74. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  75. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  76. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  77. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  78. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  79. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  80. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  81. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  82. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  83. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
  84. ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
  85. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  86. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  87. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  88. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  89. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  90. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  91. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  92. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  93. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  94. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  95. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  96. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  97. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  98. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  99. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  100. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  101. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  102. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  103. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  105. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  106. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  107. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  108. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  109. ultralytics/cfg/trackers/botsort.yaml +22 -0
  110. ultralytics/cfg/trackers/bytetrack.yaml +14 -0
  111. ultralytics/data/__init__.py +26 -0
  112. ultralytics/data/annotator.py +66 -0
  113. ultralytics/data/augment.py +2945 -0
  114. ultralytics/data/base.py +438 -0
  115. ultralytics/data/build.py +258 -0
  116. ultralytics/data/converter.py +754 -0
  117. ultralytics/data/dataset.py +834 -0
  118. ultralytics/data/loaders.py +676 -0
  119. ultralytics/data/scripts/download_weights.sh +18 -0
  120. ultralytics/data/scripts/get_coco.sh +61 -0
  121. ultralytics/data/scripts/get_coco128.sh +18 -0
  122. ultralytics/data/scripts/get_imagenet.sh +52 -0
  123. ultralytics/data/split.py +125 -0
  124. ultralytics/data/split_dota.py +325 -0
  125. ultralytics/data/utils.py +777 -0
  126. ultralytics/engine/__init__.py +1 -0
  127. ultralytics/engine/exporter.py +1519 -0
  128. ultralytics/engine/model.py +1156 -0
  129. ultralytics/engine/predictor.py +502 -0
  130. ultralytics/engine/results.py +1840 -0
  131. ultralytics/engine/trainer.py +853 -0
  132. ultralytics/engine/tuner.py +243 -0
  133. ultralytics/engine/validator.py +377 -0
  134. ultralytics/hub/__init__.py +168 -0
  135. ultralytics/hub/auth.py +137 -0
  136. ultralytics/hub/google/__init__.py +176 -0
  137. ultralytics/hub/session.py +446 -0
  138. ultralytics/hub/utils.py +248 -0
  139. ultralytics/models/__init__.py +9 -0
  140. ultralytics/models/fastsam/__init__.py +7 -0
  141. ultralytics/models/fastsam/model.py +61 -0
  142. ultralytics/models/fastsam/predict.py +181 -0
  143. ultralytics/models/fastsam/utils.py +24 -0
  144. ultralytics/models/fastsam/val.py +40 -0
  145. ultralytics/models/nas/__init__.py +7 -0
  146. ultralytics/models/nas/model.py +102 -0
  147. ultralytics/models/nas/predict.py +58 -0
  148. ultralytics/models/nas/val.py +39 -0
  149. ultralytics/models/rtdetr/__init__.py +7 -0
  150. ultralytics/models/rtdetr/model.py +63 -0
  151. ultralytics/models/rtdetr/predict.py +84 -0
  152. ultralytics/models/rtdetr/train.py +85 -0
  153. ultralytics/models/rtdetr/val.py +191 -0
  154. ultralytics/models/sam/__init__.py +6 -0
  155. ultralytics/models/sam/amg.py +260 -0
  156. ultralytics/models/sam/build.py +358 -0
  157. ultralytics/models/sam/model.py +170 -0
  158. ultralytics/models/sam/modules/__init__.py +1 -0
  159. ultralytics/models/sam/modules/blocks.py +1129 -0
  160. ultralytics/models/sam/modules/decoders.py +515 -0
  161. ultralytics/models/sam/modules/encoders.py +854 -0
  162. ultralytics/models/sam/modules/memory_attention.py +299 -0
  163. ultralytics/models/sam/modules/sam.py +1006 -0
  164. ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
  165. ultralytics/models/sam/modules/transformer.py +351 -0
  166. ultralytics/models/sam/modules/utils.py +394 -0
  167. ultralytics/models/sam/predict.py +1605 -0
  168. ultralytics/models/utils/__init__.py +1 -0
  169. ultralytics/models/utils/loss.py +455 -0
  170. ultralytics/models/utils/ops.py +268 -0
  171. ultralytics/models/yolo/__init__.py +7 -0
  172. ultralytics/models/yolo/classify/__init__.py +7 -0
  173. ultralytics/models/yolo/classify/predict.py +88 -0
  174. ultralytics/models/yolo/classify/train.py +233 -0
  175. ultralytics/models/yolo/classify/val.py +215 -0
  176. ultralytics/models/yolo/detect/__init__.py +7 -0
  177. ultralytics/models/yolo/detect/predict.py +124 -0
  178. ultralytics/models/yolo/detect/train.py +217 -0
  179. ultralytics/models/yolo/detect/val.py +451 -0
  180. ultralytics/models/yolo/model.py +354 -0
  181. ultralytics/models/yolo/obb/__init__.py +7 -0
  182. ultralytics/models/yolo/obb/predict.py +66 -0
  183. ultralytics/models/yolo/obb/train.py +81 -0
  184. ultralytics/models/yolo/obb/val.py +283 -0
  185. ultralytics/models/yolo/pose/__init__.py +7 -0
  186. ultralytics/models/yolo/pose/predict.py +79 -0
  187. ultralytics/models/yolo/pose/train.py +154 -0
  188. ultralytics/models/yolo/pose/val.py +394 -0
  189. ultralytics/models/yolo/segment/__init__.py +7 -0
  190. ultralytics/models/yolo/segment/predict.py +113 -0
  191. ultralytics/models/yolo/segment/train.py +123 -0
  192. ultralytics/models/yolo/segment/val.py +428 -0
  193. ultralytics/models/yolo/world/__init__.py +5 -0
  194. ultralytics/models/yolo/world/train.py +119 -0
  195. ultralytics/models/yolo/world/train_world.py +176 -0
  196. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  197. ultralytics/models/yolo/yoloe/predict.py +169 -0
  198. ultralytics/models/yolo/yoloe/train.py +298 -0
  199. ultralytics/models/yolo/yoloe/train_seg.py +124 -0
  200. ultralytics/models/yolo/yoloe/val.py +191 -0
  201. ultralytics/nn/__init__.py +29 -0
  202. ultralytics/nn/autobackend.py +842 -0
  203. ultralytics/nn/modules/__init__.py +182 -0
  204. ultralytics/nn/modules/activation.py +53 -0
  205. ultralytics/nn/modules/block.py +1966 -0
  206. ultralytics/nn/modules/conv.py +712 -0
  207. ultralytics/nn/modules/head.py +880 -0
  208. ultralytics/nn/modules/transformer.py +713 -0
  209. ultralytics/nn/modules/utils.py +164 -0
  210. ultralytics/nn/tasks.py +1627 -0
  211. ultralytics/nn/text_model.py +351 -0
  212. ultralytics/solutions/__init__.py +41 -0
  213. ultralytics/solutions/ai_gym.py +116 -0
  214. ultralytics/solutions/analytics.py +252 -0
  215. ultralytics/solutions/config.py +106 -0
  216. ultralytics/solutions/distance_calculation.py +124 -0
  217. ultralytics/solutions/heatmap.py +127 -0
  218. ultralytics/solutions/instance_segmentation.py +84 -0
  219. ultralytics/solutions/object_blurrer.py +90 -0
  220. ultralytics/solutions/object_counter.py +195 -0
  221. ultralytics/solutions/object_cropper.py +84 -0
  222. ultralytics/solutions/parking_management.py +273 -0
  223. ultralytics/solutions/queue_management.py +93 -0
  224. ultralytics/solutions/region_counter.py +120 -0
  225. ultralytics/solutions/security_alarm.py +154 -0
  226. ultralytics/solutions/similarity_search.py +172 -0
  227. ultralytics/solutions/solutions.py +724 -0
  228. ultralytics/solutions/speed_estimation.py +110 -0
  229. ultralytics/solutions/streamlit_inference.py +196 -0
  230. ultralytics/solutions/templates/similarity-search.html +160 -0
  231. ultralytics/solutions/trackzone.py +88 -0
  232. ultralytics/solutions/vision_eye.py +68 -0
  233. ultralytics/trackers/__init__.py +7 -0
  234. ultralytics/trackers/basetrack.py +124 -0
  235. ultralytics/trackers/bot_sort.py +260 -0
  236. ultralytics/trackers/byte_tracker.py +480 -0
  237. ultralytics/trackers/track.py +125 -0
  238. ultralytics/trackers/utils/__init__.py +1 -0
  239. ultralytics/trackers/utils/gmc.py +376 -0
  240. ultralytics/trackers/utils/kalman_filter.py +493 -0
  241. ultralytics/trackers/utils/matching.py +157 -0
  242. ultralytics/utils/__init__.py +1435 -0
  243. ultralytics/utils/autobatch.py +106 -0
  244. ultralytics/utils/autodevice.py +174 -0
  245. ultralytics/utils/benchmarks.py +695 -0
  246. ultralytics/utils/callbacks/__init__.py +5 -0
  247. ultralytics/utils/callbacks/base.py +234 -0
  248. ultralytics/utils/callbacks/clearml.py +153 -0
  249. ultralytics/utils/callbacks/comet.py +552 -0
  250. ultralytics/utils/callbacks/dvc.py +205 -0
  251. ultralytics/utils/callbacks/hub.py +108 -0
  252. ultralytics/utils/callbacks/mlflow.py +138 -0
  253. ultralytics/utils/callbacks/neptune.py +140 -0
  254. ultralytics/utils/callbacks/raytune.py +43 -0
  255. ultralytics/utils/callbacks/tensorboard.py +132 -0
  256. ultralytics/utils/callbacks/wb.py +185 -0
  257. ultralytics/utils/checks.py +897 -0
  258. ultralytics/utils/dist.py +119 -0
  259. ultralytics/utils/downloads.py +499 -0
  260. ultralytics/utils/errors.py +43 -0
  261. ultralytics/utils/export.py +219 -0
  262. ultralytics/utils/files.py +221 -0
  263. ultralytics/utils/instance.py +499 -0
  264. ultralytics/utils/loss.py +813 -0
  265. ultralytics/utils/metrics.py +1356 -0
  266. ultralytics/utils/ops.py +885 -0
  267. ultralytics/utils/patches.py +143 -0
  268. ultralytics/utils/plotting.py +1011 -0
  269. ultralytics/utils/tal.py +416 -0
  270. ultralytics/utils/torch_utils.py +990 -0
  271. ultralytics/utils/triton.py +116 -0
  272. ultralytics/utils/tuner.py +159 -0
ultralytics/data/base.py
@@ -0,0 +1,438 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ import glob
+ import math
+ import os
+ import random
+ from copy import deepcopy
+ from multiprocessing.pool import ThreadPool
+ from pathlib import Path
+ from typing import Optional
+
+ import cv2
+ import numpy as np
+ from torch.utils.data import Dataset
+
+ from ultralytics.data.utils import FORMATS_HELP_MSG, HELP_URL, IMG_FORMATS, check_file_speeds
+ from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM
+ from ultralytics.utils.patches import imread
+
+
+ class BaseDataset(Dataset):
+     """
+     Base dataset class for loading and processing image data.
+
+     This class provides core functionality for loading images, caching, and preparing data for training and inference
+     in object detection tasks.
+
+     Attributes:
+         img_path (str): Path to the folder containing images.
+         imgsz (int): Target image size for resizing.
+         augment (bool): Whether to apply data augmentation.
+         single_cls (bool): Whether to treat all objects as a single class.
+         prefix (str): Prefix to print in log messages.
+         fraction (float): Fraction of dataset to utilize.
+         cv2_flag (int): OpenCV flag for reading images.
+         im_files (List[str]): List of image file paths.
+         labels (List[Dict]): List of label data dictionaries.
+         ni (int): Number of images in the dataset.
+         rect (bool): Whether to use rectangular training.
+         batch_size (int): Size of batches.
+         stride (int): Stride used in the model.
+         pad (float): Padding value.
+         buffer (list): Buffer for mosaic images.
+         max_buffer_length (int): Maximum buffer size.
+         ims (list): List of loaded images.
+         im_hw0 (list): List of original image dimensions (h, w).
+         im_hw (list): List of resized image dimensions (h, w).
+         npy_files (List[Path]): List of numpy file paths.
+         cache (str): Cache images to RAM or disk during training.
+         transforms (callable): Image transformation function.
+
+     Methods:
+         get_img_files: Read image files from the specified path.
+         update_labels: Update labels to include only specified classes.
+         load_image: Load an image from the dataset.
+         cache_images: Cache images to memory or disk.
+         cache_images_to_disk: Save an image as an *.npy file for faster loading.
+         check_cache_disk: Check image caching requirements vs available disk space.
+         check_cache_ram: Check image caching requirements vs available memory.
+         set_rectangle: Set the shape of bounding boxes as rectangles.
+         get_image_and_label: Get and return label information from the dataset.
+         update_labels_info: Custom label format method to be implemented by subclasses.
+         build_transforms: Build transformation pipeline to be implemented by subclasses.
+         get_labels: Get labels method to be implemented by subclasses.
+     """
+
+     def __init__(
+         self,
+         img_path,
+         imgsz=640,
+         cache=False,
+         augment=True,
+         hyp=DEFAULT_CFG,
+         prefix="",
+         rect=False,
+         batch_size=16,
+         stride=32,
+         pad=0.5,
+         single_cls=False,
+         classes=None,
+         fraction=1.0,
+         channels=3,
+     ):
+         """
+         Initialize BaseDataset with given configuration and options.
+
+         Args:
+             img_path (str): Path to the folder containing images.
+             imgsz (int, optional): Image size for resizing.
+             cache (bool | str, optional): Cache images to RAM or disk during training.
+             augment (bool, optional): If True, data augmentation is applied.
+             hyp (dict, optional): Hyperparameters to apply data augmentation.
+             prefix (str, optional): Prefix to print in log messages.
+             rect (bool, optional): If True, rectangular training is used.
+             batch_size (int, optional): Size of batches.
+             stride (int, optional): Stride used in the model.
+             pad (float, optional): Padding value.
+             single_cls (bool, optional): If True, single class training is used.
+             classes (list, optional): List of included classes.
+             fraction (float, optional): Fraction of dataset to utilize.
+             channels (int, optional): Number of channels in the images (1 for grayscale, 3 for RGB).
+         """
+         super().__init__()
+         self.img_path = img_path
+         self.imgsz = imgsz
+         self.augment = augment
+         self.single_cls = single_cls
+         self.prefix = prefix
+         self.fraction = fraction
+         self.channels = channels
+         self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR
+         self.im_files = self.get_img_files(self.img_path)
+         self.labels = self.get_labels()
+         self.update_labels(include_class=classes)  # single_cls and include_class
+         self.ni = len(self.labels)  # number of images
+         self.rect = rect
+         self.batch_size = batch_size
+         self.stride = stride
+         self.pad = pad
+         if self.rect:
+             assert self.batch_size is not None
+             self.set_rectangle()
+
+         # Buffer thread for mosaic images
+         self.buffer = []  # buffer size = batch size
+         self.max_buffer_length = min((self.ni, self.batch_size * 8, 1000)) if self.augment else 0
+
+         # Cache images (options are cache = True, False, None, "ram", "disk")
+         self.ims, self.im_hw0, self.im_hw = [None] * self.ni, [None] * self.ni, [None] * self.ni
+         self.npy_files = [Path(f).with_suffix(".npy") for f in self.im_files]
+         self.cache = cache.lower() if isinstance(cache, str) else "ram" if cache is True else None
+         if self.cache == "ram" and self.check_cache_ram():
+             if hyp.deterministic:
+                 LOGGER.warning(
+                     "cache='ram' may produce non-deterministic training results. "
+                     "Consider cache='disk' as a deterministic alternative if your disk space allows."
+                 )
+             self.cache_images()
+         elif self.cache == "disk" and self.check_cache_disk():
+             self.cache_images()
+
+         # Transforms
+         self.transforms = self.build_transforms(hyp=hyp)
+
+     def get_img_files(self, img_path):
+         """
+         Read image files from the specified path.
+
+         Args:
+             img_path (str | List[str]): Path or list of paths to image directories or files.
+
+         Returns:
+             (List[str]): List of image file paths.
+
+         Raises:
+             FileNotFoundError: If no images are found or the path doesn't exist.
+         """
+         try:
+             f = []  # image files
+             for p in img_path if isinstance(img_path, list) else [img_path]:
+                 p = Path(p)  # os-agnostic
+                 if p.is_dir():  # dir
+                     f += glob.glob(str(p / "**" / "*.*"), recursive=True)
+                     # F = list(p.rglob('*.*'))  # pathlib
+                 elif p.is_file():  # file
+                     with open(p, encoding="utf-8") as t:
+                         t = t.read().strip().splitlines()
+                         parent = str(p.parent) + os.sep
+                         f += [x.replace("./", parent) if x.startswith("./") else x for x in t]  # local to global path
+                         # F += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
+                 else:
+                     raise FileNotFoundError(f"{self.prefix}{p} does not exist")
+             im_files = sorted(x.replace("/", os.sep) for x in f if x.split(".")[-1].lower() in IMG_FORMATS)
+             # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS])  # pathlib
+             assert im_files, f"{self.prefix}No images found in {img_path}. {FORMATS_HELP_MSG}"
+         except Exception as e:
+             raise FileNotFoundError(f"{self.prefix}Error loading data from {img_path}\n{HELP_URL}") from e
+         if self.fraction < 1:
+             im_files = im_files[: round(len(im_files) * self.fraction)]  # retain a fraction of the dataset
+         check_file_speeds(im_files, prefix=self.prefix)  # check image read speeds
+         return im_files
+
+     def update_labels(self, include_class: Optional[list]):
+         """
+         Update labels to include only specified classes.
+
+         Args:
+             include_class (list, optional): List of classes to include. If None, all classes are included.
+         """
+         include_class_array = np.array(include_class).reshape(1, -1)
+         for i in range(len(self.labels)):
+             if include_class is not None:
+                 cls = self.labels[i]["cls"]
+                 bboxes = self.labels[i]["bboxes"]
+                 segments = self.labels[i]["segments"]
+                 keypoints = self.labels[i]["keypoints"]
+                 j = (cls == include_class_array).any(1)
+                 self.labels[i]["cls"] = cls[j]
+                 self.labels[i]["bboxes"] = bboxes[j]
+                 if segments:
+                     self.labels[i]["segments"] = [segments[si] for si, idx in enumerate(j) if idx]
+                 if keypoints is not None:
+                     self.labels[i]["keypoints"] = keypoints[j]
+             if self.single_cls:
+                 self.labels[i]["cls"][:, 0] = 0
+
+     def load_image(self, i, rect_mode=True):
+         """
+         Load an image from dataset index 'i'.
+
+         Args:
+             i (int): Index of the image to load.
+             rect_mode (bool, optional): Whether to use rectangular resizing.
+
+         Returns:
+             (np.ndarray): Loaded image as a NumPy array.
+             (Tuple[int, int]): Original image dimensions in (height, width) format.
+             (Tuple[int, int]): Resized image dimensions in (height, width) format.
+
+         Raises:
+             FileNotFoundError: If the image file is not found.
+         """
+         im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
+         if im is None:  # not cached in RAM
+             if fn.exists():  # load npy
+                 try:
+                     im = np.load(fn)
+                 except Exception as e:
+                     LOGGER.warning(f"{self.prefix}Removing corrupt *.npy image file {fn} due to: {e}")
+                     Path(fn).unlink(missing_ok=True)
+                     im = imread(f, flags=self.cv2_flag)  # BGR
+             else:  # read image
+                 im = imread(f, flags=self.cv2_flag)  # BGR
+             if im is None:
+                 raise FileNotFoundError(f"Image Not Found {f}")
+
+             h0, w0 = im.shape[:2]  # orig hw
+             if rect_mode:  # resize long side to imgsz while maintaining aspect ratio
+                 r = self.imgsz / max(h0, w0)  # ratio
+                 if r != 1:  # if sizes are not equal
+                     w, h = (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz))
+                     im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
+             elif not (h0 == w0 == self.imgsz):  # resize by stretching image to square imgsz
+                 im = cv2.resize(im, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)
+             if im.ndim == 2:
+                 im = im[..., None]
+
+             # Add to buffer if training with augmentations
+             if self.augment:
+                 self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
+                 self.buffer.append(i)
+                 if 1 < len(self.buffer) >= self.max_buffer_length:  # prevent empty buffer
+                     j = self.buffer.pop(0)
+                     if self.cache != "ram":
+                         self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None
+
+             return im, (h0, w0), im.shape[:2]
+
+         return self.ims[i], self.im_hw0[i], self.im_hw[i]
+
+     def cache_images(self):
+         """Cache images to memory or disk for faster training."""
+         b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes
+         fcn, storage = (self.cache_images_to_disk, "Disk") if self.cache == "disk" else (self.load_image, "RAM")
+         with ThreadPool(NUM_THREADS) as pool:
+             results = pool.imap(fcn, range(self.ni))
+             pbar = TQDM(enumerate(results), total=self.ni, disable=LOCAL_RANK > 0)
+             for i, x in pbar:
+                 if self.cache == "disk":
+                     b += self.npy_files[i].stat().st_size
+                 else:  # 'ram'
+                     self.ims[i], self.im_hw0[i], self.im_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)
+                     b += self.ims[i].nbytes
+                 pbar.desc = f"{self.prefix}Caching images ({b / gb:.1f}GB {storage})"
+             pbar.close()
+
+     def cache_images_to_disk(self, i):
+         """Save an image as an *.npy file for faster loading."""
+         f = self.npy_files[i]
+         if not f.exists():
+             np.save(f.as_posix(), imread(self.im_files[i]), allow_pickle=False)
+
+     def check_cache_disk(self, safety_margin=0.5):
+         """
+         Check if there's enough disk space for caching images.
+
+         Args:
+             safety_margin (float, optional): Safety margin factor for disk space calculation.
+
+         Returns:
+             (bool): True if there's enough disk space, False otherwise.
+         """
+         import shutil
+
+         b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes
+         n = min(self.ni, 30)  # extrapolate from 30 random images
+         for _ in range(n):
+             im_file = random.choice(self.im_files)
+             im = imread(im_file)
+             if im is None:
+                 continue
+             b += im.nbytes
+             if not os.access(Path(im_file).parent, os.W_OK):
+                 self.cache = None
+                 LOGGER.warning(f"{self.prefix}Skipping caching images to disk, directory not writeable")
+                 return False
+         disk_required = b * self.ni / n * (1 + safety_margin)  # bytes required to cache dataset to disk
+         total, used, free = shutil.disk_usage(Path(self.im_files[0]).parent)
+         if disk_required > free:
+             self.cache = None
+             LOGGER.warning(
+                 f"{self.prefix}{disk_required / gb:.1f}GB disk space required, "
+                 f"with {int(safety_margin * 100)}% safety margin but only "
+                 f"{free / gb:.1f}/{total / gb:.1f}GB free, not caching images to disk"
+             )
+             return False
+         return True
+
+     def check_cache_ram(self, safety_margin=0.5):
+         """
+         Check if there's enough RAM for caching images.
+
+         Args:
+             safety_margin (float, optional): Safety margin factor for RAM calculation.
+
+         Returns:
+             (bool): True if there's enough RAM, False otherwise.
+         """
+         b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes
+         n = min(self.ni, 30)  # extrapolate from 30 random images
+         for _ in range(n):
+             im = imread(random.choice(self.im_files))  # sample image
+             if im is None:
+                 continue
+             ratio = self.imgsz / max(im.shape[0], im.shape[1])  # max(h, w)  # ratio
+             b += im.nbytes * ratio**2
+         mem_required = b * self.ni / n * (1 + safety_margin)  # GB required to cache dataset into RAM
+         mem = __import__("psutil").virtual_memory()
+         if mem_required > mem.available:
+             self.cache = None
+             LOGGER.warning(
+                 f"{self.prefix}{mem_required / gb:.1f}GB RAM required to cache images "
+                 f"with {int(safety_margin * 100)}% safety margin but only "
+                 f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, not caching images"
+             )
+             return False
+         return True
+
+     def set_rectangle(self):
+         """Set the shape of bounding boxes for YOLO detections as rectangles."""
+         bi = np.floor(np.arange(self.ni) / self.batch_size).astype(int)  # batch index
+         nb = bi[-1] + 1  # number of batches
+
+         s = np.array([x.pop("shape") for x in self.labels])  # hw
+         ar = s[:, 0] / s[:, 1]  # aspect ratio
+         irect = ar.argsort()
+         self.im_files = [self.im_files[i] for i in irect]
+         self.labels = [self.labels[i] for i in irect]
+         ar = ar[irect]
+
+         # Set training image shapes
+         shapes = [[1, 1]] * nb
+         for i in range(nb):
+             ari = ar[bi == i]
+             mini, maxi = ari.min(), ari.max()
+             if maxi < 1:
+                 shapes[i] = [maxi, 1]
+             elif mini > 1:
+                 shapes[i] = [1, 1 / mini]
+
+         self.batch_shapes = np.ceil(np.array(shapes) * self.imgsz / self.stride + self.pad).astype(int) * self.stride
+         self.batch = bi  # batch index of image
+
+     def __getitem__(self, index):
+         """Return transformed label information for given index."""
+         return self.transforms(self.get_image_and_label(index))
+
+     def get_image_and_label(self, index):
+         """
+         Get and return label information from the dataset.
+
+         Args:
+             index (int): Index of the image to retrieve.
+
+         Returns:
+             (dict): Label dictionary with image and metadata.
+         """
+         label = deepcopy(self.labels[index])  # requires deepcopy() https://github.com/ultralytics/ultralytics/pull/1948
+         label.pop("shape", None)  # shape is for rect, remove it
+         label["img"], label["ori_shape"], label["resized_shape"] = self.load_image(index)
+         label["ratio_pad"] = (
+             label["resized_shape"][0] / label["ori_shape"][0],
+             label["resized_shape"][1] / label["ori_shape"][1],
+         )  # for evaluation
+         if self.rect:
+             label["rect_shape"] = self.batch_shapes[self.batch[index]]
+         return self.update_labels_info(label)
+
+     def __len__(self):
+         """Return the length of the labels list for the dataset."""
+         return len(self.labels)
+
+     def update_labels_info(self, label):
+         """Custom your label format here."""
+         return label
+
+     def build_transforms(self, hyp=None):
+         """
+         Users can customize augmentations here.
+
+         Examples:
+             >>> if self.augment:
+             ...     # Training transforms
+             ...     return Compose([])
+             >>> else:
+             ...     # Val transforms
+             ...     return Compose([])
+         """
+         raise NotImplementedError
+
+     def get_labels(self):
+         """
+         Users can customize their own format here.
+
+         Examples:
+             Ensure output is a dictionary with the following keys:
+             >>> dict(
+             ...     im_file=im_file,
+             ...     shape=shape,  # format: (height, width)
+             ...     cls=cls,
+             ...     bboxes=bboxes,  # xywh
+             ...     segments=segments,  # xy
+             ...     keypoints=keypoints,  # xy
+             ...     normalized=True,  # or False
+             ...     bbox_format="xyxy",  # or xywh, ltwh
+             ... )
+         """
+         raise NotImplementedError
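
Note: the two NotImplementedError stubs above (get_labels and build_transforms) define the contract a concrete dataset must satisfy. The following is a minimal illustrative sketch, not part of this package: the MinimalDataset and IdentityTransform names are hypothetical, and the sketch simply fabricates one empty label per image using the keys documented in the get_labels docstring.

import numpy as np

from ultralytics.data.base import BaseDataset


class IdentityTransform:
    """Pass-through transform that returns the label dict unchanged."""

    def __call__(self, label):
        return label


class MinimalDataset(BaseDataset):
    """Toy BaseDataset subclass for illustration; fabricates one empty label per image."""

    def get_labels(self):
        # One label dict per image file, using the keys documented in BaseDataset.get_labels
        return [
            dict(
                im_file=f,
                shape=(640, 640),  # (height, width); a real subclass would read this from the image
                cls=np.zeros((0, 1), dtype=np.float32),
                bboxes=np.zeros((0, 4), dtype=np.float32),
                segments=[],
                keypoints=None,
                normalized=True,
                bbox_format="xywh",
            )
            for f in self.im_files
        ]

    def build_transforms(self, hyp=None):
        # A real subclass would return an augmentation pipeline; here labels pass through untouched
        return IdentityTransform()


# ds = MinimalDataset(img_path="path/to/images", imgsz=640, augment=False)  # hypothetical usage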
ultralytics/data/build.py
@@ -0,0 +1,258 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ import os
+ import random
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+ from PIL import Image
+ from torch.utils.data import dataloader, distributed
+
+ from ultralytics.data.dataset import GroundingDataset, YOLODataset, YOLOMultiModalDataset
+ from ultralytics.data.loaders import (
+     LOADERS,
+     LoadImagesAndVideos,
+     LoadPilAndNumpy,
+     LoadScreenshots,
+     LoadStreams,
+     LoadTensor,
+     SourceTypes,
+     autocast_list,
+ )
+ from ultralytics.data.utils import IMG_FORMATS, PIN_MEMORY, VID_FORMATS
+ from ultralytics.utils import RANK, colorstr
+ from ultralytics.utils.checks import check_file
+
+
+ class InfiniteDataLoader(dataloader.DataLoader):
+     """
+     Dataloader that reuses workers.
+
+     This dataloader extends the PyTorch DataLoader to provide infinite recycling of workers, which improves efficiency
+     for training loops that need to iterate through the dataset multiple times.
+
+     Attributes:
+         batch_sampler (_RepeatSampler): A sampler that repeats indefinitely.
+         iterator (Iterator): The iterator from the parent DataLoader.
+
+     Methods:
+         __len__: Returns the length of the batch sampler's sampler.
+         __iter__: Creates a sampler that repeats indefinitely.
+         __del__: Ensures workers are properly terminated.
+         reset: Resets the iterator, useful when modifying dataset settings during training.
+     """
+
+     def __init__(self, *args, **kwargs):
+         """Initialize the InfiniteDataLoader with the same arguments as DataLoader."""
+         super().__init__(*args, **kwargs)
+         object.__setattr__(self, "batch_sampler", _RepeatSampler(self.batch_sampler))
+         self.iterator = super().__iter__()
+
+     def __len__(self):
+         """Return the length of the batch sampler's sampler."""
+         return len(self.batch_sampler.sampler)
+
+     def __iter__(self):
+         """Create an iterator that yields indefinitely from the underlying iterator."""
+         for _ in range(len(self)):
+             yield next(self.iterator)
+
+     def __del__(self):
+         """Ensure that workers are properly terminated when the dataloader is deleted."""
+         try:
+             if not hasattr(self.iterator, "_workers"):
+                 return
+             for w in self.iterator._workers:  # force terminate
+                 if w.is_alive():
+                     w.terminate()
+             self.iterator._shutdown_workers()  # cleanup
+         except Exception:
+             pass
+
+     def reset(self):
+         """Reset the iterator to allow modifications to the dataset during training."""
+         self.iterator = self._get_iterator()
+
+
+ class _RepeatSampler:
+     """
+     Sampler that repeats forever.
+
+     This sampler wraps another sampler and yields its contents indefinitely, allowing for infinite iteration
+     over a dataset.
+
+     Attributes:
+         sampler (Dataset.sampler): The sampler to repeat.
+     """
+
+     def __init__(self, sampler):
+         """Initialize the _RepeatSampler with a sampler to repeat indefinitely."""
+         self.sampler = sampler
+
+     def __iter__(self):
+         """Iterate over the sampler indefinitely, yielding its contents."""
+         while True:
+             yield from iter(self.sampler)
+
+
+ def seed_worker(worker_id):  # noqa
+     """Set dataloader worker seed for reproducibility across worker processes."""
+     worker_seed = torch.initial_seed() % 2**32
+     np.random.seed(worker_seed)
+     random.seed(worker_seed)
+
+
+ def build_yolo_dataset(cfg, img_path, batch, data, mode="train", rect=False, stride=32, multi_modal=False):
+     """Build and return a YOLO dataset based on configuration parameters."""
+     dataset = YOLOMultiModalDataset if multi_modal else YOLODataset
+     return dataset(
+         img_path=img_path,
+         imgsz=cfg.imgsz,
+         batch_size=batch,
+         augment=mode == "train",  # augmentation
+         hyp=cfg,  # TODO: probably add a get_hyps_from_cfg function
+         rect=cfg.rect or rect,  # rectangular batches
+         cache=cfg.cache or None,
+         single_cls=cfg.single_cls or False,
+         stride=int(stride),
+         pad=0.0 if mode == "train" else 0.5,
+         prefix=colorstr(f"{mode}: "),
+         task=cfg.task,
+         classes=cfg.classes,
+         data=data,
+         fraction=cfg.fraction if mode == "train" else 1.0,
+     )
+
+
+ def build_grounding(cfg, img_path, json_file, batch, mode="train", rect=False, stride=32):
+     """Build and return a GroundingDataset based on configuration parameters."""
+     return GroundingDataset(
+         img_path=img_path,
+         json_file=json_file,
+         imgsz=cfg.imgsz,
+         batch_size=batch,
+         augment=mode == "train",  # augmentation
+         hyp=cfg,  # TODO: probably add a get_hyps_from_cfg function
+         rect=cfg.rect or rect,  # rectangular batches
+         cache=cfg.cache or None,
+         single_cls=cfg.single_cls or False,
+         stride=int(stride),
+         pad=0.0 if mode == "train" else 0.5,
+         prefix=colorstr(f"{mode}: "),
+         task=cfg.task,
+         classes=cfg.classes,
+         fraction=cfg.fraction if mode == "train" else 1.0,
+     )
+
+
+ def build_dataloader(dataset, batch, workers, shuffle=True, rank=-1):
+     """
+     Create and return an InfiniteDataLoader or DataLoader for training or validation.
+
+     Args:
+         dataset (Dataset): Dataset to load data from.
+         batch (int): Batch size for the dataloader.
+         workers (int): Number of worker threads for loading data.
+         shuffle (bool): Whether to shuffle the dataset.
+         rank (int): Process rank in distributed training. -1 for single-GPU training.
+
+     Returns:
+         (InfiniteDataLoader): A dataloader that can be used for training or validation.
+     """
+     batch = min(batch, len(dataset))
+     nd = torch.cuda.device_count()  # number of CUDA devices
+     nw = min(os.cpu_count() // max(nd, 1), workers)  # number of workers
+     sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
+     generator = torch.Generator()
+     generator.manual_seed(6148914691236517205 + RANK)
+     return InfiniteDataLoader(
+         dataset=dataset,
+         batch_size=batch,
+         shuffle=shuffle and sampler is None,
+         num_workers=nw,
+         sampler=sampler,
+         pin_memory=PIN_MEMORY,
+         collate_fn=getattr(dataset, "collate_fn", None),
+         worker_init_fn=seed_worker,
+         generator=generator,
+     )
+
+
+ def check_source(source):
+     """
+     Check the type of input source and return corresponding flag values.
+
+     Args:
+         source (str | int | Path | List | Tuple | np.ndarray | PIL.Image | torch.Tensor): The input source to check.
+
+     Returns:
+         source (str | int | Path | List | Tuple | np.ndarray | PIL.Image | torch.Tensor): The processed source.
+         webcam (bool): Whether the source is a webcam.
+         screenshot (bool): Whether the source is a screenshot.
+         from_img (bool): Whether the source is an image or list of images.
+         in_memory (bool): Whether the source is an in-memory object.
+         tensor (bool): Whether the source is a torch.Tensor.
+
+     Raises:
+         TypeError: If the source type is unsupported.
+     """
+     webcam, screenshot, from_img, in_memory, tensor = False, False, False, False, False
+     if isinstance(source, (str, int, Path)):  # int for local usb camera
+         source = str(source)
+         is_file = Path(source).suffix[1:] in (IMG_FORMATS | VID_FORMATS)
+         is_url = source.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://"))
+         webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
+         screenshot = source.lower() == "screen"
+         if is_url and is_file:
+             source = check_file(source)  # download
+     elif isinstance(source, LOADERS):
+         in_memory = True
+     elif isinstance(source, (list, tuple)):
+         source = autocast_list(source)  # convert all list elements to PIL or np arrays
+         from_img = True
+     elif isinstance(source, (Image.Image, np.ndarray)):
+         from_img = True
+     elif isinstance(source, torch.Tensor):
+         tensor = True
+     else:
+         raise TypeError("Unsupported image type. For supported types see https://docs.ultralytics.com/modes/predict")
+
+     return source, webcam, screenshot, from_img, in_memory, tensor
+
+
+ def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False, channels=3):
+     """
+     Load an inference source for object detection and apply necessary transformations.
+
+     Args:
+         source (str | Path | torch.Tensor | PIL.Image | np.ndarray, optional): The input source for inference.
+         batch (int, optional): Batch size for dataloaders.
+         vid_stride (int, optional): The frame interval for video sources.
+         buffer (bool, optional): Whether stream frames will be buffered.
+         channels (int): The number of input channels for the model.
+
+     Returns:
+         (Dataset): A dataset object for the specified input source with attached source_type attribute.
+     """
+     source, stream, screenshot, from_img, in_memory, tensor = check_source(source)
+     source_type = source.source_type if in_memory else SourceTypes(stream, screenshot, from_img, tensor)
+
+     # Dataloader
+     if tensor:
+         dataset = LoadTensor(source)
+     elif in_memory:
+         dataset = source
+     elif stream:
+         dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer, channels=channels)
+     elif screenshot:
+         dataset = LoadScreenshots(source, channels=channels)
+     elif from_img:
+         dataset = LoadPilAndNumpy(source, channels=channels)
+     else:
+         dataset = LoadImagesAndVideos(source, batch=batch, vid_stride=vid_stride, channels=channels)
+
+     # Attach source types to the dataset
+     setattr(dataset, "source_type", source_type)
+
+     return dataset
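
Note: as a usage aside, the sketch below shows how these helpers might be exercised. It is illustrative only and assumes the ASSETS constant exported by ultralytics.utils points at the bundled sample images; check_source only classifies the input, while load_inference_source wraps it in the matching loader and attaches source_type.

from ultralytics.data.build import check_source, load_inference_source
from ultralytics.utils import ASSETS  # assumed: directory of bundled sample images

# Classify a few sources; the returned flags are (webcam, screenshot, from_img, in_memory, tensor)
for src in (ASSETS / "bus.jpg", "screen", 0):
    print(src, check_source(src)[1:])

# Wrap a single image in the matching loader; source_type records how the source was classified
dataset = load_inference_source(ASSETS / "bus.jpg", batch=1)
print(type(dataset).__name__, dataset.source_type)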