dgenerate-ultralytics-headless 8.3.134__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
  2. dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
  3. dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
  4. dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
  5. dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
  6. dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
  7. tests/__init__.py +22 -0
  8. tests/conftest.py +83 -0
  9. tests/test_cli.py +138 -0
  10. tests/test_cuda.py +215 -0
  11. tests/test_engine.py +131 -0
  12. tests/test_exports.py +236 -0
  13. tests/test_integrations.py +154 -0
  14. tests/test_python.py +694 -0
  15. tests/test_solutions.py +187 -0
  16. ultralytics/__init__.py +30 -0
  17. ultralytics/assets/bus.jpg +0 -0
  18. ultralytics/assets/zidane.jpg +0 -0
  19. ultralytics/cfg/__init__.py +1023 -0
  20. ultralytics/cfg/datasets/Argoverse.yaml +77 -0
  21. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  22. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  23. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  24. ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
  25. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  26. ultralytics/cfg/datasets/Objects365.yaml +443 -0
  27. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  28. ultralytics/cfg/datasets/VOC.yaml +106 -0
  29. ultralytics/cfg/datasets/VisDrone.yaml +77 -0
  30. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  31. ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
  32. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  33. ultralytics/cfg/datasets/coco-pose.yaml +42 -0
  34. ultralytics/cfg/datasets/coco.yaml +118 -0
  35. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  36. ultralytics/cfg/datasets/coco128.yaml +101 -0
  37. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  38. ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
  39. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  40. ultralytics/cfg/datasets/coco8.yaml +101 -0
  41. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  42. ultralytics/cfg/datasets/dog-pose.yaml +24 -0
  43. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  44. ultralytics/cfg/datasets/dota8.yaml +35 -0
  45. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
  46. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  47. ultralytics/cfg/datasets/medical-pills.yaml +22 -0
  48. ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
  49. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  50. ultralytics/cfg/datasets/signature.yaml +21 -0
  51. ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
  52. ultralytics/cfg/datasets/xView.yaml +155 -0
  53. ultralytics/cfg/default.yaml +127 -0
  54. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  55. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  56. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  57. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  58. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  59. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  60. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  61. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  62. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  63. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  64. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  65. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  66. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  68. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  69. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  70. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  71. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  74. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  75. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  76. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  77. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  78. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  79. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  80. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  81. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  82. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  83. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
  84. ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
  85. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  86. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  87. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  88. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  89. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  90. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  91. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  92. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  93. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  94. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  95. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  96. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  97. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  98. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  99. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  100. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  101. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  102. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  103. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  105. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  106. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  107. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  108. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  109. ultralytics/cfg/trackers/botsort.yaml +22 -0
  110. ultralytics/cfg/trackers/bytetrack.yaml +14 -0
  111. ultralytics/data/__init__.py +26 -0
  112. ultralytics/data/annotator.py +66 -0
  113. ultralytics/data/augment.py +2945 -0
  114. ultralytics/data/base.py +438 -0
  115. ultralytics/data/build.py +258 -0
  116. ultralytics/data/converter.py +754 -0
  117. ultralytics/data/dataset.py +834 -0
  118. ultralytics/data/loaders.py +676 -0
  119. ultralytics/data/scripts/download_weights.sh +18 -0
  120. ultralytics/data/scripts/get_coco.sh +61 -0
  121. ultralytics/data/scripts/get_coco128.sh +18 -0
  122. ultralytics/data/scripts/get_imagenet.sh +52 -0
  123. ultralytics/data/split.py +125 -0
  124. ultralytics/data/split_dota.py +325 -0
  125. ultralytics/data/utils.py +777 -0
  126. ultralytics/engine/__init__.py +1 -0
  127. ultralytics/engine/exporter.py +1519 -0
  128. ultralytics/engine/model.py +1156 -0
  129. ultralytics/engine/predictor.py +502 -0
  130. ultralytics/engine/results.py +1840 -0
  131. ultralytics/engine/trainer.py +853 -0
  132. ultralytics/engine/tuner.py +243 -0
  133. ultralytics/engine/validator.py +377 -0
  134. ultralytics/hub/__init__.py +168 -0
  135. ultralytics/hub/auth.py +137 -0
  136. ultralytics/hub/google/__init__.py +176 -0
  137. ultralytics/hub/session.py +446 -0
  138. ultralytics/hub/utils.py +248 -0
  139. ultralytics/models/__init__.py +9 -0
  140. ultralytics/models/fastsam/__init__.py +7 -0
  141. ultralytics/models/fastsam/model.py +61 -0
  142. ultralytics/models/fastsam/predict.py +181 -0
  143. ultralytics/models/fastsam/utils.py +24 -0
  144. ultralytics/models/fastsam/val.py +40 -0
  145. ultralytics/models/nas/__init__.py +7 -0
  146. ultralytics/models/nas/model.py +102 -0
  147. ultralytics/models/nas/predict.py +58 -0
  148. ultralytics/models/nas/val.py +39 -0
  149. ultralytics/models/rtdetr/__init__.py +7 -0
  150. ultralytics/models/rtdetr/model.py +63 -0
  151. ultralytics/models/rtdetr/predict.py +84 -0
  152. ultralytics/models/rtdetr/train.py +85 -0
  153. ultralytics/models/rtdetr/val.py +191 -0
  154. ultralytics/models/sam/__init__.py +6 -0
  155. ultralytics/models/sam/amg.py +260 -0
  156. ultralytics/models/sam/build.py +358 -0
  157. ultralytics/models/sam/model.py +170 -0
  158. ultralytics/models/sam/modules/__init__.py +1 -0
  159. ultralytics/models/sam/modules/blocks.py +1129 -0
  160. ultralytics/models/sam/modules/decoders.py +515 -0
  161. ultralytics/models/sam/modules/encoders.py +854 -0
  162. ultralytics/models/sam/modules/memory_attention.py +299 -0
  163. ultralytics/models/sam/modules/sam.py +1006 -0
  164. ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
  165. ultralytics/models/sam/modules/transformer.py +351 -0
  166. ultralytics/models/sam/modules/utils.py +394 -0
  167. ultralytics/models/sam/predict.py +1605 -0
  168. ultralytics/models/utils/__init__.py +1 -0
  169. ultralytics/models/utils/loss.py +455 -0
  170. ultralytics/models/utils/ops.py +268 -0
  171. ultralytics/models/yolo/__init__.py +7 -0
  172. ultralytics/models/yolo/classify/__init__.py +7 -0
  173. ultralytics/models/yolo/classify/predict.py +88 -0
  174. ultralytics/models/yolo/classify/train.py +233 -0
  175. ultralytics/models/yolo/classify/val.py +215 -0
  176. ultralytics/models/yolo/detect/__init__.py +7 -0
  177. ultralytics/models/yolo/detect/predict.py +124 -0
  178. ultralytics/models/yolo/detect/train.py +217 -0
  179. ultralytics/models/yolo/detect/val.py +451 -0
  180. ultralytics/models/yolo/model.py +354 -0
  181. ultralytics/models/yolo/obb/__init__.py +7 -0
  182. ultralytics/models/yolo/obb/predict.py +66 -0
  183. ultralytics/models/yolo/obb/train.py +81 -0
  184. ultralytics/models/yolo/obb/val.py +283 -0
  185. ultralytics/models/yolo/pose/__init__.py +7 -0
  186. ultralytics/models/yolo/pose/predict.py +79 -0
  187. ultralytics/models/yolo/pose/train.py +154 -0
  188. ultralytics/models/yolo/pose/val.py +394 -0
  189. ultralytics/models/yolo/segment/__init__.py +7 -0
  190. ultralytics/models/yolo/segment/predict.py +113 -0
  191. ultralytics/models/yolo/segment/train.py +123 -0
  192. ultralytics/models/yolo/segment/val.py +428 -0
  193. ultralytics/models/yolo/world/__init__.py +5 -0
  194. ultralytics/models/yolo/world/train.py +119 -0
  195. ultralytics/models/yolo/world/train_world.py +176 -0
  196. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  197. ultralytics/models/yolo/yoloe/predict.py +169 -0
  198. ultralytics/models/yolo/yoloe/train.py +298 -0
  199. ultralytics/models/yolo/yoloe/train_seg.py +124 -0
  200. ultralytics/models/yolo/yoloe/val.py +191 -0
  201. ultralytics/nn/__init__.py +29 -0
  202. ultralytics/nn/autobackend.py +842 -0
  203. ultralytics/nn/modules/__init__.py +182 -0
  204. ultralytics/nn/modules/activation.py +53 -0
  205. ultralytics/nn/modules/block.py +1966 -0
  206. ultralytics/nn/modules/conv.py +712 -0
  207. ultralytics/nn/modules/head.py +880 -0
  208. ultralytics/nn/modules/transformer.py +713 -0
  209. ultralytics/nn/modules/utils.py +164 -0
  210. ultralytics/nn/tasks.py +1627 -0
  211. ultralytics/nn/text_model.py +351 -0
  212. ultralytics/solutions/__init__.py +41 -0
  213. ultralytics/solutions/ai_gym.py +116 -0
  214. ultralytics/solutions/analytics.py +252 -0
  215. ultralytics/solutions/config.py +106 -0
  216. ultralytics/solutions/distance_calculation.py +124 -0
  217. ultralytics/solutions/heatmap.py +127 -0
  218. ultralytics/solutions/instance_segmentation.py +84 -0
  219. ultralytics/solutions/object_blurrer.py +90 -0
  220. ultralytics/solutions/object_counter.py +195 -0
  221. ultralytics/solutions/object_cropper.py +84 -0
  222. ultralytics/solutions/parking_management.py +273 -0
  223. ultralytics/solutions/queue_management.py +93 -0
  224. ultralytics/solutions/region_counter.py +120 -0
  225. ultralytics/solutions/security_alarm.py +154 -0
  226. ultralytics/solutions/similarity_search.py +172 -0
  227. ultralytics/solutions/solutions.py +724 -0
  228. ultralytics/solutions/speed_estimation.py +110 -0
  229. ultralytics/solutions/streamlit_inference.py +196 -0
  230. ultralytics/solutions/templates/similarity-search.html +160 -0
  231. ultralytics/solutions/trackzone.py +88 -0
  232. ultralytics/solutions/vision_eye.py +68 -0
  233. ultralytics/trackers/__init__.py +7 -0
  234. ultralytics/trackers/basetrack.py +124 -0
  235. ultralytics/trackers/bot_sort.py +260 -0
  236. ultralytics/trackers/byte_tracker.py +480 -0
  237. ultralytics/trackers/track.py +125 -0
  238. ultralytics/trackers/utils/__init__.py +1 -0
  239. ultralytics/trackers/utils/gmc.py +376 -0
  240. ultralytics/trackers/utils/kalman_filter.py +493 -0
  241. ultralytics/trackers/utils/matching.py +157 -0
  242. ultralytics/utils/__init__.py +1435 -0
  243. ultralytics/utils/autobatch.py +106 -0
  244. ultralytics/utils/autodevice.py +174 -0
  245. ultralytics/utils/benchmarks.py +695 -0
  246. ultralytics/utils/callbacks/__init__.py +5 -0
  247. ultralytics/utils/callbacks/base.py +234 -0
  248. ultralytics/utils/callbacks/clearml.py +153 -0
  249. ultralytics/utils/callbacks/comet.py +552 -0
  250. ultralytics/utils/callbacks/dvc.py +205 -0
  251. ultralytics/utils/callbacks/hub.py +108 -0
  252. ultralytics/utils/callbacks/mlflow.py +138 -0
  253. ultralytics/utils/callbacks/neptune.py +140 -0
  254. ultralytics/utils/callbacks/raytune.py +43 -0
  255. ultralytics/utils/callbacks/tensorboard.py +132 -0
  256. ultralytics/utils/callbacks/wb.py +185 -0
  257. ultralytics/utils/checks.py +897 -0
  258. ultralytics/utils/dist.py +119 -0
  259. ultralytics/utils/downloads.py +499 -0
  260. ultralytics/utils/errors.py +43 -0
  261. ultralytics/utils/export.py +219 -0
  262. ultralytics/utils/files.py +221 -0
  263. ultralytics/utils/instance.py +499 -0
  264. ultralytics/utils/loss.py +813 -0
  265. ultralytics/utils/metrics.py +1356 -0
  266. ultralytics/utils/ops.py +885 -0
  267. ultralytics/utils/patches.py +143 -0
  268. ultralytics/utils/plotting.py +1011 -0
  269. ultralytics/utils/tal.py +416 -0
  270. ultralytics/utils/torch_utils.py +990 -0
  271. ultralytics/utils/triton.py +116 -0
  272. ultralytics/utils/tuner.py +159 -0
ultralytics/trackers/utils/kalman_filter.py
@@ -0,0 +1,493 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import numpy as np
+import scipy.linalg
+
+
+class KalmanFilterXYAH:
+    """
+    A KalmanFilterXYAH class for tracking bounding boxes in image space using a Kalman filter.
+
+    Implements a simple Kalman filter for tracking bounding boxes in image space. The 8-dimensional state space
+    (x, y, a, h, vx, vy, va, vh) contains the bounding box center position (x, y), aspect ratio a, height h, and their
+    respective velocities. Object motion follows a constant velocity model, and bounding box location (x, y, a, h) is
+    taken as a direct observation of the state space (linear observation model).
+
+    Attributes:
+        _motion_mat (np.ndarray): The motion matrix for the Kalman filter.
+        _update_mat (np.ndarray): The update matrix for the Kalman filter.
+        _std_weight_position (float): Standard deviation weight for position.
+        _std_weight_velocity (float): Standard deviation weight for velocity.
+
+    Methods:
+        initiate: Creates a track from an unassociated measurement.
+        predict: Runs the Kalman filter prediction step.
+        project: Projects the state distribution to measurement space.
+        multi_predict: Runs the Kalman filter prediction step (vectorized version).
+        update: Runs the Kalman filter correction step.
+        gating_distance: Computes the gating distance between state distribution and measurements.
+
+    Examples:
+        Initialize the Kalman filter and create a track from a measurement
+        >>> kf = KalmanFilterXYAH()
+        >>> measurement = np.array([100, 200, 1.5, 50])
+        >>> mean, covariance = kf.initiate(measurement)
+        >>> print(mean)
+        >>> print(covariance)
+    """
+
+    def __init__(self):
+        """
+        Initialize Kalman filter model matrices with motion and observation uncertainty weights.
+
+        The Kalman filter is initialized with an 8-dimensional state space (x, y, a, h, vx, vy, va, vh), where (x, y)
+        represents the bounding box center position, 'a' is the aspect ratio, 'h' is the height, and their respective
+        velocities are (vx, vy, va, vh). The filter uses a constant velocity model for object motion and a linear
+        observation model for bounding box location.
+
+        Examples:
+            Initialize a Kalman filter for tracking:
+            >>> kf = KalmanFilterXYAH()
+        """
+        ndim, dt = 4, 1.0
+
+        # Create Kalman filter model matrices
+        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
+        for i in range(ndim):
+            self._motion_mat[i, ndim + i] = dt
+        self._update_mat = np.eye(ndim, 2 * ndim)
+
+        # Motion and observation uncertainty are chosen relative to the current state estimate
+        self._std_weight_position = 1.0 / 20
+        self._std_weight_velocity = 1.0 / 160
+
+    def initiate(self, measurement: np.ndarray):
+        """
+        Create a track from an unassociated measurement.
+
+        Args:
+            measurement (np.ndarray): Bounding box coordinates (x, y, a, h) with center position (x, y), aspect ratio a,
+                and height h.
+
+        Returns:
+            (np.ndarray): Mean vector (8-dimensional) of the new track. Unobserved velocities are initialized to 0 mean.
+            (np.ndarray): Covariance matrix (8x8 dimensional) of the new track.
+
+        Examples:
+            >>> kf = KalmanFilterXYAH()
+            >>> measurement = np.array([100, 50, 1.5, 200])
+            >>> mean, covariance = kf.initiate(measurement)
+        """
+        mean_pos = measurement
+        mean_vel = np.zeros_like(mean_pos)
+        mean = np.r_[mean_pos, mean_vel]
+
+        std = [
+            2 * self._std_weight_position * measurement[3],
+            2 * self._std_weight_position * measurement[3],
+            1e-2,
+            2 * self._std_weight_position * measurement[3],
+            10 * self._std_weight_velocity * measurement[3],
+            10 * self._std_weight_velocity * measurement[3],
+            1e-5,
+            10 * self._std_weight_velocity * measurement[3],
+        ]
+        covariance = np.diag(np.square(std))
+        return mean, covariance
+
+    def predict(self, mean: np.ndarray, covariance: np.ndarray):
+        """
+        Run Kalman filter prediction step.
+
+        Args:
+            mean (np.ndarray): The 8-dimensional mean vector of the object state at the previous time step.
+            covariance (np.ndarray): The 8x8-dimensional covariance matrix of the object state at the previous time step.
+
+        Returns:
+            (np.ndarray): Mean vector of the predicted state. Unobserved velocities are initialized to 0 mean.
+            (np.ndarray): Covariance matrix of the predicted state.
+
+        Examples:
+            >>> kf = KalmanFilterXYAH()
+            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
+            >>> covariance = np.eye(8)
+            >>> predicted_mean, predicted_covariance = kf.predict(mean, covariance)
+        """
+        std_pos = [
+            self._std_weight_position * mean[3],
+            self._std_weight_position * mean[3],
+            1e-2,
+            self._std_weight_position * mean[3],
+        ]
+        std_vel = [
+            self._std_weight_velocity * mean[3],
+            self._std_weight_velocity * mean[3],
+            1e-5,
+            self._std_weight_velocity * mean[3],
+        ]
+        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
+
+        mean = np.dot(mean, self._motion_mat.T)
+        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
+
+        return mean, covariance
+
+    def project(self, mean: np.ndarray, covariance: np.ndarray):
+        """
+        Project state distribution to measurement space.
+
+        Args:
+            mean (np.ndarray): The state's mean vector (8 dimensional array).
+            covariance (np.ndarray): The state's covariance matrix (8x8 dimensional).
+
+        Returns:
+            (np.ndarray): Projected mean of the given state estimate.
+            (np.ndarray): Projected covariance matrix of the given state estimate.
+
+        Examples:
+            >>> kf = KalmanFilterXYAH()
+            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
+            >>> covariance = np.eye(8)
+            >>> projected_mean, projected_covariance = kf.project(mean, covariance)
+        """
+        std = [
+            self._std_weight_position * mean[3],
+            self._std_weight_position * mean[3],
+            1e-1,
+            self._std_weight_position * mean[3],
+        ]
+        innovation_cov = np.diag(np.square(std))
+
+        mean = np.dot(self._update_mat, mean)
+        covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
+        return mean, covariance + innovation_cov
+
+    def multi_predict(self, mean: np.ndarray, covariance: np.ndarray):
+        """
+        Run Kalman filter prediction step for multiple object states (Vectorized version).
+
+        Args:
+            mean (np.ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step.
+            covariance (np.ndarray): The Nx8x8 covariance matrix of the object states at the previous time step.
+
+        Returns:
+            (np.ndarray): Mean matrix of the predicted states with shape (N, 8).
+            (np.ndarray): Covariance matrix of the predicted states with shape (N, 8, 8).
+
+        Examples:
+            >>> mean = np.random.rand(10, 8)  # 10 object states
+            >>> covariance = np.random.rand(10, 8, 8)  # Covariance matrices for 10 object states
+            >>> predicted_mean, predicted_covariance = kalman_filter.multi_predict(mean, covariance)
+        """
+        std_pos = [
+            self._std_weight_position * mean[:, 3],
+            self._std_weight_position * mean[:, 3],
+            1e-2 * np.ones_like(mean[:, 3]),
+            self._std_weight_position * mean[:, 3],
+        ]
+        std_vel = [
+            self._std_weight_velocity * mean[:, 3],
+            self._std_weight_velocity * mean[:, 3],
+            1e-5 * np.ones_like(mean[:, 3]),
+            self._std_weight_velocity * mean[:, 3],
+        ]
+        sqr = np.square(np.r_[std_pos, std_vel]).T
+
+        motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
+        motion_cov = np.asarray(motion_cov)
+
+        mean = np.dot(mean, self._motion_mat.T)
+        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
+        covariance = np.dot(left, self._motion_mat.T) + motion_cov
+
+        return mean, covariance
+
+    def update(self, mean: np.ndarray, covariance: np.ndarray, measurement: np.ndarray):
+        """
+        Run Kalman filter correction step.
+
+        Args:
+            mean (np.ndarray): The predicted state's mean vector (8 dimensional).
+            covariance (np.ndarray): The state's covariance matrix (8x8 dimensional).
+            measurement (np.ndarray): The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center
+                position, a the aspect ratio, and h the height of the bounding box.
+
+        Returns:
+            (np.ndarray): Measurement-corrected state mean.
+            (np.ndarray): Measurement-corrected state covariance.
+
+        Examples:
+            >>> kf = KalmanFilterXYAH()
+            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
+            >>> covariance = np.eye(8)
+            >>> measurement = np.array([1, 1, 1, 1])
+            >>> new_mean, new_covariance = kf.update(mean, covariance, measurement)
+        """
+        projected_mean, projected_cov = self.project(mean, covariance)
+
+        chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
+        kalman_gain = scipy.linalg.cho_solve(
+            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False
+        ).T
+        innovation = measurement - projected_mean
+
+        new_mean = mean + np.dot(innovation, kalman_gain.T)
+        new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
+        return new_mean, new_covariance
+
+    def gating_distance(
+        self,
+        mean: np.ndarray,
+        covariance: np.ndarray,
+        measurements: np.ndarray,
+        only_position: bool = False,
+        metric: str = "maha",
+    ) -> np.ndarray:
+        """
+        Compute gating distance between state distribution and measurements.
+
+        A suitable distance threshold can be obtained from `chi2inv95`. If `only_position` is False, the chi-square
+        distribution has 4 degrees of freedom, otherwise 2.
+
+        Args:
+            mean (np.ndarray): Mean vector over the state distribution (8 dimensional).
+            covariance (np.ndarray): Covariance of the state distribution (8x8 dimensional).
+            measurements (np.ndarray): An (N, 4) matrix of N measurements, each in format (x, y, a, h) where (x, y) is
+                the bounding box center position, a the aspect ratio, and h the height.
+            only_position (bool): If True, distance computation is done with respect to box center position only.
+            metric (str): The metric to use for calculating the distance. Options are 'gaussian' for the squared
+                Euclidean distance and 'maha' for the squared Mahalanobis distance.
+
+        Returns:
+            (np.ndarray): Returns an array of length N, where the i-th element contains the squared distance between
+                (mean, covariance) and `measurements[i]`.
+
+        Examples:
+            Compute gating distance using Mahalanobis metric:
+            >>> kf = KalmanFilterXYAH()
+            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
+            >>> covariance = np.eye(8)
+            >>> measurements = np.array([[1, 1, 1, 1], [2, 2, 1, 1]])
+            >>> distances = kf.gating_distance(mean, covariance, measurements, only_position=False, metric="maha")
+        """
+        mean, covariance = self.project(mean, covariance)
+        if only_position:
+            mean, covariance = mean[:2], covariance[:2, :2]
+            measurements = measurements[:, :2]
+
+        d = measurements - mean
+        if metric == "gaussian":
+            return np.sum(d * d, axis=1)
+        elif metric == "maha":
+            cholesky_factor = np.linalg.cholesky(covariance)
+            z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
+            return np.sum(z * z, axis=0)  # square maha
+        else:
+            raise ValueError("Invalid distance metric")
+
+
+class KalmanFilterXYWH(KalmanFilterXYAH):
+    """
+    A KalmanFilterXYWH class for tracking bounding boxes in image space using a Kalman filter.
+
+    Implements a Kalman filter for tracking bounding boxes with state space (x, y, w, h, vx, vy, vw, vh), where
+    (x, y) is the center position, w is the width, h is the height, and vx, vy, vw, vh are their respective velocities.
+    The object motion follows a constant velocity model, and the bounding box location (x, y, w, h) is taken as a direct
+    observation of the state space (linear observation model).
+
+    Attributes:
+        _motion_mat (np.ndarray): The motion matrix for the Kalman filter.
+        _update_mat (np.ndarray): The update matrix for the Kalman filter.
+        _std_weight_position (float): Standard deviation weight for position.
+        _std_weight_velocity (float): Standard deviation weight for velocity.
+
+    Methods:
+        initiate: Creates a track from an unassociated measurement.
+        predict: Runs the Kalman filter prediction step.
+        project: Projects the state distribution to measurement space.
+        multi_predict: Runs the Kalman filter prediction step in a vectorized manner.
+        update: Runs the Kalman filter correction step.
+
+    Examples:
+        Create a Kalman filter and initialize a track
+        >>> kf = KalmanFilterXYWH()
+        >>> measurement = np.array([100, 50, 20, 40])
+        >>> mean, covariance = kf.initiate(measurement)
+        >>> print(mean)
+        >>> print(covariance)
+    """
+
+    def initiate(self, measurement: np.ndarray):
+        """
+        Create track from unassociated measurement.
+
+        Args:
+            measurement (np.ndarray): Bounding box coordinates (x, y, w, h) with center position (x, y), width, and height.
+
+        Returns:
+            (np.ndarray): Mean vector (8 dimensional) of the new track. Unobserved velocities are initialized to 0 mean.
+            (np.ndarray): Covariance matrix (8x8 dimensional) of the new track.
+
+        Examples:
+            >>> kf = KalmanFilterXYWH()
+            >>> measurement = np.array([100, 50, 20, 40])
+            >>> mean, covariance = kf.initiate(measurement)
+            >>> print(mean)
+            [100. 50. 20. 40. 0. 0. 0. 0.]
+            >>> print(covariance)
+            [[ 4. 0. 0. 0. 0. 0. 0. 0.]
+             [ 0. 4. 0. 0. 0. 0. 0. 0.]
+             [ 0. 0. 4. 0. 0. 0. 0. 0.]
+             [ 0. 0. 0. 4. 0. 0. 0. 0.]
+             [ 0. 0. 0. 0. 0.25 0. 0. 0.]
+             [ 0. 0. 0. 0. 0. 0.25 0. 0.]
+             [ 0. 0. 0. 0. 0. 0. 0.25 0.]
+             [ 0. 0. 0. 0. 0. 0. 0. 0.25]]
+        """
+        mean_pos = measurement
+        mean_vel = np.zeros_like(mean_pos)
+        mean = np.r_[mean_pos, mean_vel]
+
+        std = [
+            2 * self._std_weight_position * measurement[2],
+            2 * self._std_weight_position * measurement[3],
+            2 * self._std_weight_position * measurement[2],
+            2 * self._std_weight_position * measurement[3],
+            10 * self._std_weight_velocity * measurement[2],
+            10 * self._std_weight_velocity * measurement[3],
+            10 * self._std_weight_velocity * measurement[2],
+            10 * self._std_weight_velocity * measurement[3],
+        ]
+        covariance = np.diag(np.square(std))
+        return mean, covariance
+
+    def predict(self, mean, covariance):
+        """
+        Run Kalman filter prediction step.
+
+        Args:
+            mean (np.ndarray): The 8-dimensional mean vector of the object state at the previous time step.
+            covariance (np.ndarray): The 8x8-dimensional covariance matrix of the object state at the previous time step.
+
+        Returns:
+            (np.ndarray): Mean vector of the predicted state. Unobserved velocities are initialized to 0 mean.
+            (np.ndarray): Covariance matrix of the predicted state.
+
+        Examples:
+            >>> kf = KalmanFilterXYWH()
+            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
+            >>> covariance = np.eye(8)
+            >>> predicted_mean, predicted_covariance = kf.predict(mean, covariance)
+        """
+        std_pos = [
+            self._std_weight_position * mean[2],
+            self._std_weight_position * mean[3],
+            self._std_weight_position * mean[2],
+            self._std_weight_position * mean[3],
+        ]
+        std_vel = [
+            self._std_weight_velocity * mean[2],
+            self._std_weight_velocity * mean[3],
+            self._std_weight_velocity * mean[2],
+            self._std_weight_velocity * mean[3],
+        ]
+        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
+
+        mean = np.dot(mean, self._motion_mat.T)
+        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
+
+        return mean, covariance
+
+    def project(self, mean, covariance):
+        """
+        Project state distribution to measurement space.
+
+        Args:
+            mean (np.ndarray): The state's mean vector (8 dimensional array).
+            covariance (np.ndarray): The state's covariance matrix (8x8 dimensional).
+
+        Returns:
+            (np.ndarray): Projected mean of the given state estimate.
+            (np.ndarray): Projected covariance matrix of the given state estimate.
+
+        Examples:
+            >>> kf = KalmanFilterXYWH()
+            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
+            >>> covariance = np.eye(8)
+            >>> projected_mean, projected_cov = kf.project(mean, covariance)
+        """
+        std = [
+            self._std_weight_position * mean[2],
+            self._std_weight_position * mean[3],
+            self._std_weight_position * mean[2],
+            self._std_weight_position * mean[3],
+        ]
+        innovation_cov = np.diag(np.square(std))
+
+        mean = np.dot(self._update_mat, mean)
+        covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
+        return mean, covariance + innovation_cov
+
+    def multi_predict(self, mean, covariance):
+        """
+        Run Kalman filter prediction step (Vectorized version).
+
+        Args:
+            mean (np.ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step.
+            covariance (np.ndarray): The Nx8x8 covariance matrix of the object states at the previous time step.
+
+        Returns:
+            (np.ndarray): Mean matrix of the predicted states with shape (N, 8).
+            (np.ndarray): Covariance matrix of the predicted states with shape (N, 8, 8).
+
+        Examples:
+            >>> mean = np.random.rand(5, 8)  # 5 objects with 8-dimensional state vectors
+            >>> covariance = np.random.rand(5, 8, 8)  # 5 objects with 8x8 covariance matrices
+            >>> kf = KalmanFilterXYWH()
+            >>> predicted_mean, predicted_covariance = kf.multi_predict(mean, covariance)
+        """
+        std_pos = [
+            self._std_weight_position * mean[:, 2],
+            self._std_weight_position * mean[:, 3],
+            self._std_weight_position * mean[:, 2],
+            self._std_weight_position * mean[:, 3],
+        ]
+        std_vel = [
+            self._std_weight_velocity * mean[:, 2],
+            self._std_weight_velocity * mean[:, 3],
+            self._std_weight_velocity * mean[:, 2],
+            self._std_weight_velocity * mean[:, 3],
+        ]
+        sqr = np.square(np.r_[std_pos, std_vel]).T
+
+        motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
+        motion_cov = np.asarray(motion_cov)
+
+        mean = np.dot(mean, self._motion_mat.T)
+        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
+        covariance = np.dot(left, self._motion_mat.T) + motion_cov
+
+        return mean, covariance
+
+    def update(self, mean, covariance, measurement):
+        """
+        Run Kalman filter correction step.
+
+        Args:
+            mean (np.ndarray): The predicted state's mean vector (8 dimensional).
+            covariance (np.ndarray): The state's covariance matrix (8x8 dimensional).
+            measurement (np.ndarray): The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center
+                position, w the width, and h the height of the bounding box.
+
+        Returns:
+            (np.ndarray): Measurement-corrected state mean.
+            (np.ndarray): Measurement-corrected state covariance.
+
+        Examples:
+            >>> kf = KalmanFilterXYWH()
+            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
+            >>> covariance = np.eye(8)
+            >>> measurement = np.array([0.5, 0.5, 1.2, 1.2])
+            >>> new_mean, new_covariance = kf.update(mean, covariance, measurement)
+        """
+        return super().update(mean, covariance, measurement)
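
Both filter classes above share the same predict/update cycle; they differ only in how the box is parameterized (aspect ratio plus height vs. width plus height). The following is an illustrative sketch, not part of the packaged diff: the import path simply follows the file layout listed above, the measurements are made-up (x, y, a, h) detections, and 9.4877 is the standard chi-square 95% threshold for 4 degrees of freedom used for gating.

import numpy as np

from ultralytics.trackers.utils.kalman_filter import KalmanFilterXYAH

kf = KalmanFilterXYAH()
mean, cov = kf.initiate(np.array([320.0, 240.0, 0.5, 80.0]))  # first detection as (x, y, a, h)

for det in [np.array([322.0, 243.0, 0.5, 82.0]), np.array([325.0, 247.0, 0.5, 83.0])]:
    mean, cov = kf.predict(mean, cov)  # propagate one frame with the constant-velocity model
    gate = kf.gating_distance(mean, cov, det[None, :], metric="maha")  # squared Mahalanobis distance
    if gate[0] < 9.4877:  # 95% chi-square gate for 4 degrees of freedom
        mean, cov = kf.update(mean, cov, det)  # correct the state with the associated measurement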
ultralytics/trackers/utils/matching.py
@@ -0,0 +1,157 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import numpy as np
+import scipy
+from scipy.spatial.distance import cdist
+
+from ultralytics.utils.metrics import batch_probiou, bbox_ioa
+
+try:
+    import lap  # for linear_assignment
+
+    assert lap.__version__  # verify package is not directory
+except (ImportError, AssertionError, AttributeError):
+    from ultralytics.utils.checks import check_requirements
+
+    check_requirements("lap>=0.5.12")  # https://github.com/gatagat/lap
+    import lap
+
+
+def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = True) -> tuple:
+    """
+    Perform linear assignment using either the scipy or lap.lapjv method.
+
+    Args:
+        cost_matrix (np.ndarray): The matrix containing cost values for assignments, with shape (N, M).
+        thresh (float): Threshold for considering an assignment valid.
+        use_lap (bool): Use lap.lapjv for the assignment. If False, scipy.optimize.linear_sum_assignment is used.
+
+    Returns:
+        matched_indices (np.ndarray): Array of matched indices of shape (K, 2), where K is the number of matches.
+        unmatched_a (np.ndarray): Array of unmatched indices from the first set, with shape (L,).
+        unmatched_b (np.ndarray): Array of unmatched indices from the second set, with shape (M,).
+
+    Examples:
+        >>> cost_matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+        >>> thresh = 5.0
+        >>> matched_indices, unmatched_a, unmatched_b = linear_assignment(cost_matrix, thresh, use_lap=True)
+    """
+    if cost_matrix.size == 0:
+        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
+
+    if use_lap:
+        # Use lap.lapjv
+        # https://github.com/gatagat/lap
+        _, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
+        matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0]
+        unmatched_a = np.where(x < 0)[0]
+        unmatched_b = np.where(y < 0)[0]
+    else:
+        # Use scipy.optimize.linear_sum_assignment
+        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
+        x, y = scipy.optimize.linear_sum_assignment(cost_matrix)  # row x, col y
+        matches = np.asarray([[x[i], y[i]] for i in range(len(x)) if cost_matrix[x[i], y[i]] <= thresh])
+        if len(matches) == 0:
+            unmatched_a = list(np.arange(cost_matrix.shape[0]))
+            unmatched_b = list(np.arange(cost_matrix.shape[1]))
+        else:
+            unmatched_a = list(frozenset(np.arange(cost_matrix.shape[0])) - frozenset(matches[:, 0]))
+            unmatched_b = list(frozenset(np.arange(cost_matrix.shape[1])) - frozenset(matches[:, 1]))
+
+    return matches, unmatched_a, unmatched_b
+
+
+def iou_distance(atracks: list, btracks: list) -> np.ndarray:
+    """
+    Compute cost based on Intersection over Union (IoU) between tracks.
+
+    Args:
+        atracks (List[STrack] | List[np.ndarray]): List of tracks 'a' or bounding boxes.
+        btracks (List[STrack] | List[np.ndarray]): List of tracks 'b' or bounding boxes.
+
+    Returns:
+        (np.ndarray): Cost matrix computed based on IoU with shape (len(atracks), len(btracks)).
+
+    Examples:
+        Compute IoU distance between two sets of tracks
+        >>> atracks = [np.array([0, 0, 10, 10]), np.array([20, 20, 30, 30])]
+        >>> btracks = [np.array([5, 5, 15, 15]), np.array([25, 25, 35, 35])]
+        >>> cost_matrix = iou_distance(atracks, btracks)
+    """
+    if atracks and isinstance(atracks[0], np.ndarray) or btracks and isinstance(btracks[0], np.ndarray):
+        atlbrs = atracks
+        btlbrs = btracks
+    else:
+        atlbrs = [track.xywha if track.angle is not None else track.xyxy for track in atracks]
+        btlbrs = [track.xywha if track.angle is not None else track.xyxy for track in btracks]

+    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
+    if len(atlbrs) and len(btlbrs):
+        if len(atlbrs[0]) == 5 and len(btlbrs[0]) == 5:
+            ious = batch_probiou(
+                np.ascontiguousarray(atlbrs, dtype=np.float32),
+                np.ascontiguousarray(btlbrs, dtype=np.float32),
+            ).numpy()
+        else:
+            ious = bbox_ioa(
+                np.ascontiguousarray(atlbrs, dtype=np.float32),
+                np.ascontiguousarray(btlbrs, dtype=np.float32),
+                iou=True,
+            )
+    return 1 - ious  # cost matrix
+
+
+def embedding_distance(tracks: list, detections: list, metric: str = "cosine") -> np.ndarray:
+    """
+    Compute distance between tracks and detections based on embeddings.
+
+    Args:
+        tracks (List[STrack]): List of tracks, where each track contains embedding features.
+        detections (List[BaseTrack]): List of detections, where each detection contains embedding features.
+        metric (str): Metric for distance computation. Supported metrics include 'cosine', 'euclidean', etc.
+
+    Returns:
+        (np.ndarray): Cost matrix computed based on embeddings with shape (N, M), where N is the number of tracks
+            and M is the number of detections.
+
+    Examples:
+        Compute the embedding distance between tracks and detections using cosine metric
+        >>> tracks = [STrack(...), STrack(...)]  # List of track objects with embedding features
+        >>> detections = [BaseTrack(...), BaseTrack(...)]  # List of detection objects with embedding features
+        >>> cost_matrix = embedding_distance(tracks, detections, metric="cosine")
+    """
+    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
+    if cost_matrix.size == 0:
+        return cost_matrix
+    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32)
+    # for i, track in enumerate(tracks):
+    #     cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
+    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32)
+    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))  # Normalized features
+    return cost_matrix
+
+
+def fuse_score(cost_matrix: np.ndarray, detections: list) -> np.ndarray:
+    """
+    Fuse cost matrix with detection scores to produce a single similarity matrix.
+
+    Args:
+        cost_matrix (np.ndarray): The matrix containing cost values for assignments, with shape (N, M).
+        detections (List[BaseTrack]): List of detections, each containing a score attribute.
+
+    Returns:
+        (np.ndarray): Fused similarity matrix with shape (N, M).
+
+    Examples:
+        Fuse a cost matrix with detection scores
+        >>> cost_matrix = np.random.rand(5, 10)  # 5 tracks and 10 detections
+        >>> detections = [BaseTrack(score=np.random.rand()) for _ in range(10)]
+        >>> fused_matrix = fuse_score(cost_matrix, detections)
+    """
+    if cost_matrix.size == 0:
+        return cost_matrix
+    iou_sim = 1 - cost_matrix
+    det_scores = np.array([det.score for det in detections])
+    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
+    fuse_sim = iou_sim * det_scores
+    return 1 - fuse_sim  # fuse_cost
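
Together these helpers form the association step used by the trackers: build an IoU or embedding cost matrix, optionally fuse it with detection confidences via fuse_score, then solve it as a linear assignment problem. A minimal sketch follows, not part of the packaged diff; it assumes the lap package is installed and uses plain xyxy boxes in place of STrack objects, which iou_distance accepts directly.

import numpy as np

from ultralytics.trackers.utils import matching

tracks = [np.array([0, 0, 10, 10], dtype=np.float32), np.array([20, 20, 30, 30], dtype=np.float32)]
dets = [np.array([1, 1, 11, 11], dtype=np.float32), np.array([50, 50, 60, 60], dtype=np.float32)]

cost = matching.iou_distance(tracks, dets)  # (2, 2) cost matrix, entries are 1 - IoU
matches, unmatched_tracks, unmatched_dets = matching.linear_assignment(cost, thresh=0.8)
# matches -> (track_idx, det_idx) pairs below the threshold; unmatched_* -> indices left unassigned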