photonlibpy 2025.0.0b2__py3-none-any.whl → 2025.0.0b4__py3-none-any.whl
- photonlibpy/estimation/openCVHelp.py +117 -7
- photonlibpy/estimation/rotTrlTransform3d.py +43 -1
- photonlibpy/estimation/targetModel.py +126 -80
- photonlibpy/estimation/visionEstimation.py +26 -4
- photonlibpy/generated/MultiTargetPNPResultSerde.py +7 -1
- photonlibpy/generated/PhotonPipelineMetadataSerde.py +6 -1
- photonlibpy/generated/PhotonPipelineResultSerde.py +9 -1
- photonlibpy/generated/PhotonTrackedTargetSerde.py +7 -1
- photonlibpy/generated/PnpResultSerde.py +6 -1
- photonlibpy/generated/TargetCornerSerde.py +6 -1
- photonlibpy/networktables/NTTopicSet.py +11 -2
- photonlibpy/photonCamera.py +69 -2
- photonlibpy/simulation/photonCameraSim.py +127 -51
- photonlibpy/simulation/simCameraProperties.py +184 -63
- photonlibpy/simulation/visionSystemSim.py +104 -7
- photonlibpy/simulation/visionTargetSim.py +10 -0
- photonlibpy/targeting/TargetCorner.py +2 -2
- photonlibpy/targeting/multiTargetPNPResult.py +4 -15
- photonlibpy/targeting/photonPipelineResult.py +4 -3
- photonlibpy/targeting/photonTrackedTarget.py +2 -2
- photonlibpy/version.py +2 -2
- {photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b4.dist-info}/METADATA +8 -9
- photonlibpy-2025.0.0b4.dist-info/RECORD +36 -0
- photonlibpy-2025.0.0b2.dist-info/RECORD +0 -36
- {photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b4.dist-info}/WHEEL +0 -0
- {photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b4.dist-info}/top_level.txt +0 -0
photonlibpy/estimation/openCVHelp.py

@@ -1,5 +1,5 @@
 import math
-from typing import Any
+from typing import Any
 
 import cv2 as cv
 import numpy as np
@@ -27,6 +27,12 @@ class OpenCVHelp:
 
     @staticmethod
     def translationToTVec(translations: list[Translation3d]) -> np.ndarray:
+        """Creates a new :class:`np.array` with these 3d translations. The opencv tvec is a vector with
+        three elements representing {x, y, z} in the EDN coordinate system.
+
+        :param translations: The translations to convert into a np.array
+        """
+
         retVal: list[list] = []
         for translation in translations:
             trl = OpenCVHelp.translationNWUtoEDN(translation)
@@ -38,6 +44,13 @@ class OpenCVHelp:
 
     @staticmethod
     def rotationToRVec(rotation: Rotation3d) -> np.ndarray:
+        """Creates a new :class:`.np.array` with this 3d rotation. The opencv rvec Mat is a vector with
+        three elements representing the axis scaled by the angle in the EDN coordinate system. (angle =
+        norm, and axis = rvec / norm)
+
+        :param rotation: The rotation to convert into a np.array
+        """
+
         retVal: list[np.ndarray] = []
         rot = OpenCVHelp.rotationNWUtoEDN(rotation)
         rotVec = rot.getQuaternion().toRotationVector()
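The rvec convention this docstring describes can be checked with wpimath alone, using the same call chain the method itself uses:

import math
import numpy as np
from wpimath.geometry import Rotation3d

# 90 degrees of yaw: the rotation vector points along z and its norm is the angle.
rot = Rotation3d(0.0, 0.0, math.radians(90))
rvec = rot.getQuaternion().toRotationVector()
print(np.ravel(rvec))        # ~[0, 0, 1.5708]
print(np.linalg.norm(rvec))  # ~pi/2, the rotation angle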
@@ -48,13 +61,13 @@ class OpenCVHelp:
         )
 
     @staticmethod
-    def avgPoint(points:
+    def avgPoint(points: np.ndarray) -> np.ndarray:
         x = 0.0
         y = 0.0
         for p in points:
-            x += p[0]
-            y += p[1]
-        return (x / len(points), y / len(points))
+            x += p[0, 0]
+            y += p[0, 1]
+        return np.array([[x / len(points), y / len(points)]])
 
     @staticmethod
     def pointsToTargetCorners(points: np.ndarray) -> list[TargetCorner]:
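The avgPoint rewrite moves from plain tuples to OpenCV's (N, 1, 2) point-array layout. A small numpy sketch of the same centroid computation:

import numpy as np

# Four corners in OpenCV layout: one [[x, y]] row per point, shape (4, 1, 2).
points = np.array([[[0.0, 0.0]], [[2.0, 0.0]], [[2.0, 2.0]], [[0.0, 2.0]]])

# p[0, 0] is x and p[0, 1] is y, exactly as the updated avgPoint indexes them.
x = sum(p[0, 0] for p in points) / len(points)
y = sum(p[0, 1] for p in points) / len(points)
print(np.array([[x, y]]))  # [[1. 1.]], the centroid, in the same (1, 2) layout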
@@ -88,6 +101,25 @@ class OpenCVHelp:
     def reorderCircular(
         elements: list[Any] | np.ndarray, backwards: bool, shiftStart: int
     ) -> list[Any]:
+        """Reorders the list, optionally indexing backwards and wrapping around to the last element after
+        the first, and shifting all indices in the direction of indexing.
+
+        e.g.
+
+        ({1,2,3}, false, 1) == {2,3,1}
+
+        ({1,2,3}, true, 0) == {1,3,2}
+
+        ({1,2,3}, true, 1) == {3,2,1}
+
+        :param elements: list elements
+        :param backwards: If indexing should happen in reverse (0, size-1, size-2, ...)
+        :param shiftStart: How much the initial index should be shifted (instead of starting at index 0,
+            start at shiftStart, negated if backwards)
+
+        :returns: Reordered list
+        """
+
         size = len(elements)
         reordered = []
         dir = -1 if backwards else 1
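A standalone sketch of the documented behaviour (not the library's implementation), reproducing the three examples from the docstring:

from typing import Any

def reorder_circular(elements: list[Any], backwards: bool, shiftStart: int) -> list[Any]:
    # Walk the list in the chosen direction, starting from the (negated-if-backwards)
    # shift, wrapping around with modular indexing.
    size = len(elements)
    direction = -1 if backwards else 1
    return [elements[(direction * (shiftStart + i)) % size] for i in range(size)]

assert reorder_circular([1, 2, 3], False, 1) == [2, 3, 1]
assert reorder_circular([1, 2, 3], True, 0) == [1, 3, 2]
assert reorder_circular([1, 2, 3], True, 1) == [3, 2, 1]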
@@ -100,18 +132,39 @@ class OpenCVHelp:
 
     @staticmethod
     def translationEDNToNWU(trl: Translation3d) -> Translation3d:
+        """Convert a rotation delta from EDN to NWU. For example, if you have a rotation X,Y,Z {1, 0, 0}
+        in EDN, this would be {0, -1, 0} in NWU.
+        """
+
         return trl.rotateBy(EDN_TO_NWU)
 
     @staticmethod
     def rotationEDNToNWU(rot: Rotation3d) -> Rotation3d:
+        """Convert a rotation delta from NWU to EDN. For example, if you have a rotation X,Y,Z {1, 0, 0}
+        in NWU, this would be {0, 0, 1} in EDN.
+        """
+
         return -EDN_TO_NWU + (rot + EDN_TO_NWU)
 
     @staticmethod
     def tVecToTranslation(tvecInput: np.ndarray) -> Translation3d:
+        """Returns a new 3d translation from this :class:`.Mat`. The opencv tvec is a vector with three
+        elements representing {x, y, z} in the EDN coordinate system.
+
+        :param tvecInput: The tvec to create a Translation3d from
+        """
+
         return OpenCVHelp.translationEDNToNWU(Translation3d(tvecInput))
 
     @staticmethod
     def rVecToRotation(rvecInput: np.ndarray) -> Rotation3d:
+        """Returns a 3d rotation from this :class:`.Mat`. The opencv rvec Mat is a vector with three
+        elements representing the axis scaled by the angle in the EDN coordinate system. (angle = norm,
+        and axis = rvec / norm)
+
+        :param rvecInput: The rvec to create a Rotation3d from
+        """
+
         return OpenCVHelp.rotationEDNToNWU(Rotation3d(rvecInput))
 
     @staticmethod
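The EDN/NWU examples in these docstrings can be reproduced with wpimath directly. The rotation below is written out explicitly and stands in for the library's own EDN_TO_NWU constant, which is an assumption for illustration:

import math
from wpimath.geometry import Rotation3d, Translation3d

# EDN (east-down-north, the camera frame) -> NWU (north-west-up, the field frame).
EDN_TO_NWU = Rotation3d(math.radians(-90), 0.0, math.radians(-90))

# EDN x (east) maps to NWU -y (west is +y), matching the docstring example.
print(Translation3d(1.0, 0.0, 0.0).rotateBy(EDN_TO_NWU))  # ~(0, -1, 0)
# EDN z (north) maps to NWU +x.
print(Translation3d(0.0, 0.0, 1.0).rotateBy(EDN_TO_NWU))  # ~(1, 0, 0)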
@@ -121,12 +174,43 @@ class OpenCVHelp:
         modelTrls: list[Translation3d],
         imagePoints: np.ndarray,
     ) -> PnpResult | None:
+        """Finds the transformation(s) that map the camera's pose to the target's pose. The camera's pose
+        relative to the target is determined by the supplied 3d points of the target's model and their
+        associated 2d points imaged by the camera. The supplied model translations must be relative to
+        the target's pose.
+
+        For planar targets, there may be an alternate solution which is plausible given the 2d image
+        points. This has an associated "ambiguity" which describes the ratio of reprojection error
+        between the "best" and "alternate" solution.
+
+        This method is intended for use with individual AprilTags, and will not work unless 4 points
+        are provided.
+
+        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
+        :param distCoeffs: The camera distortion matrix in standard opencv form
+        :param modelTrls: The translations of the object corners. These should have the object pose as
+            their origin. These must come in a specific, pose-relative order (in NWU):
+
+            - Point 0: [0, -squareLength / 2, squareLength / 2]
+            - Point 1: [0, squareLength / 2, squareLength / 2]
+            - Point 2: [0, squareLength / 2, -squareLength / 2]
+            - Point 3: [0, -squareLength / 2, -squareLength / 2]
+        :param imagePoints: The projection of these 3d object points into the 2d camera image. The order
+            should match the given object point translations.
+
+        :returns: The resulting transformation that maps the camera pose to the target pose and the
+            ambiguity if an alternate solution is available.
+        """
         modelTrls = OpenCVHelp.reorderCircular(modelTrls, True, -1)
         imagePoints = np.array(OpenCVHelp.reorderCircular(imagePoints, True, -1))
         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
 
         alt: Transform3d | None = None
+        reprojectionError: cv.typing.MatLike | None = None
+        best: Transform3d = Transform3d()
+
         for tries in range(2):
+            # calc rvecs/tvecs and associated reprojection error from image points
             retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
                 objectMat,
                 imagePoints,
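A hedged usage sketch of solvePNP_Square as documented above; the import path, camera intrinsics, and detected corners are illustrative assumptions:

import numpy as np
from wpimath.geometry import Translation3d

from photonlibpy.estimation import OpenCVHelp  # import path assumed

# Pose-relative tag corners in the documented NWU order, for a 6.5 in square.
squareLength = 6.5 * 0.0254
modelTrls = [
    Translation3d(0.0, -squareLength / 2, squareLength / 2),
    Translation3d(0.0, squareLength / 2, squareLength / 2),
    Translation3d(0.0, squareLength / 2, -squareLength / 2),
    Translation3d(0.0, -squareLength / 2, -squareLength / 2),
]

# Illustrative 640x480 pinhole intrinsics, no distortion.
cameraMatrix = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
distCoeffs = np.zeros((8, 1))

# Hypothetical detected corners, shape (4, 1, 2), in the same order as modelTrls.
imagePoints = np.array(
    [[[300.0, 220.0]], [[340.0, 220.0]], [[340.0, 260.0]], [[300.0, 260.0]]]
)

result = OpenCVHelp.solvePNP_Square(cameraMatrix, distCoeffs, modelTrls, imagePoints)
if result is not None:
    print(result.best, result.ambiguity)  # camera-to-target transform plus ambiguity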
@@ -135,6 +219,7 @@ class OpenCVHelp:
                 flags=cv.SOLVEPNP_IPPE_SQUARE,
             )
 
+            # convert to wpilib coordinates
             best = Transform3d(
                 OpenCVHelp.tVecToTranslation(tvecs[0]),
                 OpenCVHelp.rVecToRotation(rvecs[0]),
@@ -145,7 +230,10 @@ class OpenCVHelp:
                     OpenCVHelp.rVecToRotation(rvecs[1]),
                 )
 
-            if
+            # check if we got a NaN result
+            if reprojectionError is not None and not math.isnan(
+                reprojectionError[0, 0]
+            ):
                 break
             else:
                 pt = imagePoints[0]
@@ -153,7 +241,8 @@ class OpenCVHelp:
                 pt[0, 1] -= 0.001
                 imagePoints[0] = pt
 
-
+        # solvePnP failed
+        if reprojectionError is None or math.isnan(reprojectionError[0, 0]):
             print("SolvePNP_Square failed!")
             return None
 
@@ -181,6 +270,27 @@ class OpenCVHelp:
         modelTrls: list[Translation3d],
         imagePoints: np.ndarray,
     ) -> PnpResult | None:
+        """Finds the transformation that maps the camera's pose to the origin of the supplied object. An
+        "object" is simply a set of known 3d translations that correspond to the given 2d points. If,
+        for example, the object translations are given relative to close-right corner of the blue
+        alliance(the default origin), a camera-to-origin transformation is returned. If the
+        translations are relative to a target's pose, a camera-to-target transformation is returned.
+
+        There must be at least 3 points to use this method. This does not return an alternate
+        solution-- if you are intending to use solvePNP on a single AprilTag, see {@link
+        #solvePNP_SQUARE} instead.
+
+        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
+        :param distCoeffs: The camera distortion matrix in standard opencv form
+        :param objectTrls: The translations of the object corners, relative to the field.
+        :param imagePoints: The projection of these 3d object points into the 2d camera image. The order
+            should match the given object point translations.
+
+        :returns: The resulting transformation that maps the camera pose to the target pose. If the 3d
+            model points are supplied relative to the origin, this transformation brings the camera to
+            the origin.
+        """
+
         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
 
         retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
photonlibpy/estimation/rotTrlTransform3d.py

@@ -4,29 +4,71 @@ from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d
 
 
 class RotTrlTransform3d:
+    """Represents a transformation that first rotates a pose around the origin, and then translates it."""
+
     def __init__(
         self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
     ):
+        """A rotation-translation transformation.
+
+        Applying this RotTrlTransform3d to poses will preserve their current origin-to-pose
+        transform as if the origin was transformed by these components instead.
+
+        :param rot: The rotation component
+        :param trl: The translation component
+        """
         self.rot = rot
         self.trl = trl
 
     def inverse(self) -> Self:
+        """The inverse of this transformation. Applying the inverse will "undo" this transformation."""
         invRot = -self.rot
         invTrl = -(self.trl.rotateBy(invRot))
         return type(self)(invRot, invTrl)
 
     def getTransform(self) -> Transform3d:
+        """This transformation as a Transform3d (as if of the origin)"""
         return Transform3d(self.trl, self.rot)
 
     def getTranslation(self) -> Translation3d:
+        """The translation component of this transformation"""
         return self.trl
 
     def getRotation(self) -> Rotation3d:
+        """The rotation component of this transformation"""
         return self.rot
 
-    def
+    def applyTranslation(self, trlToApply: Translation3d) -> Translation3d:
         return trlToApply.rotateBy(self.rot) + self.trl
 
+    def applyRotation(self, rotToApply: Rotation3d) -> Rotation3d:
+        return rotToApply + self.rot
+
+    def applyPose(self, poseToApply: Pose3d) -> Pose3d:
+        return Pose3d(
+            self.applyTranslation(poseToApply.translation()),
+            self.applyRotation(poseToApply.rotation()),
+        )
+
+    def applyTrls(self, rots: list[Rotation3d]) -> list[Rotation3d]:
+        retVal: list[Rotation3d] = []
+        for rot in rots:
+            retVal.append(self.applyRotation(rot))
+        return retVal
+
     @classmethod
     def makeRelativeTo(cls, pose: Pose3d) -> Self:
+        """The rotation-translation transformation that makes poses in the world consider this pose as the
+        new origin, or change the basis to this pose.
+
+        :param pose: The new origin
+        """
         return cls(pose.rotation(), pose.translation()).inverse()
+
+    @classmethod
+    def makeBetweenPoses(cls, initial: Pose3d, last: Pose3d) -> Self:
+        return cls(
+            last.rotation() - initial.rotation(),
+            last.translation()
+            - initial.translation().rotateBy(last.rotation() - initial.rotation()),
+        )
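A short sketch of the new apply*/make* helpers; the package-level import mirrors the `from . import RotTrlTransform3d` seen in targetModel.py below, but the exact path is an assumption:

import math
from wpimath.geometry import Pose3d, Rotation3d, Translation3d

from photonlibpy.estimation import RotTrlTransform3d  # import path assumed

camPose = Pose3d(Translation3d(1.0, 2.0, 0.5), Rotation3d(0.0, 0.0, math.radians(90)))
tagPose = Pose3d(Translation3d(4.0, 2.0, 1.0), Rotation3d())

# makeRelativeTo: change of basis, expressing world poses in camPose's frame.
toCamFrame = RotTrlTransform3d.makeRelativeTo(camPose)
print(toCamFrame.applyPose(tagPose))

# makeBetweenPoses: the transform that carries `initial` onto `last`.
between = RotTrlTransform3d.makeBetweenPoses(camPose, tagPose)
print(between.applyPose(camPose))  # should print a pose ~equal to tagPose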
photonlibpy/estimation/targetModel.py

@@ -8,87 +8,117 @@ from . import RotTrlTransform3d
 
 
 class TargetModel:
-[old lines 11-83 removed; their content is not rendered in this view]
+    """Describes the 3d model of a target."""
+
+    def __init__(self):
+        """Default constructor for initialising internal class members. DO NOT USE THIS!!! USE THE createPlanar,
+        createCuboid, createSpheroid or create Arbitrary
+        """
+        self.vertices: List[Translation3d] = []
+        self.isPlanar = False
+        self.isSpherical = False
+
+    @classmethod
+    def createPlanar(cls, width: meters, height: meters) -> Self:
+        """Creates a rectangular, planar target model given the width and height. The model has four
+        vertices:
+
+        - Point 0: [0, -width/2, -height/2]
+        - Point 1: [0, width/2, -height/2]
+        - Point 2: [0, width/2, height/2]
+        - Point 3: [0, -width/2, height/2]
+        """
+
+        tm = cls()
+
+        tm.isPlanar = True
+        tm.isSpherical = False
+        tm.vertices = [
+            Translation3d(0.0, -width / 2.0, -height / 2.0),
+            Translation3d(0.0, width / 2.0, -height / 2.0),
+            Translation3d(0.0, width / 2.0, height / 2.0),
+            Translation3d(0.0, -width / 2.0, height / 2.0),
+        ]
+        return tm
+
+    @classmethod
+    def createCuboid(cls, length: meters, width: meters, height: meters) -> Self:
+        """Creates a cuboid target model given the length, width, height. The model has eight vertices:
+
+        - Point 0: [length/2, -width/2, -height/2]
+        - Point 1: [length/2, width/2, -height/2]
+        - Point 2: [length/2, width/2, height/2]
+        - Point 3: [length/2, -width/2, height/2]
+        - Point 4: [-length/2, -width/2, height/2]
+        - Point 5: [-length/2, width/2, height/2]
+        - Point 6: [-length/2, width/2, -height/2]
+        - Point 7: [-length/2, -width/2, -height/2]
+        """
+
+        tm = cls()
+        verts = [
+            Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
+            Translation3d(length / 2.0, width / 2.0, -height / 2.0),
+            Translation3d(length / 2.0, width / 2.0, height / 2.0),
+            Translation3d(length / 2.0, -width / 2.0, height / 2.0),
+            Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
+            Translation3d(-length / 2.0, width / 2.0, height / 2.0),
+            Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
+            Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
+        ]
+
+        tm._common_construction(verts)
+
+        return tm
+
+    @classmethod
+    def createSpheroid(cls, diameter: meters) -> Self:
+        """Creates a spherical target model which has similar dimensions regardless of its rotation. This
+        model has four vertices:
+
+        - Point 0: [0, -radius, 0]
+        - Point 1: [0, 0, -radius]
+        - Point 2: [0, radius, 0]
+        - Point 3: [0, 0, radius]
+
+        *Q: Why these vertices?* A: This target should be oriented to the camera every frame, much
+        like a sprite/decal, and these vertices represent the ellipse vertices (maxima). These vertices
+        are used for drawing the image of this sphere, but do not match the corners that will be
+        published by photonvision.
+        """
+
+        tm = cls()
+
+        tm.isPlanar = False
+        tm.isSpherical = True
+        tm.vertices = [
+            Translation3d(0.0, -diameter / 2.0, 0.0),
+            Translation3d(0.0, 0.0, -diameter / 2.0),
+            Translation3d(0.0, diameter / 2.0, 0.0),
+            Translation3d(0.0, 0.0, diameter / 2.0),
+        ]
+
+        return tm
 
-[old lines 85-87 removed; their content is not rendered in this view]
+    @classmethod
+    def createArbitrary(cls, verts: List[Translation3d]) -> Self:
+        """Creates a target model from arbitrary 3d vertices. Automatically determines if the given
+        vertices are planar(x == 0). More than 2 vertices must be given. If this is a planar model, the
+        vertices should define a non-intersecting contour.
+
+        :param vertices: Translations representing the vertices of this target model relative to its
+            pose.
+        """
+
+        tm = cls()
+        tm._common_construction(verts)
 
+        return tm
+
+    def _common_construction(self, verts: List[Translation3d]) -> None:
         self.isSpherical = False
         if len(verts) <= 2:
-            self.vertices
+            self.vertices = []
             self.isPlanar = False
         else:
             cornersPlaner = True
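A sketch of the new factory methods that replace the old constructor, with expected results based on the semantics shown above (import path assumed):

from wpimath.geometry import Translation3d

from photonlibpy.estimation import TargetModel  # import path assumed

tag = TargetModel.createPlanar(width=6.5 * 0.0254, height=6.5 * 0.0254)
print(tag.isPlanar, len(tag.vertices))   # expected: True 4

box = TargetModel.createCuboid(length=0.3, width=0.3, height=0.2)
print(box.isPlanar, len(box.vertices))   # expected: False 8

ball = TargetModel.createSpheroid(diameter=0.24)
print(ball.isSpherical)                  # expected: True

# Arbitrary vertices with x == 0 should be detected as planar.
tri = TargetModel.createArbitrary(
    [Translation3d(0, 0, 0), Translation3d(0, 0.5, 0), Translation3d(0, 0.25, 0.5)]
)
print(tri.isPlanar)                      # expected: True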
@@ -100,17 +130,33 @@ class TargetModel:
             self.vertices = verts
 
     def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
+        """This target's vertices offset from its field pose.
+
+        Note: If this target is spherical, use {@link #getOrientedPose(Translation3d,
+        Translation3d)} with this method.
+        """
+
         basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())
 
         retVal = []
 
         for vert in self.vertices:
-            retVal.append(basisChange.
+            retVal.append(basisChange.applyTranslation(vert))
 
         return retVal
 
     @classmethod
     def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
+        """Returns a Pose3d with the given target translation oriented (with its relative x-axis aligned)
+        to the camera translation. This is used for spherical targets which should not have their
+        projection change regardless of their own rotation.
+
+        :param tgtTrl: This target's translation
+        :param cameraTrl: Camera's translation
+
+        :returns: This target's pose oriented to the camera
+        """
+
         relCam = cameraTrl - tgtTrl
         orientToCam = Rotation3d(
             0.0,
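Combining the two methods above for a spherical target, as the getFieldVertices note suggests (import path assumed):

from wpimath.geometry import Translation3d

from photonlibpy.estimation import TargetModel  # import path assumed

ball = TargetModel.createSpheroid(diameter=0.24)
tgtTrl = Translation3d(5.0, 0.0, 1.0)
camTrl = Translation3d(0.0, 0.0, 0.5)

# Orient the sphere's x-axis at the camera first, so its projected outline
# does not depend on the target's own rotation.
orientedPose = TargetModel.getOrientedPose(tgtTrl, camTrl)
for vert in ball.getFieldVertices(orientedPose):
    print(vert)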
@@ -130,8 +176,8 @@ class TargetModel:
 
     @classmethod
     def AprilTag36h11(cls) -> Self:
-        return cls(width=6.5 * 0.0254, height=6.5 * 0.0254)
+        return cls.createPlanar(width=6.5 * 0.0254, height=6.5 * 0.0254)
 
     @classmethod
     def AprilTag16h5(cls) -> Self:
-        return cls(width=6.0 * 0.0254, height=6.0 * 0.0254)
+        return cls.createPlanar(width=6.0 * 0.0254, height=6.0 * 0.0254)
photonlibpy/estimation/visionEstimation.py

@@ -11,15 +11,16 @@ class VisionEstimation:
     def getVisibleLayoutTags(
         visTags: list[PhotonTrackedTarget], layout: AprilTagFieldLayout
     ) -> list[AprilTag]:
+        """Get the visible :class:`.AprilTag`s which are in the tag layout using the visible tag IDs."""
         retVal: list[AprilTag] = []
         for tag in visTags:
             id = tag.getFiducialId()
             maybePose = layout.getTagPose(id)
             if maybePose:
-[old lines 19-21 removed; their content is not rendered in this view]
-                retVal.append(
+                aprilTag = AprilTag()
+                aprilTag.ID = id
+                aprilTag.pose = maybePose
+                retVal.append(aprilTag)
         return retVal
 
     @staticmethod
@@ -30,12 +31,31 @@
         layout: AprilTagFieldLayout,
         tagModel: TargetModel,
     ) -> PnpResult | None:
+        """Performs solvePNP using 3d-2d point correspondences of visible AprilTags to estimate the
+        field-to-camera transformation. If only one tag is visible, the result may have an alternate
+        solution.
+
+        **Note:** The returned transformation is from the field origin to the camera pose!
+
+        With only one tag: {@link OpenCVHelp#solvePNP_SQUARE}
+
+        With multiple tags: {@link OpenCVHelp#solvePNP_SQPNP}
+
+        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
+        :param distCoeffs: The camera distortion matrix in standard opencv form
+        :param visTags: The visible tags reported by PV. Non-tag targets are automatically excluded.
+        :param tagLayout: The known tag layout on the field
+
+        :returns: The transformation that maps the field origin to the camera pose. Ensure the {@link
+            PnpResult} are present before utilizing them.
+        """
         if len(visTags) == 0:
             return None
 
         corners: list[TargetCorner] = []
         knownTags: list[AprilTag] = []
 
+        # ensure these are AprilTags in our layout
        for tgt in visTags:
            id = tgt.getFiducialId()
            maybePose = layout.getTagPose(id)
|
|
53
73
|
|
54
74
|
points = OpenCVHelp.cornersToPoints(corners)
|
55
75
|
|
76
|
+
# single-tag pnp
|
56
77
|
if len(knownTags) == 1:
|
57
78
|
camToTag = OpenCVHelp.solvePNP_Square(
|
58
79
|
cameraMatrix, distCoeffs, tagModel.getVertices(), points
|
@@ -74,6 +95,7 @@ class VisionEstimation:
|
|
74
95
|
altReprojErr=camToTag.altReprojErr,
|
75
96
|
)
|
76
97
|
return result
|
98
|
+
# multi-tag pnp
|
77
99
|
else:
|
78
100
|
objectTrls: list[Translation3d] = []
|
79
101
|
for tag in knownTags:
|
@@ -20,8 +20,14 @@
|
|
20
20
|
## --> DO NOT MODIFY <--
|
21
21
|
###############################################################################
|
22
22
|
|
23
|
+
from typing import TYPE_CHECKING
|
24
|
+
|
23
25
|
from ..packet import Packet
|
24
|
-
from ..targeting import *
|
26
|
+
from ..targeting import * # noqa
|
27
|
+
|
28
|
+
if TYPE_CHECKING:
|
29
|
+
from ..targeting import MultiTargetPNPResult # noqa
|
30
|
+
from ..targeting import PnpResult # noqa
|
25
31
|
|
26
32
|
|
27
33
|
class MultiTargetPNPResultSerde:
|
@@ -20,8 +20,13 @@
|
|
20
20
|
## --> DO NOT MODIFY <--
|
21
21
|
###############################################################################
|
22
22
|
|
23
|
+
from typing import TYPE_CHECKING
|
24
|
+
|
23
25
|
from ..packet import Packet
|
24
|
-
from ..targeting import *
|
26
|
+
from ..targeting import * # noqa
|
27
|
+
|
28
|
+
if TYPE_CHECKING:
|
29
|
+
from ..targeting import PhotonPipelineMetadata # noqa
|
25
30
|
|
26
31
|
|
27
32
|
class PhotonPipelineMetadataSerde:
|
@@ -20,8 +20,16 @@
|
|
20
20
|
## --> DO NOT MODIFY <--
|
21
21
|
###############################################################################
|
22
22
|
|
23
|
+
from typing import TYPE_CHECKING
|
24
|
+
|
23
25
|
from ..packet import Packet
|
24
|
-
from ..targeting import *
|
26
|
+
from ..targeting import * # noqa
|
27
|
+
|
28
|
+
if TYPE_CHECKING:
|
29
|
+
from ..targeting import MultiTargetPNPResult # noqa
|
30
|
+
from ..targeting import PhotonPipelineMetadata # noqa
|
31
|
+
from ..targeting import PhotonPipelineResult # noqa
|
32
|
+
from ..targeting import PhotonTrackedTarget # noqa
|
25
33
|
|
26
34
|
|
27
35
|
class PhotonPipelineResultSerde:
|
photonlibpy/generated/PhotonTrackedTargetSerde.py

@@ -20,8 +20,14 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################
 
+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import PhotonTrackedTarget  # noqa
+    from ..targeting import TargetCorner  # noqa
 
 
 class PhotonTrackedTargetSerde: