photonlibpy 2025.0.0a0__py3-none-any.whl → 2025.0.0b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- photonlibpy/__init__.py +2 -2
- photonlibpy/estimation/__init__.py +5 -0
- photonlibpy/estimation/cameraTargetRelation.py +25 -0
- photonlibpy/estimation/openCVHelp.py +200 -0
- photonlibpy/estimation/rotTrlTransform3d.py +32 -0
- photonlibpy/estimation/targetModel.py +137 -0
- photonlibpy/estimation/visionEstimation.py +91 -0
- photonlibpy/generated/MultiTargetPNPResultSerde.py +12 -0
- photonlibpy/generated/PhotonPipelineMetadataSerde.py +23 -4
- photonlibpy/generated/PhotonPipelineResultSerde.py +19 -2
- photonlibpy/generated/PhotonTrackedTargetSerde.py +40 -0
- photonlibpy/generated/PnpResultSerde.py +19 -0
- photonlibpy/generated/TargetCornerSerde.py +12 -0
- photonlibpy/generated/__init__.py +0 -1
- photonlibpy/networktables/NTTopicSet.py +64 -0
- photonlibpy/networktables/__init__.py +1 -0
- photonlibpy/packet.py +123 -8
- photonlibpy/photonCamera.py +10 -7
- photonlibpy/photonPoseEstimator.py +5 -5
- photonlibpy/simulation/__init__.py +5 -0
- photonlibpy/simulation/photonCameraSim.py +408 -0
- photonlibpy/simulation/simCameraProperties.py +661 -0
- photonlibpy/simulation/videoSimUtil.py +2 -0
- photonlibpy/simulation/visionSystemSim.py +237 -0
- photonlibpy/simulation/visionTargetSim.py +50 -0
- photonlibpy/targeting/TargetCorner.py +5 -1
- photonlibpy/targeting/__init__.py +1 -1
- photonlibpy/targeting/multiTargetPNPResult.py +10 -4
- photonlibpy/targeting/photonPipelineResult.py +12 -5
- photonlibpy/targeting/photonTrackedTarget.py +13 -5
- photonlibpy/version.py +2 -2
- {photonlibpy-2025.0.0a0.dist-info → photonlibpy-2025.0.0b2.dist-info}/METADATA +6 -2
- photonlibpy-2025.0.0b2.dist-info/RECORD +36 -0
- {photonlibpy-2025.0.0a0.dist-info → photonlibpy-2025.0.0b2.dist-info}/WHEEL +1 -1
- photonlibpy-2025.0.0a0.dist-info/RECORD +0 -22
- {photonlibpy-2025.0.0a0.dist-info → photonlibpy-2025.0.0b2.dist-info}/top_level.txt +0 -0
photonlibpy/__init__.py
CHANGED
@@ -15,7 +15,7 @@
|
|
15
15
|
## along with this program. If not, see <https://www.gnu.org/licenses/>.
|
16
16
|
###############################################################################
|
17
17
|
|
18
|
-
from .packet import Packet # noqa
|
19
18
|
from .estimatedRobotPose import EstimatedRobotPose # noqa
|
20
|
-
from .
|
19
|
+
from .packet import Packet # noqa
|
21
20
|
from .photonCamera import PhotonCamera # noqa
|
21
|
+
from .photonPoseEstimator import PhotonPoseEstimator, PoseStrategy # noqa
|
@@ -0,0 +1,25 @@
|
|
1
|
+
import math
|
2
|
+
|
3
|
+
from wpimath.geometry import Pose3d, Rotation2d, Transform3d
|
4
|
+
from wpimath.units import meters
|
5
|
+
|
6
|
+
|
7
|
+
class CameraTargetRelation:
    """Precomputed geometric relationship between a camera pose and a target pose.

    All quantities are derived once from the two field-relative poses in the
    constructor; instances are effectively read-only value objects.
    """

    def __init__(self, cameraPose: Pose3d, targetPose: Pose3d):
        self.camPose = cameraPose
        # Transform taking the camera frame to the target frame
        self.camToTarg = Transform3d(cameraPose, targetPose)
        # Straight-line (3d) distance from camera to target
        self.camToTargDist = self.camToTarg.translation().norm()
        # Distance projected onto the X-Y (ground) plane
        self.camToTargDistXY: meters = math.hypot(
            self.camToTarg.translation().X(), self.camToTarg.translation().Y()
        )
        # Rotation2d(x, y) is atan2(y, x): yaw/pitch of the target as seen from the camera
        self.camToTargYaw = Rotation2d(self.camToTarg.X(), self.camToTarg.Y())
        self.camToTargPitch = Rotation2d(self.camToTargDistXY, -self.camToTarg.Z())
        # Combined angular offset of the target from the camera's facing direction
        self.camToTargAngle = Rotation2d(
            math.hypot(self.camToTargYaw.radians(), self.camToTargPitch.radians())
        )
        # Same quantities measured from the target's point of view
        self.targToCam = Transform3d(targetPose, cameraPose)
        self.targToCamYaw = Rotation2d(self.targToCam.X(), self.targToCam.Y())
        self.targToCamPitch = Rotation2d(self.camToTargDistXY, -self.targToCam.Z())
        # NOTE(review): attribute name has a lowercase "to" ("targtoCamAngle");
        # kept as-is because external callers may already rely on this spelling.
        self.targtoCamAngle = Rotation2d(
            math.hypot(self.targToCamYaw.radians(), self.targToCamPitch.radians())
        )
|
@@ -0,0 +1,200 @@
|
|
1
|
+
import math
|
2
|
+
from typing import Any, Tuple
|
3
|
+
|
4
|
+
import cv2 as cv
|
5
|
+
import numpy as np
|
6
|
+
from wpimath.geometry import Rotation3d, Transform3d, Translation3d
|
7
|
+
|
8
|
+
from ..targeting import PnpResult, TargetCorner
|
9
|
+
from .rotTrlTransform3d import RotTrlTransform3d
|
10
|
+
|
11
|
+
NWU_TO_EDN = Rotation3d(np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]]))
|
12
|
+
EDN_TO_NWU = Rotation3d(np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]))
|
13
|
+
|
14
|
+
|
15
|
+
class OpenCVHelp:
    """Helpers bridging WPILib's NWU (north-west-up) coordinate convention and
    OpenCV's EDN (east-down-north) convention, plus solvePnP-based pose
    estimation wrappers.
    """

    @staticmethod
    def getMinAreaRect(points: np.ndarray) -> cv.RotatedRect:
        """Return the minimum-area rotated rectangle enclosing ``points``."""
        return cv.RotatedRect(*cv.minAreaRect(points))

    @staticmethod
    def translationNWUtoEDN(trl: Translation3d) -> Translation3d:
        """Rotate a translation from the NWU basis into the EDN basis."""
        return trl.rotateBy(NWU_TO_EDN)

    @staticmethod
    def rotationNWUtoEDN(rot: Rotation3d) -> Rotation3d:
        """Re-express a rotation in the EDN basis (change of basis, not composition)."""
        return -NWU_TO_EDN + (rot + NWU_TO_EDN)

    @staticmethod
    def translationToTVec(translations: list[Translation3d]) -> np.ndarray:
        """Convert NWU translations to an Nx3 float32 OpenCV tvec array (EDN)."""
        retVal: list[list] = []
        for translation in translations:
            trl = OpenCVHelp.translationNWUtoEDN(translation)
            retVal.append([trl.X(), trl.Y(), trl.Z()])
        return np.array(
            retVal,
            dtype=np.float32,
        )

    @staticmethod
    def rotationToRVec(rotation: Rotation3d) -> np.ndarray:
        """Convert an NWU rotation to a 1x3 float32 OpenCV Rodrigues rvec (EDN)."""
        retVal: list[np.ndarray] = []
        rot = OpenCVHelp.rotationNWUtoEDN(rotation)
        rotVec = rot.getQuaternion().toRotationVector()
        retVal.append(rotVec)
        return np.array(
            retVal,
            dtype=np.float32,
        )

    @staticmethod
    def avgPoint(points: list[Tuple[float, float]]) -> Tuple[float, float]:
        """Return the centroid of ``points``.

        Raises ZeroDivisionError if ``points`` is empty (callers must not pass
        an empty list).
        """
        x = 0.0
        y = 0.0
        for p in points:
            x += p[0]
            y += p[1]
        return (x / len(points), y / len(points))

    @staticmethod
    def pointsToTargetCorners(points: np.ndarray) -> list[TargetCorner]:
        """Convert an OpenCV Nx1x2 point array into TargetCorner objects."""
        corners = [TargetCorner(p[0, 0], p[0, 1]) for p in points]
        return corners

    @staticmethod
    def cornersToPoints(corners: list[TargetCorner]) -> np.ndarray:
        """Convert TargetCorner objects into an OpenCV-style Nx1x2 point array."""
        points = [[[c.x, c.y]] for c in corners]
        return np.array(points)

    @staticmethod
    def projectPoints(
        cameraMatrix: np.ndarray,
        distCoeffs: np.ndarray,
        camRt: RotTrlTransform3d,
        objectTranslations: list[Translation3d],
    ) -> np.ndarray:
        """Project field-relative 3d points into image pixels for a camera at ``camRt``.

        :param cameraMatrix: 3x3 intrinsic matrix.
        :param distCoeffs: OpenCV distortion coefficients.
        :param camRt: world-to-camera transform.
        :param objectTranslations: 3d points (NWU) to project.
        :returns: Nx1x2 array of projected pixel coordinates.
        """
        objectPoints = OpenCVHelp.translationToTVec(objectTranslations)
        rvec = OpenCVHelp.rotationToRVec(camRt.getRotation())
        tvec = OpenCVHelp.translationToTVec(
            [
                camRt.getTranslation(),
            ]
        )

        pts, _ = cv.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs)
        return pts

    @staticmethod
    def reorderCircular(
        elements: list[Any] | np.ndarray, backwards: bool, shiftStart: int
    ) -> list[Any]:
        """Return ``elements`` rotated by ``shiftStart`` positions, optionally reversed.

        The traversal is circular, so shifts larger than the list wrap around.
        """
        size = len(elements)
        reordered = []
        # Renamed from `dir`, which shadowed the builtin of the same name.
        step = -1 if backwards else 1
        for i in range(size):
            index = (i * step + shiftStart * step) % size
            if index < 0:
                index += size
            reordered.append(elements[index])
        return reordered

    @staticmethod
    def translationEDNToNWU(trl: Translation3d) -> Translation3d:
        """Rotate a translation from the EDN basis back into the NWU basis."""
        return trl.rotateBy(EDN_TO_NWU)

    @staticmethod
    def rotationEDNToNWU(rot: Rotation3d) -> Rotation3d:
        """Re-express a rotation in the NWU basis (change of basis, not composition)."""
        return -EDN_TO_NWU + (rot + EDN_TO_NWU)

    @staticmethod
    def tVecToTranslation(tvecInput: np.ndarray) -> Translation3d:
        """Convert an OpenCV tvec (EDN) into an NWU Translation3d."""
        return OpenCVHelp.translationEDNToNWU(Translation3d(tvecInput))

    @staticmethod
    def rVecToRotation(rvecInput: np.ndarray) -> Rotation3d:
        """Convert an OpenCV Rodrigues rvec (EDN) into an NWU Rotation3d."""
        return OpenCVHelp.rotationEDNToNWU(Rotation3d(rvecInput))

    @staticmethod
    def solvePNP_Square(
        cameraMatrix: np.ndarray,
        distCoeffs: np.ndarray,
        modelTrls: list[Translation3d],
        imagePoints: np.ndarray,
    ) -> PnpResult | None:
        """Estimate the camera-to-target transform for a single square target
        using SOLVEPNP_IPPE_SQUARE, which returns both pose solutions.

        :returns: a PnpResult with best/alt poses and reprojection errors, or
            None if the solver failed to converge.
        """
        # IPPE_SQUARE expects the corners in a specific winding; rotate the
        # model and image points into that order.
        modelTrls = OpenCVHelp.reorderCircular(modelTrls, True, -1)
        imagePoints = np.array(OpenCVHelp.reorderCircular(imagePoints, True, -1))
        objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))

        alt: Transform3d | None = None
        for _attempt in range(2):
            retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
                objectMat,
                imagePoints,
                cameraMatrix,
                distCoeffs,
                flags=cv.SOLVEPNP_IPPE_SQUARE,
            )

            best = Transform3d(
                OpenCVHelp.tVecToTranslation(tvecs[0]),
                OpenCVHelp.rVecToRotation(rvecs[0]),
            )
            if len(tvecs) > 1:
                alt = Transform3d(
                    OpenCVHelp.tVecToTranslation(tvecs[1]),
                    OpenCVHelp.rVecToRotation(rvecs[1]),
                )

            if not math.isnan(reprojectionError[0, 0]):
                break
            else:
                # Degenerate geometry can make the solver return NaN; nudge the
                # first image point slightly and retry once.
                pt = imagePoints[0]
                pt[0, 0] -= 0.001
                pt[0, 1] -= 0.001
                imagePoints[0] = pt

        if math.isnan(reprojectionError[0, 0]):
            print("SolvePNP_Square failed!")
            return None

        if alt:
            return PnpResult(
                best=best,
                bestReprojErr=reprojectionError[0, 0],
                alt=alt,
                altReprojErr=reprojectionError[1, 0],
                ambiguity=reprojectionError[0, 0] / reprojectionError[1, 0],
            )
        else:
            # We have no alternative so set it to best as well.
            # FIX: index the scalar error (reprojectionError[0, 0]) as in the
            # branch above; previously this stored the 1-element row array
            # reprojectionError[0], giving the field an inconsistent type.
            return PnpResult(
                best=best,
                bestReprojErr=reprojectionError[0, 0],
                alt=best,
                altReprojErr=reprojectionError[0, 0],
            )

    @staticmethod
    def solvePNP_SQPNP(
        cameraMatrix: np.ndarray,
        distCoeffs: np.ndarray,
        modelTrls: list[Translation3d],
        imagePoints: np.ndarray,
    ) -> PnpResult | None:
        """Estimate a single camera pose from an arbitrary set of 3d/2d
        correspondences using SOLVEPNP_SQPNP.

        :returns: a PnpResult (alt duplicated from best — SQPNP yields one
            solution), or None if the solver produced a NaN error.
        """
        objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))

        retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
            objectMat, imagePoints, cameraMatrix, distCoeffs, flags=cv.SOLVEPNP_SQPNP
        )

        error = reprojectionError[0, 0]
        best = Transform3d(
            OpenCVHelp.tVecToTranslation(tvecs[0]), OpenCVHelp.rVecToRotation(rvecs[0])
        )

        if math.isnan(error):
            return None

        # We have no alternative so set it to best as well
        result = PnpResult(best=best, bestReprojErr=error, alt=best, altReprojErr=error)
        return result
|
@@ -0,0 +1,32 @@
|
|
1
|
+
from typing import Self
|
2
|
+
|
3
|
+
from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d
|
4
|
+
|
5
|
+
|
6
|
+
class RotTrlTransform3d:
    """A rigid 3d mapping expressed as a rotation followed by a translation.

    Applying the mapping rotates a point by ``rot`` and then offsets it by
    ``trl``.
    """

    def __init__(
        self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
    ):
        self.rot = rot
        self.trl = trl

    def inverse(self) -> Self:
        """Return the mapping that exactly undoes this one."""
        undoRot = -self.rot
        undoTrl = -(self.trl.rotateBy(undoRot))
        return type(self)(undoRot, undoTrl)

    def getTransform(self) -> Transform3d:
        """Return this mapping packaged as a wpimath Transform3d."""
        return Transform3d(self.trl, self.rot)

    def getTranslation(self) -> Translation3d:
        """Return the translation component."""
        return self.trl

    def getRotation(self) -> Rotation3d:
        """Return the rotation component."""
        return self.rot

    def apply(self, trlToApply: Translation3d) -> Translation3d:
        """Map ``trlToApply``: rotate it by this rotation, then offset by this translation."""
        rotated = trlToApply.rotateBy(self.rot)
        return rotated + self.trl

    @classmethod
    def makeRelativeTo(cls, pose: Pose3d) -> Self:
        """Build the mapping that re-expresses field-relative coordinates in ``pose``'s frame."""
        poseAsMapping = cls(pose.rotation(), pose.translation())
        return poseAsMapping.inverse()
|
@@ -0,0 +1,137 @@
|
|
1
|
+
import math
|
2
|
+
from typing import List, Self
|
3
|
+
|
4
|
+
from wpimath.geometry import Pose3d, Rotation2d, Rotation3d, Translation3d
|
5
|
+
from wpimath.units import meters
|
6
|
+
|
7
|
+
from . import RotTrlTransform3d
|
8
|
+
|
9
|
+
|
10
|
+
class TargetModel:
    """3d model of a vision target in its own coordinate frame.

    Supports a planar rectangle, a cuboid, a sphere, or an arbitrary vertex
    list. The target's visible face points along +X; vertices lie in the Y-Z
    plane for planar targets.
    """

    def __init__(
        self,
        *,
        width: meters | None = None,
        height: meters | None = None,
        length: meters | None = None,
        diameter: meters | None = None,
        verts: List[Translation3d] | None = None
    ):
        """Create a target model from exactly one keyword combination:

        - ``width`` + ``height``: planar rectangle
        - ``width`` + ``height`` + ``length``: cuboid
        - ``diameter``: sphere
        - ``verts``: arbitrary vertex list

        :raises ValueError: if the supplied keywords match none of the
            overloads above. (ValueError is a subclass of Exception, so
            callers that caught the previous bare ``Exception`` still work.)
        """

        if (
            width is not None
            and height is not None
            and length is None
            and diameter is None
            and verts is None
        ):
            # Planar rectangle centered on the origin in the Y-Z plane.
            self.isPlanar = True
            self.isSpherical = False
            self.vertices = [
                Translation3d(0.0, -width / 2.0, -height / 2.0),
                Translation3d(0.0, width / 2.0, -height / 2.0),
                Translation3d(0.0, width / 2.0, height / 2.0),
                Translation3d(0.0, -width / 2.0, height / 2.0),
            ]

            return

        elif (
            length is not None
            and width is not None
            and height is not None
            and diameter is None
            and verts is None
        ):
            # Cuboid: build its 8 corners, then fall through to the generic
            # vertex handling below.
            verts = [
                Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
                Translation3d(length / 2.0, width / 2.0, -height / 2.0),
                Translation3d(length / 2.0, width / 2.0, height / 2.0),
                Translation3d(length / 2.0, -width / 2.0, height / 2.0),
                Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
                Translation3d(-length / 2.0, width / 2.0, height / 2.0),
                Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
                Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
            ]
        elif (
            diameter is not None
            and width is None
            and height is None
            and length is None
            and verts is None
        ):
            # Sphere: represented by its 4 extreme points on the Y and Z axes.
            self.isPlanar = False
            self.isSpherical = True
            self.vertices = [
                Translation3d(0.0, -diameter / 2.0, 0.0),
                Translation3d(0.0, 0.0, -diameter / 2.0),
                Translation3d(0.0, diameter / 2.0, 0.0),
                Translation3d(0.0, 0.0, diameter / 2.0),
            ]
            return
        elif (
            verts is not None
            and width is None
            and height is None
            and length is None
            and diameter is None
        ):
            # Arbitrary vertices: handled by the generic case below.
            pass
        else:
            raise ValueError("Not a valid overload")

        # Narrow the type for checkers: every path reaching here set `verts`.
        assert verts is not None

        self.isSpherical = False
        if len(verts) <= 2:
            # Too few vertices to form a surface.
            self.vertices: List[Translation3d] = []
            self.isPlanar = False
        else:
            # The model is planar iff every vertex lies (within tolerance) on
            # the X = 0 plane.
            # FIX: the previous check was `abs(corner.X() < 1e-4)`, which takes
            # abs() of a *boolean* comparison and so tested the wrong
            # condition entirely; compare the magnitude of X instead.
            self.isPlanar = all(abs(corner.X()) < 1e-4 for corner in verts)

            self.vertices = verts

    def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
        """Return this model's vertices transformed into the field frame for a
        target located at ``targetPose``."""
        basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())

        return [basisChange.apply(vert) for vert in self.vertices]

    @classmethod
    def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
        """Return a pose at ``tgtTrl`` rotated to face ``cameraTrl`` (useful
        for orienting spherical targets toward the camera)."""
        relCam = cameraTrl - tgtTrl
        orientToCam = Rotation3d(
            0.0,
            Rotation2d(math.hypot(relCam.X(), relCam.Y()), relCam.Z()).radians(),
            Rotation2d(relCam.X(), relCam.Y()).radians(),
        )
        return Pose3d(tgtTrl, orientToCam)

    def getVertices(self) -> List[Translation3d]:
        """Return the model's vertices in the target's own frame."""
        return self.vertices

    def getIsPlanar(self) -> bool:
        """Return True if all vertices lie on the target's X = 0 plane."""
        return self.isPlanar

    def getIsSpherical(self) -> bool:
        """Return True if this model was constructed as a sphere."""
        return self.isSpherical

    @classmethod
    def AprilTag36h11(cls) -> Self:
        """Standard 6.5-inch 36h11-family AprilTag, converted to meters."""
        return cls(width=6.5 * 0.0254, height=6.5 * 0.0254)

    @classmethod
    def AprilTag16h5(cls) -> Self:
        """Standard 6-inch 16h5-family AprilTag, converted to meters."""
        return cls(width=6.0 * 0.0254, height=6.0 * 0.0254)
|
@@ -0,0 +1,91 @@
|
|
1
|
+
import numpy as np
|
2
|
+
from robotpy_apriltag import AprilTag, AprilTagFieldLayout
|
3
|
+
from wpimath.geometry import Pose3d, Transform3d, Translation3d
|
4
|
+
|
5
|
+
from ..targeting import PhotonTrackedTarget, PnpResult, TargetCorner
|
6
|
+
from . import OpenCVHelp, TargetModel
|
7
|
+
|
8
|
+
|
9
|
+
class VisionEstimation:
    """Static helpers for estimating the camera's field pose from seen AprilTags."""

    @staticmethod
    def getVisibleLayoutTags(
        visTags: list[PhotonTrackedTarget], layout: AprilTagFieldLayout
    ) -> list[AprilTag]:
        """Return an AprilTag (ID + layout pose) for each seen target whose
        fiducial ID exists in ``layout``; targets without a layout pose are
        skipped."""
        retVal: list[AprilTag] = []
        # NOTE(review): the loop variable `tag` is rebound to a fresh AprilTag
        # inside the body, and `id` shadows the builtin — works, but fragile.
        for tag in visTags:
            id = tag.getFiducialId()
            maybePose = layout.getTagPose(id)
            if maybePose:
                tag = AprilTag()
                tag.ID = id
                tag.pose = maybePose
                retVal.append(tag)
        return retVal

    @staticmethod
    def estimateCamPosePNP(
        cameraMatrix: np.ndarray,
        distCoeffs: np.ndarray,
        visTags: list[PhotonTrackedTarget],
        layout: AprilTagFieldLayout,
        tagModel: TargetModel,
    ) -> PnpResult | None:
        """Estimate the camera's field-relative pose from the visible tags.

        Uses the square-tag solver when exactly one known tag is visible and
        the multi-point SQPNP solver otherwise.

        :param cameraMatrix: 3x3 camera intrinsic matrix.
        :param distCoeffs: OpenCV distortion coefficients.
        :param visTags: targets reported by the pipeline.
        :param layout: field layout mapping fiducial IDs to poses.
        :param tagModel: physical model of a single tag.
        :returns: a PnpResult whose transforms are field-to-camera, or None if
            no usable tags/corners were found or the solver failed.
        """
        if len(visTags) == 0:
            return None

        corners: list[TargetCorner] = []
        knownTags: list[AprilTag] = []

        # Collect layout poses and detected corners for every recognized tag.
        for tgt in visTags:
            id = tgt.getFiducialId()
            maybePose = layout.getTagPose(id)
            if maybePose:
                tag = AprilTag()
                tag.ID = id
                tag.pose = maybePose
                knownTags.append(tag)
                currentCorners = tgt.getDetectedCorners()
                if currentCorners:
                    corners += currentCorners

        # Each tag must contribute exactly 4 corners; bail out otherwise.
        if len(knownTags) == 0 or len(corners) == 0 or len(corners) % 4 != 0:
            return None

        points = OpenCVHelp.cornersToPoints(corners)

        if len(knownTags) == 1:
            # Single tag: solve camera->tag, then re-anchor on the tag's known
            # field pose to get the camera's field pose.
            camToTag = OpenCVHelp.solvePNP_Square(
                cameraMatrix, distCoeffs, tagModel.getVertices(), points
            )
            if not camToTag:
                return None

            bestPose = knownTags[0].pose.transformBy(camToTag.best.inverse())
            # When the solution is unambiguous the alternate stays at the
            # identity pose rather than mirroring `best`.
            altPose = Pose3d()
            if camToTag.ambiguity != 0:
                altPose = knownTags[0].pose.transformBy(camToTag.alt.inverse())

            o = Pose3d()
            result = PnpResult(
                best=Transform3d(o, bestPose),
                alt=Transform3d(o, altPose),
                ambiguity=camToTag.ambiguity,
                bestReprojErr=camToTag.bestReprojErr,
                altReprojErr=camToTag.altReprojErr,
            )
            return result
        else:
            # Multiple tags: solve against all tags' field-frame vertices at once.
            objectTrls: list[Translation3d] = []
            for tag in knownTags:
                verts = tagModel.getFieldVertices(tag.pose)
                objectTrls += verts

            ret = OpenCVHelp.solvePNP_SQPNP(
                cameraMatrix, distCoeffs, objectTrls, points
            )
            if ret:
                # Invert best/alt transforms
                ret.best = ret.best.inverse()
                ret.alt = ret.alt.inverse()

            return ret
|
@@ -20,6 +20,7 @@
|
|
20
20
|
## --> DO NOT MODIFY <--
|
21
21
|
###############################################################################
|
22
22
|
|
23
|
+
from ..packet import Packet
|
23
24
|
from ..targeting import *
|
24
25
|
|
25
26
|
|
@@ -28,6 +29,17 @@ class MultiTargetPNPResultSerde:
|
|
28
29
|
MESSAGE_VERSION = "541096947e9f3ca2d3f425ff7b04aa7b"
|
29
30
|
MESSAGE_FORMAT = "PnpResult:ae4d655c0a3104d88df4f5db144c1e86 estimatedPose;int16 fiducialIDsUsed[?];"
|
30
31
|
|
32
|
+
    @staticmethod
    def pack(value: "MultiTargetPNPResult") -> "Packet":
        """Serialize a MultiTargetPNPResult into a wire-format Packet.

        Generated code — field order must match MESSAGE_FORMAT.
        """
        ret = Packet()

        # estimatedPose is of non-intrinsic type PnpResult
        ret.encodeBytes(PnpResult.photonStruct.pack(value.estimatedPose).getData())

        # fiducialIDsUsed is a custom VLA!
        ret.encodeShortList(value.fiducialIDsUsed)
        return ret
|
42
|
+
|
31
43
|
@staticmethod
|
32
44
|
def unpack(packet: "Packet") -> "MultiTargetPNPResult":
|
33
45
|
ret = MultiTargetPNPResult()
|
@@ -20,15 +20,31 @@
|
|
20
20
|
## --> DO NOT MODIFY <--
|
21
21
|
###############################################################################
|
22
22
|
|
23
|
+
from ..packet import Packet
|
23
24
|
from ..targeting import *
|
24
25
|
|
25
26
|
|
26
27
|
class PhotonPipelineMetadataSerde:
|
27
28
|
# Message definition md5sum. See photon_packet.adoc for details
|
28
|
-
MESSAGE_VERSION = "
|
29
|
-
MESSAGE_FORMAT =
|
30
|
-
|
31
|
-
|
29
|
+
MESSAGE_VERSION = "ac0a45f686457856fb30af77699ea356"
|
30
|
+
MESSAGE_FORMAT = "int64 sequenceID;int64 captureTimestampMicros;int64 publishTimestampMicros;int64 timeSinceLastPong;"
|
31
|
+
|
32
|
+
    @staticmethod
    def pack(value: "PhotonPipelineMetadata") -> "Packet":
        """Serialize a PhotonPipelineMetadata into a wire-format Packet.

        Generated code — field order must match MESSAGE_FORMAT.
        """
        ret = Packet()

        # sequenceID is of intrinsic type int64
        ret.encodeLong(value.sequenceID)

        # captureTimestampMicros is of intrinsic type int64
        ret.encodeLong(value.captureTimestampMicros)

        # publishTimestampMicros is of intrinsic type int64
        ret.encodeLong(value.publishTimestampMicros)

        # timeSinceLastPong is of intrinsic type int64
        ret.encodeLong(value.timeSinceLastPong)
        return ret
|
32
48
|
|
33
49
|
@staticmethod
|
34
50
|
def unpack(packet: "Packet") -> "PhotonPipelineMetadata":
|
@@ -43,6 +59,9 @@ class PhotonPipelineMetadataSerde:
|
|
43
59
|
# publishTimestampMicros is of intrinsic type int64
|
44
60
|
ret.publishTimestampMicros = packet.decodeLong()
|
45
61
|
|
62
|
+
# timeSinceLastPong is of intrinsic type int64
|
63
|
+
ret.timeSinceLastPong = packet.decodeLong()
|
64
|
+
|
46
65
|
return ret
|
47
66
|
|
48
67
|
|
@@ -20,13 +20,30 @@
|
|
20
20
|
## --> DO NOT MODIFY <--
|
21
21
|
###############################################################################
|
22
22
|
|
23
|
+
from ..packet import Packet
|
23
24
|
from ..targeting import *
|
24
25
|
|
25
26
|
|
26
27
|
class PhotonPipelineResultSerde:
|
27
28
|
# Message definition md5sum. See photon_packet.adoc for details
|
28
|
-
MESSAGE_VERSION = "
|
29
|
-
MESSAGE_FORMAT = "PhotonPipelineMetadata:
|
29
|
+
MESSAGE_VERSION = "4b2ff16a964b5e2bf04be0c1454d91c4"
|
30
|
+
MESSAGE_FORMAT = "PhotonPipelineMetadata:ac0a45f686457856fb30af77699ea356 metadata;PhotonTrackedTarget:cc6dbb5c5c1e0fa808108019b20863f1 targets[?];optional MultiTargetPNPResult:541096947e9f3ca2d3f425ff7b04aa7b multitagResult;"
|
31
|
+
|
32
|
+
    @staticmethod
    def pack(value: "PhotonPipelineResult") -> "Packet":
        """Serialize a PhotonPipelineResult into a wire-format Packet.

        Generated code — field order must match MESSAGE_FORMAT.
        """
        ret = Packet()

        # metadata is of non-intrinsic type PhotonPipelineMetadata
        ret.encodeBytes(
            PhotonPipelineMetadata.photonStruct.pack(value.metadata).getData()
        )

        # targets is a custom VLA!
        ret.encodeList(value.targets, PhotonTrackedTarget.photonStruct)

        # multitagResult is optional! it better not be a VLA too
        ret.encodeOptional(value.multitagResult, MultiTargetPNPResult.photonStruct)
        return ret
|
30
47
|
|
31
48
|
@staticmethod
|
32
49
|
def unpack(packet: "Packet") -> "PhotonPipelineResult":
|
@@ -20,6 +20,7 @@
|
|
20
20
|
## --> DO NOT MODIFY <--
|
21
21
|
###############################################################################
|
22
22
|
|
23
|
+
from ..packet import Packet
|
23
24
|
from ..targeting import *
|
24
25
|
|
25
26
|
|
@@ -28,6 +29,45 @@ class PhotonTrackedTargetSerde:
|
|
28
29
|
MESSAGE_VERSION = "cc6dbb5c5c1e0fa808108019b20863f1"
|
29
30
|
MESSAGE_FORMAT = "float64 yaw;float64 pitch;float64 area;float64 skew;int32 fiducialId;int32 objDetectId;float32 objDetectConf;Transform3d bestCameraToTarget;Transform3d altCameraToTarget;float64 poseAmbiguity;TargetCorner:16f6ac0dedc8eaccb951f4895d9e18b6 minAreaRectCorners[?];TargetCorner:16f6ac0dedc8eaccb951f4895d9e18b6 detectedCorners[?];"
|
30
31
|
|
32
|
+
    @staticmethod
    def pack(value: "PhotonTrackedTarget") -> "Packet":
        """Serialize a PhotonTrackedTarget into a wire-format Packet.

        Generated code — field order must match MESSAGE_FORMAT.
        """
        ret = Packet()

        # yaw is of intrinsic type float64
        ret.encodeDouble(value.yaw)

        # pitch is of intrinsic type float64
        ret.encodeDouble(value.pitch)

        # area is of intrinsic type float64
        ret.encodeDouble(value.area)

        # skew is of intrinsic type float64
        ret.encodeDouble(value.skew)

        # fiducialId is of intrinsic type int32
        ret.encodeInt(value.fiducialId)

        # objDetectId is of intrinsic type int32
        ret.encodeInt(value.objDetectId)

        # objDetectConf is of intrinsic type float32
        ret.encodeFloat(value.objDetectConf)

        ret.encodeTransform(value.bestCameraToTarget)

        ret.encodeTransform(value.altCameraToTarget)

        # poseAmbiguity is of intrinsic type float64
        ret.encodeDouble(value.poseAmbiguity)

        # minAreaRectCorners is a custom VLA!
        ret.encodeList(value.minAreaRectCorners, TargetCorner.photonStruct)

        # detectedCorners is a custom VLA!
        ret.encodeList(value.detectedCorners, TargetCorner.photonStruct)
        return ret
|
70
|
+
|
31
71
|
@staticmethod
|
32
72
|
def unpack(packet: "Packet") -> "PhotonTrackedTarget":
|
33
73
|
ret = PhotonTrackedTarget()
|