photonlibpy-2025.0.0b1-py3-none-any.whl → photonlibpy-2025.0.0b3-py3-none-any.whl

Files changed (37)
  1. photonlibpy/__init__.py +2 -2
  2. photonlibpy/estimation/__init__.py +5 -0
  3. photonlibpy/estimation/cameraTargetRelation.py +25 -0
  4. photonlibpy/estimation/openCVHelp.py +205 -0
  5. photonlibpy/estimation/rotTrlTransform3d.py +55 -0
  6. photonlibpy/estimation/targetModel.py +120 -0
  7. photonlibpy/estimation/visionEstimation.py +91 -0
  8. photonlibpy/generated/MultiTargetPNPResultSerde.py +7 -1
  9. photonlibpy/generated/PhotonPipelineMetadataSerde.py +6 -1
  10. photonlibpy/generated/PhotonPipelineResultSerde.py +9 -1
  11. photonlibpy/generated/PhotonTrackedTargetSerde.py +7 -1
  12. photonlibpy/generated/PnpResultSerde.py +6 -1
  13. photonlibpy/generated/TargetCornerSerde.py +6 -1
  14. photonlibpy/generated/__init__.py +0 -1
  15. photonlibpy/networktables/NTTopicSet.py +66 -0
  16. photonlibpy/networktables/__init__.py +1 -0
  17. photonlibpy/packet.py +17 -9
  18. photonlibpy/photonCamera.py +10 -7
  19. photonlibpy/photonPoseEstimator.py +3 -3
  20. photonlibpy/simulation/__init__.py +5 -0
  21. photonlibpy/simulation/photonCameraSim.py +378 -0
  22. photonlibpy/simulation/simCameraProperties.py +643 -0
  23. photonlibpy/simulation/videoSimUtil.py +2 -0
  24. photonlibpy/simulation/visionSystemSim.py +242 -0
  25. photonlibpy/simulation/visionTargetSim.py +50 -0
  26. photonlibpy/targeting/TargetCorner.py +5 -1
  27. photonlibpy/targeting/__init__.py +1 -1
  28. photonlibpy/targeting/multiTargetPNPResult.py +8 -13
  29. photonlibpy/targeting/photonPipelineResult.py +8 -4
  30. photonlibpy/targeting/photonTrackedTarget.py +7 -1
  31. photonlibpy/version.py +2 -2
  32. photonlibpy-2025.0.0b3.dist-info/METADATA +17 -0
  33. photonlibpy-2025.0.0b3.dist-info/RECORD +36 -0
  34. photonlibpy-2025.0.0b1.dist-info/METADATA +0 -13
  35. photonlibpy-2025.0.0b1.dist-info/RECORD +0 -22
  36. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b3.dist-info}/WHEEL +0 -0
  37. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b3.dist-info}/top_level.txt +0 -0
photonlibpy/__init__.py CHANGED
@@ -15,7 +15,7 @@
  ## along with this program. If not, see <https://www.gnu.org/licenses/>.
  ###############################################################################

- from .packet import Packet # noqa
  from .estimatedRobotPose import EstimatedRobotPose # noqa
- from .photonPoseEstimator import PhotonPoseEstimator, PoseStrategy # noqa
+ from .packet import Packet # noqa
  from .photonCamera import PhotonCamera # noqa
+ from .photonPoseEstimator import PhotonPoseEstimator, PoseStrategy # noqa
photonlibpy/estimation/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from .cameraTargetRelation import CameraTargetRelation
+ from .openCVHelp import OpenCVHelp
+ from .rotTrlTransform3d import RotTrlTransform3d
+ from .targetModel import TargetModel
+ from .visionEstimation import VisionEstimation
photonlibpy/estimation/cameraTargetRelation.py ADDED
@@ -0,0 +1,25 @@
+ import math
+
+ from wpimath.geometry import Pose3d, Rotation2d, Transform3d
+ from wpimath.units import meters
+
+
+ class CameraTargetRelation:
+     def __init__(self, cameraPose: Pose3d, targetPose: Pose3d):
+         self.camPose = cameraPose
+         self.camToTarg = Transform3d(cameraPose, targetPose)
+         self.camToTargDist = self.camToTarg.translation().norm()
+         self.camToTargDistXY: meters = math.hypot(
+             self.camToTarg.translation().X(), self.camToTarg.translation().Y()
+         )
+         self.camToTargYaw = Rotation2d(self.camToTarg.X(), self.camToTarg.Y())
+         self.camToTargPitch = Rotation2d(self.camToTargDistXY, -self.camToTarg.Z())
+         self.camToTargAngle = Rotation2d(
+             math.hypot(self.camToTargYaw.radians(), self.camToTargPitch.radians())
+         )
+         self.targToCam = Transform3d(targetPose, cameraPose)
+         self.targToCamYaw = Rotation2d(self.targToCam.X(), self.targToCam.Y())
+         self.targToCamPitch = Rotation2d(self.camToTargDistXY, -self.targToCam.Z())
+         self.targtoCamAngle = Rotation2d(
+             math.hypot(self.targToCamYaw.radians(), self.targToCamPitch.radians())
+         )
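For orientation, a minimal usage sketch of the new CameraTargetRelation helper; the poses below are purely illustrative:

    import math

    from wpimath.geometry import Pose3d, Rotation3d, Translation3d

    from photonlibpy.estimation import CameraTargetRelation

    # Camera at the field origin facing +X; target 2 m ahead and 1 m to the left.
    camPose = Pose3d()
    targetPose = Pose3d(Translation3d(2.0, 1.0, 0.0), Rotation3d(0.0, 0.0, math.pi))

    rel = CameraTargetRelation(camPose, targetPose)
    print(rel.camToTargDist)           # straight-line camera-to-target distance (m)
    print(rel.camToTargYaw.degrees())  # yaw from the camera to the target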
photonlibpy/estimation/openCVHelp.py ADDED
@@ -0,0 +1,205 @@
+ import math
+ from typing import Any
+
+ import cv2 as cv
+ import numpy as np
+ from wpimath.geometry import Rotation3d, Transform3d, Translation3d
+
+ from ..targeting import PnpResult, TargetCorner
+ from .rotTrlTransform3d import RotTrlTransform3d
+
+ NWU_TO_EDN = Rotation3d(np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]]))
+ EDN_TO_NWU = Rotation3d(np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]))
+
+
+ class OpenCVHelp:
+     @staticmethod
+     def getMinAreaRect(points: np.ndarray) -> cv.RotatedRect:
+         return cv.RotatedRect(*cv.minAreaRect(points))
+
+     @staticmethod
+     def translationNWUtoEDN(trl: Translation3d) -> Translation3d:
+         return trl.rotateBy(NWU_TO_EDN)
+
+     @staticmethod
+     def rotationNWUtoEDN(rot: Rotation3d) -> Rotation3d:
+         return -NWU_TO_EDN + (rot + NWU_TO_EDN)
+
+     @staticmethod
+     def translationToTVec(translations: list[Translation3d]) -> np.ndarray:
+         retVal: list[list] = []
+         for translation in translations:
+             trl = OpenCVHelp.translationNWUtoEDN(translation)
+             retVal.append([trl.X(), trl.Y(), trl.Z()])
+         return np.array(
+             retVal,
+             dtype=np.float32,
+         )
+
+     @staticmethod
+     def rotationToRVec(rotation: Rotation3d) -> np.ndarray:
+         retVal: list[np.ndarray] = []
+         rot = OpenCVHelp.rotationNWUtoEDN(rotation)
+         rotVec = rot.getQuaternion().toRotationVector()
+         retVal.append(rotVec)
+         return np.array(
+             retVal,
+             dtype=np.float32,
+         )
+
+     @staticmethod
+     def avgPoint(points: np.ndarray) -> np.ndarray:
+         x = 0.0
+         y = 0.0
+         for p in points:
+             x += p[0, 0]
+             y += p[0, 1]
+         return np.array([[x / len(points), y / len(points)]])
+
+     @staticmethod
+     def pointsToTargetCorners(points: np.ndarray) -> list[TargetCorner]:
+         corners = [TargetCorner(p[0, 0], p[0, 1]) for p in points]
+         return corners
+
+     @staticmethod
+     def cornersToPoints(corners: list[TargetCorner]) -> np.ndarray:
+         points = [[[c.x, c.y]] for c in corners]
+         return np.array(points)
+
+     @staticmethod
+     def projectPoints(
+         cameraMatrix: np.ndarray,
+         distCoeffs: np.ndarray,
+         camRt: RotTrlTransform3d,
+         objectTranslations: list[Translation3d],
+     ) -> np.ndarray:
+         objectPoints = OpenCVHelp.translationToTVec(objectTranslations)
+         rvec = OpenCVHelp.rotationToRVec(camRt.getRotation())
+         tvec = OpenCVHelp.translationToTVec(
+             [
+                 camRt.getTranslation(),
+             ]
+         )
+
+         pts, _ = cv.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs)
+         return pts
+
+     @staticmethod
+     def reorderCircular(
+         elements: list[Any] | np.ndarray, backwards: bool, shiftStart: int
+     ) -> list[Any]:
+         size = len(elements)
+         reordered = []
+         dir = -1 if backwards else 1
+         for i in range(size):
+             index = (i * dir + shiftStart * dir) % size
+             if index < 0:
+                 index += size
+             reordered.append(elements[index])
+         return reordered
+
+     @staticmethod
+     def translationEDNToNWU(trl: Translation3d) -> Translation3d:
+         return trl.rotateBy(EDN_TO_NWU)
+
+     @staticmethod
+     def rotationEDNToNWU(rot: Rotation3d) -> Rotation3d:
+         return -EDN_TO_NWU + (rot + EDN_TO_NWU)
+
+     @staticmethod
+     def tVecToTranslation(tvecInput: np.ndarray) -> Translation3d:
+         return OpenCVHelp.translationEDNToNWU(Translation3d(tvecInput))
+
+     @staticmethod
+     def rVecToRotation(rvecInput: np.ndarray) -> Rotation3d:
+         return OpenCVHelp.rotationEDNToNWU(Rotation3d(rvecInput))
+
+     @staticmethod
+     def solvePNP_Square(
+         cameraMatrix: np.ndarray,
+         distCoeffs: np.ndarray,
+         modelTrls: list[Translation3d],
+         imagePoints: np.ndarray,
+     ) -> PnpResult | None:
+         modelTrls = OpenCVHelp.reorderCircular(modelTrls, True, -1)
+         imagePoints = np.array(OpenCVHelp.reorderCircular(imagePoints, True, -1))
+         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
+
+         alt: Transform3d | None = None
+         reprojectionError: cv.typing.MatLike | None = None
+         best: Transform3d = Transform3d()
+
+         for tries in range(2):
+             retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
+                 objectMat,
+                 imagePoints,
+                 cameraMatrix,
+                 distCoeffs,
+                 flags=cv.SOLVEPNP_IPPE_SQUARE,
+             )
+
+             best = Transform3d(
+                 OpenCVHelp.tVecToTranslation(tvecs[0]),
+                 OpenCVHelp.rVecToRotation(rvecs[0]),
+             )
+             if len(tvecs) > 1:
+                 alt = Transform3d(
+                     OpenCVHelp.tVecToTranslation(tvecs[1]),
+                     OpenCVHelp.rVecToRotation(rvecs[1]),
+                 )
+
+             if reprojectionError is not None and not math.isnan(
+                 reprojectionError[0, 0]
+             ):
+                 break
+             else:
+                 pt = imagePoints[0]
+                 pt[0, 0] -= 0.001
+                 pt[0, 1] -= 0.001
+                 imagePoints[0] = pt
+
+         if reprojectionError is None or math.isnan(reprojectionError[0, 0]):
+             print("SolvePNP_Square failed!")
+             return None
+
+         if alt:
+             return PnpResult(
+                 best=best,
+                 bestReprojErr=reprojectionError[0, 0],
+                 alt=alt,
+                 altReprojErr=reprojectionError[1, 0],
+                 ambiguity=reprojectionError[0, 0] / reprojectionError[1, 0],
+             )
+         else:
+             # We have no alternative so set it to best as well
+             return PnpResult(
+                 best=best,
+                 bestReprojErr=reprojectionError[0],
+                 alt=best,
+                 altReprojErr=reprojectionError[0],
+             )
+
+     @staticmethod
+     def solvePNP_SQPNP(
+         cameraMatrix: np.ndarray,
+         distCoeffs: np.ndarray,
+         modelTrls: list[Translation3d],
+         imagePoints: np.ndarray,
+     ) -> PnpResult | None:
+         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
+
+         retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
+             objectMat, imagePoints, cameraMatrix, distCoeffs, flags=cv.SOLVEPNP_SQPNP
+         )
+
+         error = reprojectionError[0, 0]
+         best = Transform3d(
+             OpenCVHelp.tVecToTranslation(tvecs[0]), OpenCVHelp.rVecToRotation(rvecs[0])
+         )
+
+         if math.isnan(error):
+             return None
+
+         # We have no alternative so set it to best as well
+         result = PnpResult(best=best, bestReprojErr=error, alt=best, altReprojErr=error)
+         return result
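A rough sketch of calling the new solvePNP_Square helper directly, assuming an illustrative 640x480 pinhole camera matrix, zero distortion, and hand-picked pixel corners (real corners would come from a PhotonTrackedTarget):

    import numpy as np

    from photonlibpy.estimation import OpenCVHelp, TargetModel
    from photonlibpy.targeting import TargetCorner

    # Illustrative intrinsics; real values come from camera calibration.
    cameraMatrix = np.array(
        [[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]]
    )
    distCoeffs = np.zeros((8, 1))

    # Corner model of a 36h11 tag plus four detected image corners (pixels).
    modelTrls = TargetModel.AprilTag36h11().getVertices()
    corners = [
        TargetCorner(300.0, 260.0),
        TargetCorner(340.0, 260.0),
        TargetCorner(340.0, 220.0),
        TargetCorner(300.0, 220.0),
    ]
    imagePoints = OpenCVHelp.cornersToPoints(corners)

    result = OpenCVHelp.solvePNP_Square(cameraMatrix, distCoeffs, modelTrls, imagePoints)
    if result is not None:
        print(result.best)       # camera-to-tag Transform3d
        print(result.ambiguity)  # ratio of best to alternate reprojection error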
photonlibpy/estimation/rotTrlTransform3d.py ADDED
@@ -0,0 +1,55 @@
+ from typing import Self
+
+ from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d
+
+
+ class RotTrlTransform3d:
+     def __init__(
+         self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
+     ):
+         self.rot = rot
+         self.trl = trl
+
+     def inverse(self) -> Self:
+         invRot = -self.rot
+         invTrl = -(self.trl.rotateBy(invRot))
+         return type(self)(invRot, invTrl)
+
+     def getTransform(self) -> Transform3d:
+         return Transform3d(self.trl, self.rot)
+
+     def getTranslation(self) -> Translation3d:
+         return self.trl
+
+     def getRotation(self) -> Rotation3d:
+         return self.rot
+
+     def applyTranslation(self, trlToApply: Translation3d) -> Translation3d:
+         return trlToApply.rotateBy(self.rot) + self.trl
+
+     def applyRotation(self, rotToApply: Rotation3d) -> Rotation3d:
+         return rotToApply + self.rot
+
+     def applyPose(self, poseToApply: Pose3d) -> Pose3d:
+         return Pose3d(
+             self.applyTranslation(poseToApply.translation()),
+             self.applyRotation(poseToApply.rotation()),
+         )
+
+     def applyTrls(self, rots: list[Rotation3d]) -> list[Rotation3d]:
+         retVal: list[Rotation3d] = []
+         for rot in rots:
+             retVal.append(self.applyRotation(rot))
+         return retVal
+
+     @classmethod
+     def makeRelativeTo(cls, pose: Pose3d) -> Self:
+         return cls(pose.rotation(), pose.translation()).inverse()
+
+     @classmethod
+     def makeBetweenPoses(cls, initial: Pose3d, last: Pose3d) -> Self:
+         return cls(
+             last.rotation() - initial.rotation(),
+             last.translation()
+             - initial.translation().rotateBy(last.rotation() - initial.rotation()),
+         )
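A small sketch of what the new RotTrlTransform3d is for: re-expressing a field-relative pose in the frame of another pose (values are illustrative):

    import math

    from wpimath.geometry import Pose3d, Rotation3d, Translation3d

    from photonlibpy.estimation import RotTrlTransform3d

    # Reference pose on the field, e.g. a robot at (1, 2) facing +Y.
    reference = Pose3d(Translation3d(1.0, 2.0, 0.0), Rotation3d(0.0, 0.0, math.pi / 2))
    fieldToCamera = Pose3d(Translation3d(3.0, 2.0, 0.5), Rotation3d())

    # makeRelativeTo builds the inverse of the reference pose, so applying it
    # maps field-relative poses into the reference frame.
    toReferenceFrame = RotTrlTransform3d.makeRelativeTo(reference)
    print(toReferenceFrame.applyPose(fieldToCamera))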
photonlibpy/estimation/targetModel.py ADDED
@@ -0,0 +1,120 @@
+ import math
+ from typing import List, Self
+
+ from wpimath.geometry import Pose3d, Rotation2d, Rotation3d, Translation3d
+ from wpimath.units import meters
+
+ from . import RotTrlTransform3d
+
+
+ class TargetModel:
+
+     def __init__(self):
+         self.vertices: List[Translation3d] = []
+         self.isPlanar = False
+         self.isSpherical = False
+
+     @classmethod
+     def createPlanar(cls, width: meters, height: meters) -> Self:
+         tm = cls()
+
+         tm.isPlanar = True
+         tm.isSpherical = False
+         tm.vertices = [
+             Translation3d(0.0, -width / 2.0, -height / 2.0),
+             Translation3d(0.0, width / 2.0, -height / 2.0),
+             Translation3d(0.0, width / 2.0, height / 2.0),
+             Translation3d(0.0, -width / 2.0, height / 2.0),
+         ]
+         return tm
+
+     @classmethod
+     def createCuboid(cls, length: meters, width: meters, height: meters) -> Self:
+         tm = cls()
+         verts = [
+             Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
+             Translation3d(length / 2.0, width / 2.0, -height / 2.0),
+             Translation3d(length / 2.0, width / 2.0, height / 2.0),
+             Translation3d(length / 2.0, -width / 2.0, height / 2.0),
+             Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
+             Translation3d(-length / 2.0, width / 2.0, height / 2.0),
+             Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
+             Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
+         ]
+
+         tm._common_construction(verts)
+
+         return tm
+
+     @classmethod
+     def createSpheroid(cls, diameter: meters) -> Self:
+         tm = cls()
+
+         tm.isPlanar = False
+         tm.isSpherical = True
+         tm.vertices = [
+             Translation3d(0.0, -diameter / 2.0, 0.0),
+             Translation3d(0.0, 0.0, -diameter / 2.0),
+             Translation3d(0.0, diameter / 2.0, 0.0),
+             Translation3d(0.0, 0.0, diameter / 2.0),
+         ]
+
+         return tm
+
+     @classmethod
+     def createArbitrary(cls, verts: List[Translation3d]) -> Self:
+         tm = cls()
+         tm._common_construction(verts)
+
+         return tm
+
+     def _common_construction(self, verts: List[Translation3d]) -> None:
+         self.isSpherical = False
+         if len(verts) <= 2:
+             self.vertices = []
+             self.isPlanar = False
+         else:
+             cornersPlaner = True
+             for corner in verts:
+                 if abs(corner.X() < 1e-4):
+                     cornersPlaner = False
+             self.isPlanar = cornersPlaner
+
+         self.vertices = verts
+
+     def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
+         basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())
+
+         retVal = []
+
+         for vert in self.vertices:
+             retVal.append(basisChange.applyTranslation(vert))
+
+         return retVal
+
+     @classmethod
+     def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
+         relCam = cameraTrl - tgtTrl
+         orientToCam = Rotation3d(
+             0.0,
+             Rotation2d(math.hypot(relCam.X(), relCam.Y()), relCam.Z()).radians(),
+             Rotation2d(relCam.X(), relCam.Y()).radians(),
+         )
+         return Pose3d(tgtTrl, orientToCam)
+
+     def getVertices(self) -> List[Translation3d]:
+         return self.vertices
+
+     def getIsPlanar(self) -> bool:
+         return self.isPlanar
+
+     def getIsSpherical(self) -> bool:
+         return self.isSpherical
+
+     @classmethod
+     def AprilTag36h11(cls) -> Self:
+         return cls.createPlanar(width=6.5 * 0.0254, height=6.5 * 0.0254)
+
+     @classmethod
+     def AprilTag16h5(cls) -> Self:
+         return cls.createPlanar(width=6.0 * 0.0254, height=6.0 * 0.0254)
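A brief sketch of the new TargetModel API: build the stock 36h11 tag model and ask for its corner positions at an illustrative field pose:

    import math

    from wpimath.geometry import Pose3d, Rotation3d, Translation3d

    from photonlibpy.estimation import TargetModel

    # 6.5 in (0.1651 m) square AprilTag, 4 m down the field at 1 m height,
    # rotated to face back toward the origin.
    tagModel = TargetModel.AprilTag36h11()
    tagPose = Pose3d(Translation3d(4.0, 0.0, 1.0), Rotation3d(0.0, 0.0, math.pi))

    for corner in tagModel.getFieldVertices(tagPose):
        print(corner)  # field-relative Translation3d of each tag corner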
photonlibpy/estimation/visionEstimation.py ADDED
@@ -0,0 +1,91 @@
+ import numpy as np
+ from robotpy_apriltag import AprilTag, AprilTagFieldLayout
+ from wpimath.geometry import Pose3d, Transform3d, Translation3d
+
+ from ..targeting import PhotonTrackedTarget, PnpResult, TargetCorner
+ from . import OpenCVHelp, TargetModel
+
+
+ class VisionEstimation:
+     @staticmethod
+     def getVisibleLayoutTags(
+         visTags: list[PhotonTrackedTarget], layout: AprilTagFieldLayout
+     ) -> list[AprilTag]:
+         retVal: list[AprilTag] = []
+         for tag in visTags:
+             id = tag.getFiducialId()
+             maybePose = layout.getTagPose(id)
+             if maybePose:
+                 aprilTag = AprilTag()
+                 aprilTag.ID = id
+                 aprilTag.pose = maybePose
+                 retVal.append(aprilTag)
+         return retVal
+
+     @staticmethod
+     def estimateCamPosePNP(
+         cameraMatrix: np.ndarray,
+         distCoeffs: np.ndarray,
+         visTags: list[PhotonTrackedTarget],
+         layout: AprilTagFieldLayout,
+         tagModel: TargetModel,
+     ) -> PnpResult | None:
+         if len(visTags) == 0:
+             return None
+
+         corners: list[TargetCorner] = []
+         knownTags: list[AprilTag] = []
+
+         for tgt in visTags:
+             id = tgt.getFiducialId()
+             maybePose = layout.getTagPose(id)
+             if maybePose:
+                 tag = AprilTag()
+                 tag.ID = id
+                 tag.pose = maybePose
+                 knownTags.append(tag)
+                 currentCorners = tgt.getDetectedCorners()
+                 if currentCorners:
+                     corners += currentCorners
+
+         if len(knownTags) == 0 or len(corners) == 0 or len(corners) % 4 != 0:
+             return None
+
+         points = OpenCVHelp.cornersToPoints(corners)
+
+         if len(knownTags) == 1:
+             camToTag = OpenCVHelp.solvePNP_Square(
+                 cameraMatrix, distCoeffs, tagModel.getVertices(), points
+             )
+             if not camToTag:
+                 return None
+
+             bestPose = knownTags[0].pose.transformBy(camToTag.best.inverse())
+             altPose = Pose3d()
+             if camToTag.ambiguity != 0:
+                 altPose = knownTags[0].pose.transformBy(camToTag.alt.inverse())
+
+             o = Pose3d()
+             result = PnpResult(
+                 best=Transform3d(o, bestPose),
+                 alt=Transform3d(o, altPose),
+                 ambiguity=camToTag.ambiguity,
+                 bestReprojErr=camToTag.bestReprojErr,
+                 altReprojErr=camToTag.altReprojErr,
+             )
+             return result
+         else:
+             objectTrls: list[Translation3d] = []
+             for tag in knownTags:
+                 verts = tagModel.getFieldVertices(tag.pose)
+                 objectTrls += verts
+
+             ret = OpenCVHelp.solvePNP_SQPNP(
+                 cameraMatrix, distCoeffs, objectTrls, points
+             )
+             if ret:
+                 # Invert best/alt transforms
+                 ret.best = ret.best.inverse()
+                 ret.alt = ret.alt.inverse()
+
+             return ret
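A hedged end-to-end sketch of estimateCamPosePNP. The camera name, intrinsics, and choice of field layout are placeholders; in practice the targets would come from a live PhotonPipelineResult and the intrinsics from the camera's calibration:

    import numpy as np
    from robotpy_apriltag import AprilTagField, loadAprilTagLayoutField

    from photonlibpy import PhotonCamera
    from photonlibpy.estimation import TargetModel, VisionEstimation

    # Illustrative 640x480 pinhole intrinsics and zero distortion.
    cameraMatrix = np.array(
        [[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]]
    )
    distCoeffs = np.zeros((8, 1))
    layout = loadAprilTagLayoutField(AprilTagField.k2024Crescendo)

    camera = PhotonCamera("front")  # placeholder camera name
    visTags = camera.getLatestResult().getTargets()

    pnp = VisionEstimation.estimateCamPosePNP(
        cameraMatrix, distCoeffs, visTags, layout, TargetModel.AprilTag36h11()
    )
    if pnp is not None:
        print(pnp.best)  # field-to-camera Transform3d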
photonlibpy/generated/MultiTargetPNPResultSerde.py CHANGED
@@ -20,8 +20,14 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+     from ..targeting import MultiTargetPNPResult # noqa
+     from ..targeting import PnpResult # noqa


  class MultiTargetPNPResultSerde:
photonlibpy/generated/PhotonPipelineMetadataSerde.py CHANGED
@@ -20,8 +20,13 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+     from ..targeting import PhotonPipelineMetadata # noqa


  class PhotonPipelineMetadataSerde:
photonlibpy/generated/PhotonPipelineResultSerde.py CHANGED
@@ -20,8 +20,16 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+     from ..targeting import MultiTargetPNPResult # noqa
+     from ..targeting import PhotonPipelineMetadata # noqa
+     from ..targeting import PhotonPipelineResult # noqa
+     from ..targeting import PhotonTrackedTarget # noqa


  class PhotonPipelineResultSerde:
photonlibpy/generated/PhotonTrackedTargetSerde.py CHANGED
@@ -20,8 +20,14 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+     from ..targeting import PhotonTrackedTarget # noqa
+     from ..targeting import TargetCorner # noqa


  class PhotonTrackedTargetSerde:
photonlibpy/generated/PnpResultSerde.py CHANGED
@@ -20,8 +20,13 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+     from ..targeting import PnpResult # noqa


  class PnpResultSerde:
photonlibpy/generated/TargetCornerSerde.py CHANGED
@@ -20,8 +20,13 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+     from ..targeting import TargetCorner # noqa


  class TargetCornerSerde:
photonlibpy/generated/__init__.py CHANGED
@@ -2,7 +2,6 @@

  from .MultiTargetPNPResultSerde import MultiTargetPNPResultSerde # noqa
  from .PhotonPipelineMetadataSerde import PhotonPipelineMetadataSerde # noqa
- from .PhotonPipelineMetadataSerde import PhotonPipelineMetadataSerde # noqa
  from .PhotonPipelineResultSerde import PhotonPipelineResultSerde # noqa
  from .PhotonTrackedTargetSerde import PhotonTrackedTargetSerde # noqa
  from .PnpResultSerde import PnpResultSerde # noqa
photonlibpy/networktables/NTTopicSet.py ADDED
@@ -0,0 +1,66 @@
+ import ntcore as nt
+ from wpimath.geometry import Transform3d
+
+ from ..generated.PhotonPipelineResultSerde import PhotonPipelineResultSerde
+
+ PhotonPipelineResult_TYPE_STRING = (
+     "photonstruct:PhotonPipelineResult:" + PhotonPipelineResultSerde.MESSAGE_VERSION
+ )
+
+
+ class NTTopicSet:
+
+     def __init__(self, tableName: str, cameraName: str) -> None:
+         instance = nt.NetworkTableInstance.getDefault()
+         photonvision_root_table = instance.getTable(tableName)
+         self.subTable = photonvision_root_table.getSubTable(cameraName)
+
+     def updateEntries(self) -> None:
+         options = nt.PubSubOptions()
+         options.periodic = 0.01
+         options.sendAll = True
+         self.rawBytesEntry = self.subTable.getRawTopic("rawBytes").publish(
+             PhotonPipelineResult_TYPE_STRING, options
+         )
+         self.rawBytesEntry.getTopic().setProperty(
+             "message_uuid", PhotonPipelineResultSerde.MESSAGE_VERSION
+         )
+         self.pipelineIndexPublisher = self.subTable.getIntegerTopic(
+             "pipelineIndexState"
+         ).publish()
+         self.pipelineIndexRequestSub = self.subTable.getIntegerTopic(
+             "pipelineIndexRequest"
+         ).subscribe(0)
+
+         self.driverModePublisher = self.subTable.getBooleanTopic("driverMode").publish()
+         self.driverModeSubscriber = self.subTable.getBooleanTopic(
+             "driverModeRequest"
+         ).subscribe(False)
+
+         self.driverModeSubscriber.getTopic().publish().setDefault(False)
+
+         self.latencyMillisEntry = self.subTable.getDoubleTopic(
+             "latencyMillis"
+         ).publish()
+         self.hasTargetEntry = self.subTable.getBooleanTopic("hasTargets").publish()
+
+         self.targetPitchEntry = self.subTable.getDoubleTopic("targetPitch").publish()
+         self.targetAreaEntry = self.subTable.getDoubleTopic("targetArea").publish()
+         self.targetYawEntry = self.subTable.getDoubleTopic("targetYaw").publish()
+         self.targetPoseEntry = self.subTable.getStructTopic(
+             "targetPose", Transform3d
+         ).publish()
+         self.targetSkewEntry = self.subTable.getDoubleTopic("targetSkew").publish()
+
+         self.bestTargetPosX = self.subTable.getDoubleTopic("targetPixelsX").publish()
+         self.bestTargetPosY = self.subTable.getDoubleTopic("targetPixelsY").publish()
+
+         self.heartbeatTopic = self.subTable.getIntegerTopic("heartbeat")
+         self.heartbeatPublisher = self.heartbeatTopic.publish()
+
+         self.cameraIntrinsicsPublisher = self.subTable.getDoubleArrayTopic(
+             "cameraIntrinsics"
+         ).publish()
+         self.cameraDistortionPublisher = self.subTable.getDoubleArrayTopic(
+             "cameraDistortion"
+         ).publish()
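Finally, a short sketch of NTTopicSet, the helper that bundles a camera's NetworkTables publishers and subscribers; the table and camera names below are placeholders:

    from photonlibpy.networktables.NTTopicSet import NTTopicSet

    # Creates publishers/subscribers under /photonvision/simCam (placeholder name).
    topics = NTTopicSet("photonvision", "simCam")
    topics.updateEntries()

    topics.heartbeatPublisher.set(1)
    topics.hasTargetEntry.set(False)
    topics.latencyMillisEntry.set(12.3)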