photonlibpy 2025.0.0b1__py3-none-any.whl → 2025.0.0b2__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (36)
  1. photonlibpy/__init__.py +2 -2
  2. photonlibpy/estimation/__init__.py +5 -0
  3. photonlibpy/estimation/cameraTargetRelation.py +25 -0
  4. photonlibpy/estimation/openCVHelp.py +200 -0
  5. photonlibpy/estimation/rotTrlTransform3d.py +32 -0
  6. photonlibpy/estimation/targetModel.py +137 -0
  7. photonlibpy/estimation/visionEstimation.py +91 -0
  8. photonlibpy/generated/MultiTargetPNPResultSerde.py +1 -1
  9. photonlibpy/generated/PhotonPipelineMetadataSerde.py +1 -1
  10. photonlibpy/generated/PhotonPipelineResultSerde.py +1 -1
  11. photonlibpy/generated/PhotonTrackedTargetSerde.py +1 -1
  12. photonlibpy/generated/PnpResultSerde.py +1 -1
  13. photonlibpy/generated/TargetCornerSerde.py +1 -1
  14. photonlibpy/generated/__init__.py +0 -1
  15. photonlibpy/networktables/NTTopicSet.py +64 -0
  16. photonlibpy/networktables/__init__.py +1 -0
  17. photonlibpy/packet.py +17 -9
  18. photonlibpy/photonCamera.py +9 -6
  19. photonlibpy/photonPoseEstimator.py +3 -3
  20. photonlibpy/simulation/__init__.py +5 -0
  21. photonlibpy/simulation/photonCameraSim.py +408 -0
  22. photonlibpy/simulation/simCameraProperties.py +661 -0
  23. photonlibpy/simulation/videoSimUtil.py +2 -0
  24. photonlibpy/simulation/visionSystemSim.py +237 -0
  25. photonlibpy/simulation/visionTargetSim.py +50 -0
  26. photonlibpy/targeting/TargetCorner.py +5 -1
  27. photonlibpy/targeting/__init__.py +1 -1
  28. photonlibpy/targeting/multiTargetPNPResult.py +8 -2
  29. photonlibpy/targeting/photonPipelineResult.py +7 -4
  30. photonlibpy/targeting/photonTrackedTarget.py +7 -1
  31. photonlibpy/version.py +2 -2
  32. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b2.dist-info}/METADATA +6 -2
  33. photonlibpy-2025.0.0b2.dist-info/RECORD +36 -0
  34. photonlibpy-2025.0.0b1.dist-info/RECORD +0 -22
  35. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b2.dist-info}/WHEEL +0 -0
  36. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b2.dist-info}/top_level.txt +0 -0
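The bulk of the new code in b2 is the estimation and networktables subpackages listed above (the simulation subpackage is new as well). For orientation, a minimal import sketch of the classes that the new __init__.py files shown below re-export, assuming a photonlibpy 2025.0.0b2 install:

from photonlibpy.estimation import (
    CameraTargetRelation,
    OpenCVHelp,
    RotTrlTransform3d,
    TargetModel,
    VisionEstimation,
)
from photonlibpy.networktables import NTTopicSet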
photonlibpy/__init__.py CHANGED
@@ -15,7 +15,7 @@
  ## along with this program. If not, see <https://www.gnu.org/licenses/>.
  ###############################################################################

- from .packet import Packet # noqa
  from .estimatedRobotPose import EstimatedRobotPose # noqa
- from .photonPoseEstimator import PhotonPoseEstimator, PoseStrategy # noqa
+ from .packet import Packet # noqa
  from .photonCamera import PhotonCamera # noqa
+ from .photonPoseEstimator import PhotonPoseEstimator, PoseStrategy # noqa
photonlibpy/estimation/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from .cameraTargetRelation import CameraTargetRelation
+ from .openCVHelp import OpenCVHelp
+ from .rotTrlTransform3d import RotTrlTransform3d
+ from .targetModel import TargetModel
+ from .visionEstimation import VisionEstimation
photonlibpy/estimation/cameraTargetRelation.py ADDED
@@ -0,0 +1,25 @@
+ import math
+
+ from wpimath.geometry import Pose3d, Rotation2d, Transform3d
+ from wpimath.units import meters
+
+
+ class CameraTargetRelation:
+     def __init__(self, cameraPose: Pose3d, targetPose: Pose3d):
+         self.camPose = cameraPose
+         self.camToTarg = Transform3d(cameraPose, targetPose)
+         self.camToTargDist = self.camToTarg.translation().norm()
+         self.camToTargDistXY: meters = math.hypot(
+             self.camToTarg.translation().X(), self.camToTarg.translation().Y()
+         )
+         self.camToTargYaw = Rotation2d(self.camToTarg.X(), self.camToTarg.Y())
+         self.camToTargPitch = Rotation2d(self.camToTargDistXY, -self.camToTarg.Z())
+         self.camToTargAngle = Rotation2d(
+             math.hypot(self.camToTargYaw.radians(), self.camToTargPitch.radians())
+         )
+         self.targToCam = Transform3d(targetPose, cameraPose)
+         self.targToCamYaw = Rotation2d(self.targToCam.X(), self.targToCam.Y())
+         self.targToCamPitch = Rotation2d(self.camToTargDistXY, -self.targToCam.Z())
+         self.targtoCamAngle = Rotation2d(
+             math.hypot(self.targToCamYaw.radians(), self.targToCamPitch.radians())
+         )
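A minimal usage sketch of the new CameraTargetRelation helper; the two poses below are arbitrary example values, not part of the package:

import math

from wpimath.geometry import Pose3d, Rotation3d, Translation3d

from photonlibpy.estimation import CameraTargetRelation

# Camera at the field origin facing +X; target 2 m ahead, 1 m left, facing back.
camPose = Pose3d()
targetPose = Pose3d(Translation3d(2.0, 1.0, 0.5), Rotation3d(0.0, 0.0, math.pi))
rel = CameraTargetRelation(camPose, targetPose)
print(rel.camToTargDist, rel.camToTargYaw.degrees(), rel.camToTargPitch.degrees())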
photonlibpy/estimation/openCVHelp.py ADDED
@@ -0,0 +1,200 @@
+ import math
+ from typing import Any, Tuple
+
+ import cv2 as cv
+ import numpy as np
+ from wpimath.geometry import Rotation3d, Transform3d, Translation3d
+
+ from ..targeting import PnpResult, TargetCorner
+ from .rotTrlTransform3d import RotTrlTransform3d
+
+ NWU_TO_EDN = Rotation3d(np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]]))
+ EDN_TO_NWU = Rotation3d(np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]))
+
+
+ class OpenCVHelp:
+     @staticmethod
+     def getMinAreaRect(points: np.ndarray) -> cv.RotatedRect:
+         return cv.RotatedRect(*cv.minAreaRect(points))
+
+     @staticmethod
+     def translationNWUtoEDN(trl: Translation3d) -> Translation3d:
+         return trl.rotateBy(NWU_TO_EDN)
+
+     @staticmethod
+     def rotationNWUtoEDN(rot: Rotation3d) -> Rotation3d:
+         return -NWU_TO_EDN + (rot + NWU_TO_EDN)
+
+     @staticmethod
+     def translationToTVec(translations: list[Translation3d]) -> np.ndarray:
+         retVal: list[list] = []
+         for translation in translations:
+             trl = OpenCVHelp.translationNWUtoEDN(translation)
+             retVal.append([trl.X(), trl.Y(), trl.Z()])
+         return np.array(
+             retVal,
+             dtype=np.float32,
+         )
+
+     @staticmethod
+     def rotationToRVec(rotation: Rotation3d) -> np.ndarray:
+         retVal: list[np.ndarray] = []
+         rot = OpenCVHelp.rotationNWUtoEDN(rotation)
+         rotVec = rot.getQuaternion().toRotationVector()
+         retVal.append(rotVec)
+         return np.array(
+             retVal,
+             dtype=np.float32,
+         )
+
+     @staticmethod
+     def avgPoint(points: list[Tuple[float, float]]) -> Tuple[float, float]:
+         x = 0.0
+         y = 0.0
+         for p in points:
+             x += p[0]
+             y += p[1]
+         return (x / len(points), y / len(points))
+
+     @staticmethod
+     def pointsToTargetCorners(points: np.ndarray) -> list[TargetCorner]:
+         corners = [TargetCorner(p[0, 0], p[0, 1]) for p in points]
+         return corners
+
+     @staticmethod
+     def cornersToPoints(corners: list[TargetCorner]) -> np.ndarray:
+         points = [[[c.x, c.y]] for c in corners]
+         return np.array(points)
+
+     @staticmethod
+     def projectPoints(
+         cameraMatrix: np.ndarray,
+         distCoeffs: np.ndarray,
+         camRt: RotTrlTransform3d,
+         objectTranslations: list[Translation3d],
+     ) -> np.ndarray:
+         objectPoints = OpenCVHelp.translationToTVec(objectTranslations)
+         rvec = OpenCVHelp.rotationToRVec(camRt.getRotation())
+         tvec = OpenCVHelp.translationToTVec(
+             [
+                 camRt.getTranslation(),
+             ]
+         )
+
+         pts, _ = cv.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs)
+         return pts
+
+     @staticmethod
+     def reorderCircular(
+         elements: list[Any] | np.ndarray, backwards: bool, shiftStart: int
+     ) -> list[Any]:
+         size = len(elements)
+         reordered = []
+         dir = -1 if backwards else 1
+         for i in range(size):
+             index = (i * dir + shiftStart * dir) % size
+             if index < 0:
+                 index += size
+             reordered.append(elements[index])
+         return reordered
+
+     @staticmethod
+     def translationEDNToNWU(trl: Translation3d) -> Translation3d:
+         return trl.rotateBy(EDN_TO_NWU)
+
+     @staticmethod
+     def rotationEDNToNWU(rot: Rotation3d) -> Rotation3d:
+         return -EDN_TO_NWU + (rot + EDN_TO_NWU)
+
+     @staticmethod
+     def tVecToTranslation(tvecInput: np.ndarray) -> Translation3d:
+         return OpenCVHelp.translationEDNToNWU(Translation3d(tvecInput))
+
+     @staticmethod
+     def rVecToRotation(rvecInput: np.ndarray) -> Rotation3d:
+         return OpenCVHelp.rotationEDNToNWU(Rotation3d(rvecInput))
+
+     @staticmethod
+     def solvePNP_Square(
+         cameraMatrix: np.ndarray,
+         distCoeffs: np.ndarray,
+         modelTrls: list[Translation3d],
+         imagePoints: np.ndarray,
+     ) -> PnpResult | None:
+         modelTrls = OpenCVHelp.reorderCircular(modelTrls, True, -1)
+         imagePoints = np.array(OpenCVHelp.reorderCircular(imagePoints, True, -1))
+         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
+
+         alt: Transform3d | None = None
+         for tries in range(2):
+             retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
+                 objectMat,
+                 imagePoints,
+                 cameraMatrix,
+                 distCoeffs,
+                 flags=cv.SOLVEPNP_IPPE_SQUARE,
+             )
+
+             best = Transform3d(
+                 OpenCVHelp.tVecToTranslation(tvecs[0]),
+                 OpenCVHelp.rVecToRotation(rvecs[0]),
+             )
+             if len(tvecs) > 1:
+                 alt = Transform3d(
+                     OpenCVHelp.tVecToTranslation(tvecs[1]),
+                     OpenCVHelp.rVecToRotation(rvecs[1]),
+                 )
+
+             if not math.isnan(reprojectionError[0, 0]):
+                 break
+             else:
+                 pt = imagePoints[0]
+                 pt[0, 0] -= 0.001
+                 pt[0, 1] -= 0.001
+                 imagePoints[0] = pt
+
+         if math.isnan(reprojectionError[0, 0]):
+             print("SolvePNP_Square failed!")
+             return None
+
+         if alt:
+             return PnpResult(
+                 best=best,
+                 bestReprojErr=reprojectionError[0, 0],
+                 alt=alt,
+                 altReprojErr=reprojectionError[1, 0],
+                 ambiguity=reprojectionError[0, 0] / reprojectionError[1, 0],
+             )
+         else:
+             # We have no alternative so set it to best as well
+             return PnpResult(
+                 best=best,
+                 bestReprojErr=reprojectionError[0],
+                 alt=best,
+                 altReprojErr=reprojectionError[0],
+             )
+
+     @staticmethod
+     def solvePNP_SQPNP(
+         cameraMatrix: np.ndarray,
+         distCoeffs: np.ndarray,
+         modelTrls: list[Translation3d],
+         imagePoints: np.ndarray,
+     ) -> PnpResult | None:
+         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
+
+         retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
+             objectMat, imagePoints, cameraMatrix, distCoeffs, flags=cv.SOLVEPNP_SQPNP
+         )
+
+         error = reprojectionError[0, 0]
+         best = Transform3d(
+             OpenCVHelp.tVecToTranslation(tvecs[0]), OpenCVHelp.rVecToRotation(rvecs[0])
+         )
+
+         if math.isnan(error):
+             return None
+
+         # We have no alternative so set it to best as well
+         result = PnpResult(best=best, bestReprojErr=error, alt=best, altReprojErr=error)
+         return result
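A short sketch of the corner/point conversions this helper is built around; the pixel coordinates are made-up example values:

from photonlibpy.estimation import OpenCVHelp
from photonlibpy.targeting import TargetCorner

# Four detected corners of one tag, in pixels (example values).
corners = [
    TargetCorner(100.0, 200.0),
    TargetCorner(150.0, 200.0),
    TargetCorner(150.0, 250.0),
    TargetCorner(100.0, 250.0),
]
pts = OpenCVHelp.cornersToPoints(corners)  # (N, 1, 2) array in OpenCV layout
center = OpenCVHelp.avgPoint([(c.x, c.y) for c in corners])
roundTrip = OpenCVHelp.pointsToTargetCorners(pts)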
photonlibpy/estimation/rotTrlTransform3d.py ADDED
@@ -0,0 +1,32 @@
+ from typing import Self
+
+ from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d
+
+
+ class RotTrlTransform3d:
+     def __init__(
+         self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
+     ):
+         self.rot = rot
+         self.trl = trl
+
+     def inverse(self) -> Self:
+         invRot = -self.rot
+         invTrl = -(self.trl.rotateBy(invRot))
+         return type(self)(invRot, invTrl)
+
+     def getTransform(self) -> Transform3d:
+         return Transform3d(self.trl, self.rot)
+
+     def getTranslation(self) -> Translation3d:
+         return self.trl
+
+     def getRotation(self) -> Rotation3d:
+         return self.rot
+
+     def apply(self, trlToApply: Translation3d) -> Translation3d:
+         return trlToApply.rotateBy(self.rot) + self.trl
+
+     @classmethod
+     def makeRelativeTo(cls, pose: Pose3d) -> Self:
+         return cls(pose.rotation(), pose.translation()).inverse()
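A small sketch of RotTrlTransform3d used as a change of basis, with arbitrary example values:

from wpimath.geometry import Pose3d, Rotation3d, Translation3d

from photonlibpy.estimation import RotTrlTransform3d

camPose = Pose3d(Translation3d(1.0, 0.0, 0.5), Rotation3d())
fieldToCam = RotTrlTransform3d.makeRelativeTo(camPose)
# Expresses a field-frame point in the camera's frame.
pointInCamFrame = fieldToCam.apply(Translation3d(3.0, 1.0, 0.5))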
photonlibpy/estimation/targetModel.py ADDED
@@ -0,0 +1,137 @@
+ import math
+ from typing import List, Self
+
+ from wpimath.geometry import Pose3d, Rotation2d, Rotation3d, Translation3d
+ from wpimath.units import meters
+
+ from . import RotTrlTransform3d
+
+
+ class TargetModel:
+     def __init__(
+         self,
+         *,
+         width: meters | None = None,
+         height: meters | None = None,
+         length: meters | None = None,
+         diameter: meters | None = None,
+         verts: List[Translation3d] | None = None
+     ):
+
+         if (
+             width is not None
+             and height is not None
+             and length is None
+             and diameter is None
+             and verts is None
+         ):
+             self.isPlanar = True
+             self.isSpherical = False
+             self.vertices = [
+                 Translation3d(0.0, -width / 2.0, -height / 2.0),
+                 Translation3d(0.0, width / 2.0, -height / 2.0),
+                 Translation3d(0.0, width / 2.0, height / 2.0),
+                 Translation3d(0.0, -width / 2.0, height / 2.0),
+             ]
+
+             return
+
+         elif (
+             length is not None
+             and width is not None
+             and height is not None
+             and diameter is None
+             and verts is None
+         ):
+             verts = [
+                 Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
+                 Translation3d(length / 2.0, width / 2.0, -height / 2.0),
+                 Translation3d(length / 2.0, width / 2.0, height / 2.0),
+                 Translation3d(length / 2.0, -width / 2.0, height / 2.0),
+                 Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
+                 Translation3d(-length / 2.0, width / 2.0, height / 2.0),
+                 Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
+                 Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
+             ]
+             # Handle the rest of this in the "default" case
+         elif (
+             diameter is not None
+             and width is None
+             and height is None
+             and length is None
+             and verts is None
+         ):
+             self.isPlanar = False
+             self.isSpherical = True
+             self.vertices = [
+                 Translation3d(0.0, -diameter / 2.0, 0.0),
+                 Translation3d(0.0, 0.0, -diameter / 2.0),
+                 Translation3d(0.0, diameter / 2.0, 0.0),
+                 Translation3d(0.0, 0.0, diameter / 2.0),
+             ]
+             return
+         elif (
+             verts is not None
+             and width is None
+             and height is None
+             and length is None
+             and diameter is None
+         ):
+             # Handle this in the "default" case
+             pass
+         else:
+             raise Exception("Not a valid overload")
+
+         # TODO maybe remove this if there is a better/preferred way
+         # make the python type checking gods happy
+         assert verts is not None
+
+         self.isSpherical = False
+         if len(verts) <= 2:
+             self.vertices: List[Translation3d] = []
+             self.isPlanar = False
+         else:
+             cornersPlaner = True
+             for corner in verts:
+                 if abs(corner.X() < 1e-4):
+                     cornersPlaner = False
+             self.isPlanar = cornersPlaner
+
+         self.vertices = verts
+
+     def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
+         basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())
+
+         retVal = []
+
+         for vert in self.vertices:
+             retVal.append(basisChange.apply(vert))
+
+         return retVal
+
+     @classmethod
+     def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
+         relCam = cameraTrl - tgtTrl
+         orientToCam = Rotation3d(
+             0.0,
+             Rotation2d(math.hypot(relCam.X(), relCam.Y()), relCam.Z()).radians(),
+             Rotation2d(relCam.X(), relCam.Y()).radians(),
+         )
+         return Pose3d(tgtTrl, orientToCam)
+
+     def getVertices(self) -> List[Translation3d]:
+         return self.vertices
+
+     def getIsPlanar(self) -> bool:
+         return self.isPlanar
+
+     def getIsSpherical(self) -> bool:
+         return self.isSpherical
+
+     @classmethod
+     def AprilTag36h11(cls) -> Self:
+         return cls(width=6.5 * 0.0254, height=6.5 * 0.0254)
+
+     @classmethod
+     def AprilTag16h5(cls) -> Self:
+         return cls(width=6.0 * 0.0254, height=6.0 * 0.0254)
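A sketch of the keyword-only constructor overloads and the AprilTag factory methods; dimensions are in meters and the values are illustrative:

from photonlibpy.estimation import TargetModel

tag = TargetModel.AprilTag36h11()  # 6.5 in square planar tag
plane = TargetModel(width=0.3, height=0.2)  # planar rectangle overload
box = TargetModel(length=0.4, width=0.3, height=0.2)  # cuboid overload
ball = TargetModel(diameter=0.25)  # spherical overload
print(tag.getIsPlanar(), ball.getIsSpherical())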
photonlibpy/estimation/visionEstimation.py ADDED
@@ -0,0 +1,91 @@
+ import numpy as np
+ from robotpy_apriltag import AprilTag, AprilTagFieldLayout
+ from wpimath.geometry import Pose3d, Transform3d, Translation3d
+
+ from ..targeting import PhotonTrackedTarget, PnpResult, TargetCorner
+ from . import OpenCVHelp, TargetModel
+
+
+ class VisionEstimation:
+     @staticmethod
+     def getVisibleLayoutTags(
+         visTags: list[PhotonTrackedTarget], layout: AprilTagFieldLayout
+     ) -> list[AprilTag]:
+         retVal: list[AprilTag] = []
+         for tag in visTags:
+             id = tag.getFiducialId()
+             maybePose = layout.getTagPose(id)
+             if maybePose:
+                 tag = AprilTag()
+                 tag.ID = id
+                 tag.pose = maybePose
+                 retVal.append(tag)
+         return retVal
+
+     @staticmethod
+     def estimateCamPosePNP(
+         cameraMatrix: np.ndarray,
+         distCoeffs: np.ndarray,
+         visTags: list[PhotonTrackedTarget],
+         layout: AprilTagFieldLayout,
+         tagModel: TargetModel,
+     ) -> PnpResult | None:
+         if len(visTags) == 0:
+             return None
+
+         corners: list[TargetCorner] = []
+         knownTags: list[AprilTag] = []
+
+         for tgt in visTags:
+             id = tgt.getFiducialId()
+             maybePose = layout.getTagPose(id)
+             if maybePose:
+                 tag = AprilTag()
+                 tag.ID = id
+                 tag.pose = maybePose
+                 knownTags.append(tag)
+                 currentCorners = tgt.getDetectedCorners()
+                 if currentCorners:
+                     corners += currentCorners
+
+         if len(knownTags) == 0 or len(corners) == 0 or len(corners) % 4 != 0:
+             return None
+
+         points = OpenCVHelp.cornersToPoints(corners)
+
+         if len(knownTags) == 1:
+             camToTag = OpenCVHelp.solvePNP_Square(
+                 cameraMatrix, distCoeffs, tagModel.getVertices(), points
+             )
+             if not camToTag:
+                 return None
+
+             bestPose = knownTags[0].pose.transformBy(camToTag.best.inverse())
+             altPose = Pose3d()
+             if camToTag.ambiguity != 0:
+                 altPose = knownTags[0].pose.transformBy(camToTag.alt.inverse())
+
+             o = Pose3d()
+             result = PnpResult(
+                 best=Transform3d(o, bestPose),
+                 alt=Transform3d(o, altPose),
+                 ambiguity=camToTag.ambiguity,
+                 bestReprojErr=camToTag.bestReprojErr,
+                 altReprojErr=camToTag.altReprojErr,
+             )
+             return result
+         else:
+             objectTrls: list[Translation3d] = []
+             for tag in knownTags:
+                 verts = tagModel.getFieldVertices(tag.pose)
+                 objectTrls += verts
+
+             ret = OpenCVHelp.solvePNP_SQPNP(
+                 cameraMatrix, distCoeffs, objectTrls, points
+             )
+             if ret:
+                 # Invert best/alt transforms
+                 ret.best = ret.best.inverse()
+                 ret.alt = ret.alt.inverse()
+
+             return ret
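A hedged sketch of calling estimateCamPosePNP; the intrinsics and distortion values below are placeholders for real calibration data, and the targets/layout arguments are assumed to come from a PhotonPipelineResult and an AprilTagFieldLayout elsewhere in your code:

import numpy as np
from robotpy_apriltag import AprilTagFieldLayout

from photonlibpy.estimation import TargetModel, VisionEstimation
from photonlibpy.targeting import PhotonTrackedTarget


def estimateFieldToCamera(
    targets: list[PhotonTrackedTarget], layout: AprilTagFieldLayout
):
    # Placeholder intrinsics/distortion; use your camera calibration in practice.
    cameraMatrix = np.array([[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]])
    distCoeffs = np.zeros((8, 1), dtype=np.float32)
    result = VisionEstimation.estimateCamPosePNP(
        cameraMatrix, distCoeffs, targets, layout, TargetModel.AprilTag36h11()
    )
    # result.best is a Transform3d from the field origin to the camera, if solved.
    return result.best if result is not None else None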
photonlibpy/generated/MultiTargetPNPResultSerde.py CHANGED
@@ -20,8 +20,8 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
  from ..packet import Packet
+ from ..targeting import *


  class MultiTargetPNPResultSerde:
photonlibpy/generated/PhotonPipelineMetadataSerde.py CHANGED
@@ -20,8 +20,8 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
  from ..packet import Packet
+ from ..targeting import *


  class PhotonPipelineMetadataSerde:
photonlibpy/generated/PhotonPipelineResultSerde.py CHANGED
@@ -20,8 +20,8 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
  from ..packet import Packet
+ from ..targeting import *


  class PhotonPipelineResultSerde:
photonlibpy/generated/PhotonTrackedTargetSerde.py CHANGED
@@ -20,8 +20,8 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
  from ..packet import Packet
+ from ..targeting import *


  class PhotonTrackedTargetSerde:
photonlibpy/generated/PnpResultSerde.py CHANGED
@@ -20,8 +20,8 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
  from ..packet import Packet
+ from ..targeting import *


  class PnpResultSerde:
photonlibpy/generated/TargetCornerSerde.py CHANGED
@@ -20,8 +20,8 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

- from ..targeting import *
  from ..packet import Packet
+ from ..targeting import *


  class TargetCornerSerde:
photonlibpy/generated/__init__.py CHANGED
@@ -2,7 +2,6 @@

  from .MultiTargetPNPResultSerde import MultiTargetPNPResultSerde # noqa
  from .PhotonPipelineMetadataSerde import PhotonPipelineMetadataSerde # noqa
- from .PhotonPipelineMetadataSerde import PhotonPipelineMetadataSerde # noqa
  from .PhotonPipelineResultSerde import PhotonPipelineResultSerde # noqa
  from .PhotonTrackedTargetSerde import PhotonTrackedTargetSerde # noqa
  from .PnpResultSerde import PnpResultSerde # noqa
photonlibpy/networktables/NTTopicSet.py ADDED
@@ -0,0 +1,64 @@
+ import ntcore as nt
+ from wpimath.geometry import Transform3d
+
+ from ..generated.PhotonPipelineResultSerde import PhotonPipelineResultSerde
+
+ PhotonPipelineResult_TYPE_STRING = (
+     "photonstruct:PhotonPipelineResult:" + PhotonPipelineResultSerde.MESSAGE_VERSION
+ )
+
+
+ class NTTopicSet:
+
+     def __init__(self) -> None:
+         self.subTable = nt.NetworkTableInstance.getDefault()
+
+     def updateEntries(self) -> None:
+         options = nt.PubSubOptions()
+         options.periodic = 0.01
+         options.sendAll = True
+         self.rawBytesEntry = self.subTable.getRawTopic("rawBytes").publish(
+             PhotonPipelineResult_TYPE_STRING, options
+         )
+         self.rawBytesEntry.getTopic().setProperty(
+             "message_uuid", PhotonPipelineResultSerde.MESSAGE_VERSION
+         )
+         self.pipelineIndexPublisher = self.subTable.getIntegerTopic(
+             "pipelineIndexState"
+         ).publish()
+         self.pipelineIndexRequestSub = self.subTable.getIntegerTopic(
+             "pipelineIndexRequest"
+         ).subscribe(0)
+
+         self.driverModePublisher = self.subTable.getBooleanTopic("driverMode").publish()
+         self.driverModeSubscriber = self.subTable.getBooleanTopic(
+             "driverModeRequest"
+         ).subscribe(False)
+
+         self.driverModeSubscriber.getTopic().publish().setDefault(False)
+
+         self.latencyMillisEntry = self.subTable.getDoubleTopic(
+             "latencyMillis"
+         ).publish()
+         self.hasTargetEntry = self.subTable.getBooleanTopic("hasTargets").publish()
+
+         self.targetPitchEntry = self.subTable.getDoubleTopic("targetPitch").publish()
+         self.targetAreaEntry = self.subTable.getDoubleTopic("targetArea").publish()
+         self.targetYawEntry = self.subTable.getDoubleTopic("targetYaw").publish()
+         self.targetPoseEntry = self.subTable.getStructTopic(
+             "targetPose", Transform3d
+         ).publish()
+         self.targetSkewEntry = self.subTable.getDoubleTopic("targetSkew").publish()
+
+         self.bestTargetPosX = self.subTable.getDoubleTopic("targetPixelsX").publish()
+         self.bestTargetPosY = self.subTable.getDoubleTopic("targetPixelsY").publish()
+
+         self.heartbeatTopic = self.subTable.getIntegerTopic("heartbeat")
+         self.heartbeatPublisher = self.heartbeatTopic.publish()
+
+         self.cameraIntrinsicsPublisher = self.subTable.getDoubleArrayTopic(
+             "cameraIntrinsics"
+         ).publish()
+         self.cameraDistortionPublisher = self.subTable.getDoubleArrayTopic(
+             "cameraDistortion"
+         ).publish()
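A hedged sketch of publishing through NTTopicSet; repointing subTable at a per-camera table is an assumption based on how the simulation code is expected to use this class, and the table/camera names are placeholders:

import ntcore as nt

from photonlibpy.networktables import NTTopicSet

inst = nt.NetworkTableInstance.getDefault()
ts = NTTopicSet()
# Assumption: callers repoint subTable at their camera's table before publishing.
ts.subTable = inst.getTable("photonvision").getSubTable("example_camera")
ts.updateEntries()

ts.hasTargetEntry.set(False)
ts.heartbeatPublisher.set(1)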
photonlibpy/networktables/__init__.py ADDED
@@ -0,0 +1 @@
+ from .NTTopicSet import NTTopicSet