photonlibpy 2025.0.0b2__tar.gz → 2025.0.0b4__tar.gz

Sign up to get free protection for your applications and to get access to all the features.
Files changed (45) hide show
  1. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/PKG-INFO +2 -2
  2. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/estimation/openCVHelp.py +117 -7
  3. photonlibpy-2025.0.0b4/photonlibpy/estimation/rotTrlTransform3d.py +74 -0
  4. photonlibpy-2025.0.0b4/photonlibpy/estimation/targetModel.py +183 -0
  5. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/estimation/visionEstimation.py +26 -4
  6. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/generated/MultiTargetPNPResultSerde.py +7 -1
  7. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/generated/PhotonPipelineMetadataSerde.py +6 -1
  8. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/generated/PhotonPipelineResultSerde.py +9 -1
  9. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/generated/PhotonTrackedTargetSerde.py +7 -1
  10. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/generated/PnpResultSerde.py +6 -1
  11. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/generated/TargetCornerSerde.py +6 -1
  12. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/networktables/NTTopicSet.py +11 -2
  13. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/photonCamera.py +69 -2
  14. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/simulation/photonCameraSim.py +127 -51
  15. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/simulation/simCameraProperties.py +184 -63
  16. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/simulation/visionSystemSim.py +104 -7
  17. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/simulation/visionTargetSim.py +10 -0
  18. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/targeting/TargetCorner.py +2 -2
  19. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/targeting/multiTargetPNPResult.py +4 -15
  20. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/targeting/photonPipelineResult.py +4 -3
  21. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/targeting/photonTrackedTarget.py +2 -2
  22. photonlibpy-2025.0.0b4/photonlibpy/version.py +2 -0
  23. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy.egg-info/PKG-INFO +2 -2
  24. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy.egg-info/SOURCES.txt +1 -0
  25. photonlibpy-2025.0.0b4/photonlibpy.egg-info/requires.txt +9 -0
  26. photonlibpy-2025.0.0b4/pyproject.toml +2 -0
  27. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/setup.py +6 -7
  28. photonlibpy-2025.0.0b2/photonlibpy/estimation/rotTrlTransform3d.py +0 -32
  29. photonlibpy-2025.0.0b2/photonlibpy/estimation/targetModel.py +0 -137
  30. photonlibpy-2025.0.0b2/photonlibpy/version.py +0 -2
  31. photonlibpy-2025.0.0b2/photonlibpy.egg-info/requires.txt +0 -12
  32. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/__init__.py +0 -0
  33. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/estimatedRobotPose.py +0 -0
  34. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/estimation/__init__.py +0 -0
  35. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/estimation/cameraTargetRelation.py +0 -0
  36. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/generated/__init__.py +0 -0
  37. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/networktables/__init__.py +0 -0
  38. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/packet.py +0 -0
  39. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/photonPoseEstimator.py +0 -0
  40. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/simulation/__init__.py +0 -0
  41. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/simulation/videoSimUtil.py +0 -0
  42. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy/targeting/__init__.py +0 -0
  43. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy.egg-info/dependency_links.txt +0 -0
  44. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/photonlibpy.egg-info/top_level.txt +0 -0
  45. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b4}/setup.cfg +0 -0
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: photonlibpy
3
- Version: 2025.0.0b2
4
- Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2025.0.0-beta-2 .
3
+ Version: 2025.0.0b4
4
+ Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2025.0.0-beta-4 .
5
5
  Home-page: https://photonvision.org
6
6
  Author: Photonvision Development Team
7
7
  Description-Content-Type: text/markdown
@@ -1,5 +1,5 @@
1
1
  import math
2
- from typing import Any, Tuple
2
+ from typing import Any
3
3
 
4
4
  import cv2 as cv
5
5
  import numpy as np
@@ -27,6 +27,12 @@ class OpenCVHelp:
27
27
 
28
28
  @staticmethod
29
29
  def translationToTVec(translations: list[Translation3d]) -> np.ndarray:
30
+ """Creates a new :class:`np.array` with these 3d translations. The opencv tvec is a vector with
31
+ three elements representing {x, y, z} in the EDN coordinate system.
32
+
33
+ :param translations: The translations to convert into a np.array
34
+ """
35
+
30
36
  retVal: list[list] = []
31
37
  for translation in translations:
32
38
  trl = OpenCVHelp.translationNWUtoEDN(translation)
@@ -38,6 +44,13 @@ class OpenCVHelp:
38
44
 
39
45
  @staticmethod
40
46
  def rotationToRVec(rotation: Rotation3d) -> np.ndarray:
47
+ """Creates a new :class:`.np.array` with this 3d rotation. The opencv rvec Mat is a vector with
48
+ three elements representing the axis scaled by the angle in the EDN coordinate system. (angle =
49
+ norm, and axis = rvec / norm)
50
+
51
+ :param rotation: The rotation to convert into a np.array
52
+ """
53
+
41
54
  retVal: list[np.ndarray] = []
42
55
  rot = OpenCVHelp.rotationNWUtoEDN(rotation)
43
56
  rotVec = rot.getQuaternion().toRotationVector()
@@ -48,13 +61,13 @@ class OpenCVHelp:
48
61
  )
49
62
 
50
63
  @staticmethod
51
- def avgPoint(points: list[Tuple[float, float]]) -> Tuple[float, float]:
64
+ def avgPoint(points: np.ndarray) -> np.ndarray:
52
65
  x = 0.0
53
66
  y = 0.0
54
67
  for p in points:
55
- x += p[0]
56
- y += p[1]
57
- return (x / len(points), y / len(points))
68
+ x += p[0, 0]
69
+ y += p[0, 1]
70
+ return np.array([[x / len(points), y / len(points)]])
58
71
 
59
72
  @staticmethod
60
73
  def pointsToTargetCorners(points: np.ndarray) -> list[TargetCorner]:
@@ -88,6 +101,25 @@ class OpenCVHelp:
88
101
  def reorderCircular(
89
102
  elements: list[Any] | np.ndarray, backwards: bool, shiftStart: int
90
103
  ) -> list[Any]:
104
+ """Reorders the list, optionally indexing backwards and wrapping around to the last element after
105
+ the first, and shifting all indices in the direction of indexing.
106
+
107
+ e.g.
108
+
109
+ ({1,2,3}, false, 1) == {2,3,1}
110
+
111
+ ({1,2,3}, true, 0) == {1,3,2}
112
+
113
+ ({1,2,3}, true, 1) == {3,2,1}
114
+
115
+ :param elements: list elements
116
+ :param backwards: If indexing should happen in reverse (0, size-1, size-2, ...)
117
+ :param shiftStart: How much the initial index should be shifted (instead of starting at index 0,
118
+ start at shiftStart, negated if backwards)
119
+
120
+ :returns: Reordered list
121
+ """
122
+
91
123
  size = len(elements)
92
124
  reordered = []
93
125
  dir = -1 if backwards else 1
@@ -100,18 +132,39 @@ class OpenCVHelp:
100
132
 
101
133
  @staticmethod
102
134
  def translationEDNToNWU(trl: Translation3d) -> Translation3d:
135
+ """Convert a rotation delta from EDN to NWU. For example, if you have a rotation X,Y,Z {1, 0, 0}
136
+ in EDN, this would be {0, -1, 0} in NWU.
137
+ """
138
+
103
139
  return trl.rotateBy(EDN_TO_NWU)
104
140
 
105
141
  @staticmethod
106
142
  def rotationEDNToNWU(rot: Rotation3d) -> Rotation3d:
143
+ """Convert a rotation delta from NWU to EDN. For example, if you have a rotation X,Y,Z {1, 0, 0}
144
+ in NWU, this would be {0, 0, 1} in EDN.
145
+ """
146
+
107
147
  return -EDN_TO_NWU + (rot + EDN_TO_NWU)
108
148
 
109
149
  @staticmethod
110
150
  def tVecToTranslation(tvecInput: np.ndarray) -> Translation3d:
151
+ """Returns a new 3d translation from this :class:`.Mat`. The opencv tvec is a vector with three
152
+ elements representing {x, y, z} in the EDN coordinate system.
153
+
154
+ :param tvecInput: The tvec to create a Translation3d from
155
+ """
156
+
111
157
  return OpenCVHelp.translationEDNToNWU(Translation3d(tvecInput))
112
158
 
113
159
  @staticmethod
114
160
  def rVecToRotation(rvecInput: np.ndarray) -> Rotation3d:
161
+ """Returns a 3d rotation from this :class:`.Mat`. The opencv rvec Mat is a vector with three
162
+ elements representing the axis scaled by the angle in the EDN coordinate system. (angle = norm,
163
+ and axis = rvec / norm)
164
+
165
+ :param rvecInput: The rvec to create a Rotation3d from
166
+ """
167
+
115
168
  return OpenCVHelp.rotationEDNToNWU(Rotation3d(rvecInput))
116
169
 
117
170
  @staticmethod
@@ -121,12 +174,43 @@ class OpenCVHelp:
121
174
  modelTrls: list[Translation3d],
122
175
  imagePoints: np.ndarray,
123
176
  ) -> PnpResult | None:
177
+ """Finds the transformation(s) that map the camera's pose to the target's pose. The camera's pose
178
+ relative to the target is determined by the supplied 3d points of the target's model and their
179
+ associated 2d points imaged by the camera. The supplied model translations must be relative to
180
+ the target's pose.
181
+
182
+ For planar targets, there may be an alternate solution which is plausible given the 2d image
183
+ points. This has an associated "ambiguity" which describes the ratio of reprojection error
184
+ between the "best" and "alternate" solution.
185
+
186
+ This method is intended for use with individual AprilTags, and will not work unless 4 points
187
+ are provided.
188
+
189
+ :param cameraMatrix: The camera intrinsics matrix in standard opencv form
190
+ :param distCoeffs: The camera distortion matrix in standard opencv form
191
+ :param modelTrls: The translations of the object corners. These should have the object pose as
192
+ their origin. These must come in a specific, pose-relative order (in NWU):
193
+
194
+ - Point 0: [0, -squareLength / 2, squareLength / 2]
195
+ - Point 1: [0, squareLength / 2, squareLength / 2]
196
+ - Point 2: [0, squareLength / 2, -squareLength / 2]
197
+ - Point 3: [0, -squareLength / 2, -squareLength / 2]
198
+ :param imagePoints: The projection of these 3d object points into the 2d camera image. The order
199
+ should match the given object point translations.
200
+
201
+ :returns: The resulting transformation that maps the camera pose to the target pose and the
202
+ ambiguity if an alternate solution is available.
203
+ """
124
204
  modelTrls = OpenCVHelp.reorderCircular(modelTrls, True, -1)
125
205
  imagePoints = np.array(OpenCVHelp.reorderCircular(imagePoints, True, -1))
126
206
  objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
127
207
 
128
208
  alt: Transform3d | None = None
209
+ reprojectionError: cv.typing.MatLike | None = None
210
+ best: Transform3d = Transform3d()
211
+
129
212
  for tries in range(2):
213
+ # calc rvecs/tvecs and associated reprojection error from image points
130
214
  retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
131
215
  objectMat,
132
216
  imagePoints,
@@ -135,6 +219,7 @@ class OpenCVHelp:
135
219
  flags=cv.SOLVEPNP_IPPE_SQUARE,
136
220
  )
137
221
 
222
+ # convert to wpilib coordinates
138
223
  best = Transform3d(
139
224
  OpenCVHelp.tVecToTranslation(tvecs[0]),
140
225
  OpenCVHelp.rVecToRotation(rvecs[0]),
@@ -145,7 +230,10 @@ class OpenCVHelp:
145
230
  OpenCVHelp.rVecToRotation(rvecs[1]),
146
231
  )
147
232
 
148
- if not math.isnan(reprojectionError[0, 0]):
233
+ # check if we got a NaN result
234
+ if reprojectionError is not None and not math.isnan(
235
+ reprojectionError[0, 0]
236
+ ):
149
237
  break
150
238
  else:
151
239
  pt = imagePoints[0]
@@ -153,7 +241,8 @@ class OpenCVHelp:
153
241
  pt[0, 1] -= 0.001
154
242
  imagePoints[0] = pt
155
243
 
156
- if math.isnan(reprojectionError[0, 0]):
244
+ # solvePnP failed
245
+ if reprojectionError is None or math.isnan(reprojectionError[0, 0]):
157
246
  print("SolvePNP_Square failed!")
158
247
  return None
159
248
 
@@ -181,6 +270,27 @@ class OpenCVHelp:
181
270
  modelTrls: list[Translation3d],
182
271
  imagePoints: np.ndarray,
183
272
  ) -> PnpResult | None:
273
+ """Finds the transformation that maps the camera's pose to the origin of the supplied object. An
274
+ "object" is simply a set of known 3d translations that correspond to the given 2d points. If,
275
+ for example, the object translations are given relative to close-right corner of the blue
276
+ alliance(the default origin), a camera-to-origin transformation is returned. If the
277
+ translations are relative to a target's pose, a camera-to-target transformation is returned.
278
+
279
+ There must be at least 3 points to use this method. This does not return an alternate
280
+ solution-- if you are intending to use solvePNP on a single AprilTag, see {@link
281
+ #solvePNP_SQUARE} instead.
282
+
283
+ :param cameraMatrix: The camera intrinsics matrix in standard opencv form
284
+ :param distCoeffs: The camera distortion matrix in standard opencv form
285
+ :param objectTrls: The translations of the object corners, relative to the field.
286
+ :param imagePoints: The projection of these 3d object points into the 2d camera image. The order
287
+ should match the given object point translations.
288
+
289
+ :returns: The resulting transformation that maps the camera pose to the target pose. If the 3d
290
+ model points are supplied relative to the origin, this transformation brings the camera to
291
+ the origin.
292
+ """
293
+
184
294
  objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
185
295
 
186
296
  retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
@@ -0,0 +1,74 @@
from typing import Self

from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d


class RotTrlTransform3d:
    """A transformation that first rotates a pose about the origin and then translates it."""

    def __init__(
        self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
    ):
        """Create a rotation-translation transformation.

        Applying this RotTrlTransform3d to poses preserves their current
        origin-to-pose transform, as if the origin itself had been transformed
        by these components instead.

        :param rot: The rotation component
        :param trl: The translation component
        """
        self.rot = rot
        self.trl = trl

    def inverse(self) -> Self:
        """The inverse of this transformation; applying it will "undo" this one."""
        rotInv = -self.rot
        return type(self)(rotInv, -(self.trl.rotateBy(rotInv)))

    def getTransform(self) -> Transform3d:
        """This transformation expressed as a Transform3d (as if of the origin)."""
        return Transform3d(self.trl, self.rot)

    def getTranslation(self) -> Translation3d:
        """The translation component of this transformation."""
        return self.trl

    def getRotation(self) -> Rotation3d:
        """The rotation component of this transformation."""
        return self.rot

    def applyTranslation(self, trlToApply: Translation3d) -> Translation3d:
        """Rotate the given translation by this rotation, then offset by this translation."""
        return trlToApply.rotateBy(self.rot) + self.trl

    def applyRotation(self, rotToApply: Rotation3d) -> Rotation3d:
        """Compose the given rotation with this transformation's rotation."""
        return rotToApply + self.rot

    def applyPose(self, poseToApply: Pose3d) -> Pose3d:
        """Apply this transformation to both components of the given pose."""
        return Pose3d(
            self.applyTranslation(poseToApply.translation()),
            self.applyRotation(poseToApply.rotation()),
        )

    def applyTrls(self, rots: list[Rotation3d]) -> list[Rotation3d]:
        # NOTE(review): despite the name, this applies the *rotation* component
        # to a list of rotations — kept as-is since it is the public interface.
        return [self.applyRotation(rot) for rot in rots]

    @classmethod
    def makeRelativeTo(cls, pose: Pose3d) -> Self:
        """The rotation-translation transformation that makes poses in the world
        consider this pose as the new origin (a change of basis to this pose).

        :param pose: The new origin
        """
        return cls(pose.rotation(), pose.translation()).inverse()

    @classmethod
    def makeBetweenPoses(cls, initial: Pose3d, last: Pose3d) -> Self:
        """The transformation carrying ``initial`` onto ``last``."""
        deltaRot = last.rotation() - initial.rotation()
        return cls(
            deltaRot,
            last.translation() - initial.translation().rotateBy(deltaRot),
        )
@@ -0,0 +1,183 @@
import math
from typing import List, Self

from wpimath.geometry import Pose3d, Rotation2d, Rotation3d, Translation3d
from wpimath.units import meters

from . import RotTrlTransform3d


class TargetModel:
    """Describes the 3d model of a target."""

    def __init__(self):
        """Default constructor for initialising internal class members. DO NOT USE THIS!!!
        Use createPlanar, createCuboid, createSpheroid or createArbitrary instead.
        """
        self.vertices: List[Translation3d] = []
        self.isPlanar = False
        self.isSpherical = False

    @classmethod
    def createPlanar(cls, width: meters, height: meters) -> Self:
        """Creates a rectangular, planar target model given the width and height. The model has four
        vertices:

        - Point 0: [0, -width/2, -height/2]
        - Point 1: [0, width/2, -height/2]
        - Point 2: [0, width/2, height/2]
        - Point 3: [0, -width/2, height/2]
        """

        tm = cls()

        tm.isPlanar = True
        tm.isSpherical = False
        tm.vertices = [
            Translation3d(0.0, -width / 2.0, -height / 2.0),
            Translation3d(0.0, width / 2.0, -height / 2.0),
            Translation3d(0.0, width / 2.0, height / 2.0),
            Translation3d(0.0, -width / 2.0, height / 2.0),
        ]
        return tm

    @classmethod
    def createCuboid(cls, length: meters, width: meters, height: meters) -> Self:
        """Creates a cuboid target model given the length, width, height. The model has eight vertices:

        - Point 0: [length/2, -width/2, -height/2]
        - Point 1: [length/2, width/2, -height/2]
        - Point 2: [length/2, width/2, height/2]
        - Point 3: [length/2, -width/2, height/2]
        - Point 4: [-length/2, -width/2, height/2]
        - Point 5: [-length/2, width/2, height/2]
        - Point 6: [-length/2, width/2, -height/2]
        - Point 7: [-length/2, -width/2, -height/2]
        """

        tm = cls()
        verts = [
            Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
            Translation3d(length / 2.0, width / 2.0, -height / 2.0),
            Translation3d(length / 2.0, width / 2.0, height / 2.0),
            Translation3d(length / 2.0, -width / 2.0, height / 2.0),
            Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
            Translation3d(-length / 2.0, width / 2.0, height / 2.0),
            Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
            Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
        ]

        tm._common_construction(verts)

        return tm

    @classmethod
    def createSpheroid(cls, diameter: meters) -> Self:
        """Creates a spherical target model which has similar dimensions regardless of its rotation. This
        model has four vertices:

        - Point 0: [0, -radius, 0]
        - Point 1: [0, 0, -radius]
        - Point 2: [0, radius, 0]
        - Point 3: [0, 0, radius]

        *Q: Why these vertices?* A: This target should be oriented to the camera every frame, much
        like a sprite/decal, and these vertices represent the ellipse vertices (maxima). These vertices
        are used for drawing the image of this sphere, but do not match the corners that will be
        published by photonvision.
        """

        tm = cls()

        tm.isPlanar = False
        tm.isSpherical = True
        tm.vertices = [
            Translation3d(0.0, -diameter / 2.0, 0.0),
            Translation3d(0.0, 0.0, -diameter / 2.0),
            Translation3d(0.0, diameter / 2.0, 0.0),
            Translation3d(0.0, 0.0, diameter / 2.0),
        ]

        return tm

    @classmethod
    def createArbitrary(cls, verts: List[Translation3d]) -> Self:
        """Creates a target model from arbitrary 3d vertices. Automatically determines if the given
        vertices are planar (x == 0). More than 2 vertices must be given. If this is a planar model, the
        vertices should define a non-intersecting contour.

        :param verts: Translations representing the vertices of this target model relative to its
            pose.
        """

        tm = cls()
        tm._common_construction(verts)

        return tm

    def _common_construction(self, verts: List[Translation3d]) -> None:
        """Shared vertex/planarity setup used by the cuboid and arbitrary factories."""
        self.isSpherical = False
        if len(verts) <= 2:
            # Fewer than 3 vertices cannot define a contour: store an empty model.
            # (Fixed: the original set self.vertices = [] here but then
            # unconditionally overwrote it with verts below.)
            verts = []
            self.isPlanar = False
        else:
            # Planar iff every vertex lies on the model's local YZ plane (x == 0).
            # (Fixed: the original tested `abs(corner.X() < 1e-4)`, which takes
            # abs() of a boolean and corrupts the planarity check.)
            cornersPlanar = True
            for corner in verts:
                if abs(corner.X()) > 1e-4:
                    cornersPlanar = False
            self.isPlanar = cornersPlanar

        self.vertices = verts

    def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
        """This target's vertices offset from its field pose.

        Note: If this target is spherical, use {@link #getOrientedPose(Translation3d,
        Translation3d)} with this method.
        """

        basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())

        retVal = []

        for vert in self.vertices:
            retVal.append(basisChange.applyTranslation(vert))

        return retVal

    @classmethod
    def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
        """Returns a Pose3d with the given target translation oriented (with its relative x-axis aligned)
        to the camera translation. This is used for spherical targets which should not have their
        projection change regardless of their own rotation.

        :param tgtTrl: This target's translation
        :param cameraTrl: Camera's translation

        :returns: This target's pose oriented to the camera
        """

        relCam = cameraTrl - tgtTrl
        orientToCam = Rotation3d(
            0.0,
            Rotation2d(math.hypot(relCam.X(), relCam.Y()), relCam.Z()).radians(),
            Rotation2d(relCam.X(), relCam.Y()).radians(),
        )
        return Pose3d(tgtTrl, orientToCam)

    def getVertices(self) -> List[Translation3d]:
        """The vertices of this model, relative to its pose."""
        return self.vertices

    def getIsPlanar(self) -> bool:
        """Whether all vertices lie in the model's local YZ plane."""
        return self.isPlanar

    def getIsSpherical(self) -> bool:
        """Whether this model was created as a spheroid."""
        return self.isSpherical

    @classmethod
    def AprilTag36h11(cls) -> Self:
        # 6.5 inch tag, converted to meters.
        return cls.createPlanar(width=6.5 * 0.0254, height=6.5 * 0.0254)

    @classmethod
    def AprilTag16h5(cls) -> Self:
        # 6.0 inch tag, converted to meters.
        return cls.createPlanar(width=6.0 * 0.0254, height=6.0 * 0.0254)
@@ -11,15 +11,16 @@ class VisionEstimation:
11
11
  def getVisibleLayoutTags(
12
12
  visTags: list[PhotonTrackedTarget], layout: AprilTagFieldLayout
13
13
  ) -> list[AprilTag]:
14
+ """Get the visible :class:`.AprilTag`s which are in the tag layout using the visible tag IDs."""
14
15
  retVal: list[AprilTag] = []
15
16
  for tag in visTags:
16
17
  id = tag.getFiducialId()
17
18
  maybePose = layout.getTagPose(id)
18
19
  if maybePose:
19
- tag = AprilTag()
20
- tag.ID = id
21
- tag.pose = maybePose
22
- retVal.append(tag)
20
+ aprilTag = AprilTag()
21
+ aprilTag.ID = id
22
+ aprilTag.pose = maybePose
23
+ retVal.append(aprilTag)
23
24
  return retVal
24
25
 
25
26
  @staticmethod
@@ -30,12 +31,31 @@ class VisionEstimation:
30
31
  layout: AprilTagFieldLayout,
31
32
  tagModel: TargetModel,
32
33
  ) -> PnpResult | None:
34
+ """Performs solvePNP using 3d-2d point correspondences of visible AprilTags to estimate the
35
+ field-to-camera transformation. If only one tag is visible, the result may have an alternate
36
+ solution.
37
+
38
+ **Note:** The returned transformation is from the field origin to the camera pose!
39
+
40
+ With only one tag: {@link OpenCVHelp#solvePNP_SQUARE}
41
+
42
+ With multiple tags: {@link OpenCVHelp#solvePNP_SQPNP}
43
+
44
+ :param cameraMatrix: The camera intrinsics matrix in standard opencv form
45
+ :param distCoeffs: The camera distortion matrix in standard opencv form
46
+ :param visTags: The visible tags reported by PV. Non-tag targets are automatically excluded.
47
+ :param tagLayout: The known tag layout on the field
48
+
49
+ :returns: The transformation that maps the field origin to the camera pose. Ensure the {@link
50
+ PnpResult} are present before utilizing them.
51
+ """
33
52
  if len(visTags) == 0:
34
53
  return None
35
54
 
36
55
  corners: list[TargetCorner] = []
37
56
  knownTags: list[AprilTag] = []
38
57
 
58
+ # ensure these are AprilTags in our layout
39
59
  for tgt in visTags:
40
60
  id = tgt.getFiducialId()
41
61
  maybePose = layout.getTagPose(id)
@@ -53,6 +73,7 @@ class VisionEstimation:
53
73
 
54
74
  points = OpenCVHelp.cornersToPoints(corners)
55
75
 
76
+ # single-tag pnp
56
77
  if len(knownTags) == 1:
57
78
  camToTag = OpenCVHelp.solvePNP_Square(
58
79
  cameraMatrix, distCoeffs, tagModel.getVertices(), points
@@ -74,6 +95,7 @@ class VisionEstimation:
74
95
  altReprojErr=camToTag.altReprojErr,
75
96
  )
76
97
  return result
98
+ # multi-tag pnp
77
99
  else:
78
100
  objectTrls: list[Translation3d] = []
79
101
  for tag in knownTags:
@@ -20,8 +20,14 @@
20
20
  ## --> DO NOT MODIFY <--
21
21
  ###############################################################################
22
22
 
23
+ from typing import TYPE_CHECKING
24
+
23
25
  from ..packet import Packet
24
- from ..targeting import *
26
+ from ..targeting import * # noqa
27
+
28
+ if TYPE_CHECKING:
29
+ from ..targeting import MultiTargetPNPResult # noqa
30
+ from ..targeting import PnpResult # noqa
25
31
 
26
32
 
27
33
  class MultiTargetPNPResultSerde:
@@ -20,8 +20,13 @@
20
20
  ## --> DO NOT MODIFY <--
21
21
  ###############################################################################
22
22
 
23
+ from typing import TYPE_CHECKING
24
+
23
25
  from ..packet import Packet
24
- from ..targeting import *
26
+ from ..targeting import * # noqa
27
+
28
+ if TYPE_CHECKING:
29
+ from ..targeting import PhotonPipelineMetadata # noqa
25
30
 
26
31
 
27
32
  class PhotonPipelineMetadataSerde:
@@ -20,8 +20,16 @@
20
20
  ## --> DO NOT MODIFY <--
21
21
  ###############################################################################
22
22
 
23
+ from typing import TYPE_CHECKING
24
+
23
25
  from ..packet import Packet
24
- from ..targeting import *
26
+ from ..targeting import * # noqa
27
+
28
+ if TYPE_CHECKING:
29
+ from ..targeting import MultiTargetPNPResult # noqa
30
+ from ..targeting import PhotonPipelineMetadata # noqa
31
+ from ..targeting import PhotonPipelineResult # noqa
32
+ from ..targeting import PhotonTrackedTarget # noqa
25
33
 
26
34
 
27
35
  class PhotonPipelineResultSerde:
@@ -20,8 +20,14 @@
20
20
  ## --> DO NOT MODIFY <--
21
21
  ###############################################################################
22
22
 
23
+ from typing import TYPE_CHECKING
24
+
23
25
  from ..packet import Packet
24
- from ..targeting import *
26
+ from ..targeting import * # noqa
27
+
28
+ if TYPE_CHECKING:
29
+ from ..targeting import PhotonTrackedTarget # noqa
30
+ from ..targeting import TargetCorner # noqa
25
31
 
26
32
 
27
33
  class PhotonTrackedTargetSerde:
@@ -20,8 +20,13 @@
20
20
  ## --> DO NOT MODIFY <--
21
21
  ###############################################################################
22
22
 
23
+ from typing import TYPE_CHECKING
24
+
23
25
  from ..packet import Packet
24
- from ..targeting import *
26
+ from ..targeting import * # noqa
27
+
28
+ if TYPE_CHECKING:
29
+ from ..targeting import PnpResult # noqa
25
30
 
26
31
 
27
32
  class PnpResultSerde:
@@ -20,8 +20,13 @@
20
20
  ## --> DO NOT MODIFY <--
21
21
  ###############################################################################
22
22
 
23
+ from typing import TYPE_CHECKING
24
+
23
25
  from ..packet import Packet
24
- from ..targeting import *
26
+ from ..targeting import * # noqa
27
+
28
+ if TYPE_CHECKING:
29
+ from ..targeting import TargetCorner # noqa
25
30
 
26
31
 
27
32
  class TargetCornerSerde:
@@ -9,9 +9,18 @@ PhotonPipelineResult_TYPE_STRING = (
9
9
 
10
10
 
11
11
  class NTTopicSet:
12
+ """This class is a wrapper around all per-pipeline NT topics that PhotonVision should be publishing
13
+ It's split here so the sim and real-camera implementations can share a common implementation of
14
+ the naming and registration of the NT content.
12
15
 
13
- def __init__(self) -> None:
14
- self.subTable = nt.NetworkTableInstance.getDefault()
16
+ However, we do expect that the actual logic which fills out values in the entries will be
17
+ different for sim vs. real camera
18
+ """
19
+
20
+ def __init__(self, tableName: str, cameraName: str) -> None:
21
+ instance = nt.NetworkTableInstance.getDefault()
22
+ photonvision_root_table = instance.getTable(tableName)
23
+ self.subTable = photonvision_root_table.getSubTable(cameraName)
15
24
 
16
25
  def updateEntries(self) -> None:
17
26
  options = nt.PubSubOptions()