photonlibpy 2025.0.0b2__tar.gz → 2025.0.0b3__tar.gz

Files changed (45):
  1. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/PKG-INFO +2 -2
  2. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/estimation/openCVHelp.py +12 -7
  3. photonlibpy-2025.0.0b3/photonlibpy/estimation/rotTrlTransform3d.py +55 -0
  4. photonlibpy-2025.0.0b3/photonlibpy/estimation/targetModel.py +120 -0
  5. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/estimation/visionEstimation.py +4 -4
  6. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/generated/MultiTargetPNPResultSerde.py +7 -1
  7. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/generated/PhotonPipelineMetadataSerde.py +6 -1
  8. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/generated/PhotonPipelineResultSerde.py +9 -1
  9. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/generated/PhotonTrackedTargetSerde.py +7 -1
  10. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/generated/PnpResultSerde.py +6 -1
  11. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/generated/TargetCornerSerde.py +6 -1
  12. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/networktables/NTTopicSet.py +4 -2
  13. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/photonCamera.py +1 -1
  14. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/simulation/photonCameraSim.py +18 -48
  15. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/simulation/simCameraProperties.py +44 -62
  16. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/simulation/visionSystemSim.py +12 -7
  17. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/targeting/TargetCorner.py +2 -2
  18. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/targeting/multiTargetPNPResult.py +4 -15
  19. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/targeting/photonPipelineResult.py +4 -3
  20. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/targeting/photonTrackedTarget.py +2 -2
  21. photonlibpy-2025.0.0b3/photonlibpy/version.py +2 -0
  22. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy.egg-info/PKG-INFO +2 -2
  23. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy.egg-info/SOURCES.txt +1 -0
  24. photonlibpy-2025.0.0b3/photonlibpy.egg-info/requires.txt +12 -0
  25. photonlibpy-2025.0.0b3/pyproject.toml +2 -0
  26. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/setup.py +6 -6
  27. photonlibpy-2025.0.0b2/photonlibpy/estimation/rotTrlTransform3d.py +0 -32
  28. photonlibpy-2025.0.0b2/photonlibpy/estimation/targetModel.py +0 -137
  29. photonlibpy-2025.0.0b2/photonlibpy/version.py +0 -2
  30. photonlibpy-2025.0.0b2/photonlibpy.egg-info/requires.txt +0 -12
  31. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/__init__.py +0 -0
  32. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/estimatedRobotPose.py +0 -0
  33. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/estimation/__init__.py +0 -0
  34. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/estimation/cameraTargetRelation.py +0 -0
  35. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/generated/__init__.py +0 -0
  36. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/networktables/__init__.py +0 -0
  37. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/packet.py +0 -0
  38. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/photonPoseEstimator.py +0 -0
  39. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/simulation/__init__.py +0 -0
  40. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/simulation/videoSimUtil.py +0 -0
  41. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/simulation/visionTargetSim.py +0 -0
  42. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy/targeting/__init__.py +0 -0
  43. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy.egg-info/dependency_links.txt +0 -0
  44. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/photonlibpy.egg-info/top_level.txt +0 -0
  45. {photonlibpy-2025.0.0b2 → photonlibpy-2025.0.0b3}/setup.cfg +0 -0

PKG-INFO
@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: photonlibpy
- Version: 2025.0.0b2
- Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2025.0.0-beta-2 .
+ Version: 2025.0.0b3
+ Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2025.0.0-beta-3 .
  Home-page: https://photonvision.org
  Author: Photonvision Development Team
  Description-Content-Type: text/markdown

photonlibpy/estimation/openCVHelp.py
@@ -1,5 +1,5 @@
  import math
- from typing import Any, Tuple
+ from typing import Any

  import cv2 as cv
  import numpy as np
@@ -48,13 +48,13 @@ class OpenCVHelp:
  )

  @staticmethod
- def avgPoint(points: list[Tuple[float, float]]) -> Tuple[float, float]:
+ def avgPoint(points: np.ndarray) -> np.ndarray:
  x = 0.0
  y = 0.0
  for p in points:
- x += p[0]
- y += p[1]
- return (x / len(points), y / len(points))
+ x += p[0, 0]
+ y += p[0, 1]
+ return np.array([[x / len(points), y / len(points)]])

  @staticmethod
  def pointsToTargetCorners(points: np.ndarray) -> list[TargetCorner]:
@@ -126,6 +126,9 @@ class OpenCVHelp:
  objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))

  alt: Transform3d | None = None
+ reprojectionError: cv.typing.MatLike | None = None
+ best: Transform3d = Transform3d()
+
  for tries in range(2):
  retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
  objectMat,
@@ -145,7 +148,9 @@
  OpenCVHelp.rVecToRotation(rvecs[1]),
  )

- if not math.isnan(reprojectionError[0, 0]):
+ if reprojectionError is not None and not math.isnan(
+ reprojectionError[0, 0]
+ ):
  break
  else:
  pt = imagePoints[0]
@@ -153,7 +158,7 @@
  pt[0, 1] -= 0.001
  imagePoints[0] = pt

- if math.isnan(reprojectionError[0, 0]):
+ if reprojectionError is None or math.isnan(reprojectionError[0, 0]):
  print("SolvePNP_Square failed!")
  return None

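
Note: avgPoint now takes and returns OpenCV-style point arrays of shape (N, 1, 2) instead of lists of (x, y) tuples. A minimal sketch of the new calling convention (the corner values and the import path are assumptions for illustration):

    import numpy as np

    from photonlibpy.estimation.openCVHelp import OpenCVHelp

    # Four image corners in the (N, 1, 2) layout OpenCV uses for point sets.
    corners = np.array([[[0.0, 0.0]], [[4.0, 0.0]], [[4.0, 2.0]], [[0.0, 2.0]]])

    center = OpenCVHelp.avgPoint(corners)  # array([[2.0, 1.0]]), shape (1, 2)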

photonlibpy-2025.0.0b3/photonlibpy/estimation/rotTrlTransform3d.py (new file)
@@ -0,0 +1,55 @@
+ from typing import Self
+
+ from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d
+
+
+ class RotTrlTransform3d:
+ def __init__(
+ self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
+ ):
+ self.rot = rot
+ self.trl = trl
+
+ def inverse(self) -> Self:
+ invRot = -self.rot
+ invTrl = -(self.trl.rotateBy(invRot))
+ return type(self)(invRot, invTrl)
+
+ def getTransform(self) -> Transform3d:
+ return Transform3d(self.trl, self.rot)
+
+ def getTranslation(self) -> Translation3d:
+ return self.trl
+
+ def getRotation(self) -> Rotation3d:
+ return self.rot
+
+ def applyTranslation(self, trlToApply: Translation3d) -> Translation3d:
+ return trlToApply.rotateBy(self.rot) + self.trl
+
+ def applyRotation(self, rotToApply: Rotation3d) -> Rotation3d:
+ return rotToApply + self.rot
+
+ def applyPose(self, poseToApply: Pose3d) -> Pose3d:
+ return Pose3d(
+ self.applyTranslation(poseToApply.translation()),
+ self.applyRotation(poseToApply.rotation()),
+ )
+
+ def applyTrls(self, rots: list[Rotation3d]) -> list[Rotation3d]:
+ retVal: list[Rotation3d] = []
+ for rot in rots:
+ retVal.append(self.applyRotation(rot))
+ return retVal
+
+ @classmethod
+ def makeRelativeTo(cls, pose: Pose3d) -> Self:
+ return cls(pose.rotation(), pose.translation()).inverse()
+
+ @classmethod
+ def makeBetweenPoses(cls, initial: Pose3d, last: Pose3d) -> Self:
+ return cls(
+ last.rotation() - initial.rotation(),
+ last.translation()
+ - initial.translation().rotateBy(last.rotation() - initial.rotation()),
+ )
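
Note: the rewritten RotTrlTransform3d splits the old apply() into applyTranslation/applyRotation/applyPose and adds makeBetweenPoses. A short usage sketch (the poses are invented for illustration):

    from wpimath.geometry import Pose3d, Rotation3d, Translation3d

    from photonlibpy.estimation.rotTrlTransform3d import RotTrlTransform3d

    cameraPose = Pose3d(Translation3d(1.0, 2.0, 0.5), Rotation3d())
    targetPose = Pose3d(Translation3d(4.0, 0.0, 1.0), Rotation3d(0.0, 0.0, 3.14))

    # Maps field-frame quantities into the camera frame.
    camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
    targetInCamera = camRt.applyPose(targetPose)

    # Rotation/translation that takes the first pose onto the second.
    between = RotTrlTransform3d.makeBetweenPoses(cameraPose, targetPose)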

photonlibpy-2025.0.0b3/photonlibpy/estimation/targetModel.py (new file)
@@ -0,0 +1,120 @@
+ import math
+ from typing import List, Self
+
+ from wpimath.geometry import Pose3d, Rotation2d, Rotation3d, Translation3d
+ from wpimath.units import meters
+
+ from . import RotTrlTransform3d
+
+
+ class TargetModel:
+
+ def __init__(self):
+ self.vertices: List[Translation3d] = []
+ self.isPlanar = False
+ self.isSpherical = False
+
+ @classmethod
+ def createPlanar(cls, width: meters, height: meters) -> Self:
+ tm = cls()
+
+ tm.isPlanar = True
+ tm.isSpherical = False
+ tm.vertices = [
+ Translation3d(0.0, -width / 2.0, -height / 2.0),
+ Translation3d(0.0, width / 2.0, -height / 2.0),
+ Translation3d(0.0, width / 2.0, height / 2.0),
+ Translation3d(0.0, -width / 2.0, height / 2.0),
+ ]
+ return tm
+
+ @classmethod
+ def createCuboid(cls, length: meters, width: meters, height: meters) -> Self:
+ tm = cls()
+ verts = [
+ Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
+ Translation3d(length / 2.0, width / 2.0, -height / 2.0),
+ Translation3d(length / 2.0, width / 2.0, height / 2.0),
+ Translation3d(length / 2.0, -width / 2.0, height / 2.0),
+ Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
+ Translation3d(-length / 2.0, width / 2.0, height / 2.0),
+ Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
+ Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
+ ]
+
+ tm._common_construction(verts)
+
+ return tm
+
+ @classmethod
+ def createSpheroid(cls, diameter: meters) -> Self:
+ tm = cls()
+
+ tm.isPlanar = False
+ tm.isSpherical = True
+ tm.vertices = [
+ Translation3d(0.0, -diameter / 2.0, 0.0),
+ Translation3d(0.0, 0.0, -diameter / 2.0),
+ Translation3d(0.0, diameter / 2.0, 0.0),
+ Translation3d(0.0, 0.0, diameter / 2.0),
+ ]
+
+ return tm
+
+ @classmethod
+ def createArbitrary(cls, verts: List[Translation3d]) -> Self:
+ tm = cls()
+ tm._common_construction(verts)
+
+ return tm
+
+ def _common_construction(self, verts: List[Translation3d]) -> None:
+ self.isSpherical = False
+ if len(verts) <= 2:
+ self.vertices = []
+ self.isPlanar = False
+ else:
+ cornersPlaner = True
+ for corner in verts:
+ if abs(corner.X() < 1e-4):
+ cornersPlaner = False
+ self.isPlanar = cornersPlaner
+
+ self.vertices = verts
+
+ def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
+ basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())
+
+ retVal = []
+
+ for vert in self.vertices:
+ retVal.append(basisChange.applyTranslation(vert))
+
+ return retVal
+
+ @classmethod
+ def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
+ relCam = cameraTrl - tgtTrl
+ orientToCam = Rotation3d(
+ 0.0,
+ Rotation2d(math.hypot(relCam.X(), relCam.Y()), relCam.Z()).radians(),
+ Rotation2d(relCam.X(), relCam.Y()).radians(),
+ )
+ return Pose3d(tgtTrl, orientToCam)
+
+ def getVertices(self) -> List[Translation3d]:
+ return self.vertices
+
+ def getIsPlanar(self) -> bool:
+ return self.isPlanar
+
+ def getIsSpherical(self) -> bool:
+ return self.isSpherical
+
+ @classmethod
+ def AprilTag36h11(cls) -> Self:
+ return cls.createPlanar(width=6.5 * 0.0254, height=6.5 * 0.0254)
+
+ @classmethod
+ def AprilTag16h5(cls) -> Self:
+ return cls.createPlanar(width=6.0 * 0.0254, height=6.0 * 0.0254)
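
Note: TargetModel drops the keyword-overloaded constructor in favour of explicit factory classmethods. A sketch of the new construction API (the dimensions are arbitrary):

    from photonlibpy.estimation.targetModel import TargetModel

    tag = TargetModel.AprilTag36h11()                       # 6.5 in square tag
    panel = TargetModel.createPlanar(width=0.3, height=0.2)
    crate = TargetModel.createCuboid(length=0.5, width=0.5, height=0.5)
    ball = TargetModel.createSpheroid(diameter=0.24)

    print(panel.getIsPlanar(), ball.getIsSpherical())       # True True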

photonlibpy/estimation/visionEstimation.py
@@ -16,10 +16,10 @@ class VisionEstimation:
  id = tag.getFiducialId()
  maybePose = layout.getTagPose(id)
  if maybePose:
- tag = AprilTag()
- tag.ID = id
- tag.pose = maybePose
- retVal.append(tag)
+ aprilTag = AprilTag()
+ aprilTag.ID = id
+ aprilTag.pose = maybePose
+ retVal.append(aprilTag)
  return retVal

  @staticmethod

photonlibpy/generated/MultiTargetPNPResultSerde.py
@@ -20,8 +20,14 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
- from ..targeting import *
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+ from ..targeting import MultiTargetPNPResult # noqa
+ from ..targeting import PnpResult # noqa


  class MultiTargetPNPResultSerde:

photonlibpy/generated/PhotonPipelineMetadataSerde.py
@@ -20,8 +20,13 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
- from ..targeting import *
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+ from ..targeting import PhotonPipelineMetadata # noqa


  class PhotonPipelineMetadataSerde:

photonlibpy/generated/PhotonPipelineResultSerde.py
@@ -20,8 +20,16 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
- from ..targeting import *
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+ from ..targeting import MultiTargetPNPResult # noqa
+ from ..targeting import PhotonPipelineMetadata # noqa
+ from ..targeting import PhotonPipelineResult # noqa
+ from ..targeting import PhotonTrackedTarget # noqa


  class PhotonPipelineResultSerde:

photonlibpy/generated/PhotonTrackedTargetSerde.py
@@ -20,8 +20,14 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
- from ..targeting import *
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+ from ..targeting import PhotonTrackedTarget # noqa
+ from ..targeting import TargetCorner # noqa


  class PhotonTrackedTargetSerde:

photonlibpy/generated/PnpResultSerde.py
@@ -20,8 +20,13 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
- from ..targeting import *
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+ from ..targeting import PnpResult # noqa


  class PnpResultSerde:

photonlibpy/generated/TargetCornerSerde.py
@@ -20,8 +20,13 @@
  ## --> DO NOT MODIFY <--
  ###############################################################################

+ from typing import TYPE_CHECKING
+
  from ..packet import Packet
- from ..targeting import *
+ from ..targeting import * # noqa
+
+ if TYPE_CHECKING:
+ from ..targeting import TargetCorner # noqa


  class TargetCornerSerde:
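
Note: all six generated serde modules gain the same typing.TYPE_CHECKING guard, so the concrete targeting classes are imported only for static analysis while the wildcard import continues to supply them at runtime. A generic sketch of the pattern (module and class names here are hypothetical, not from this package):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen by type checkers only; never executed, so no import cost or cycle at runtime.
        from mypackage.models import Widget  # hypothetical

    def describe(widget: "Widget") -> str:
        # String annotation, so Widget does not need to exist when this runs.
        return f"widget {id(widget)}"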

photonlibpy/networktables/NTTopicSet.py
@@ -10,8 +10,10 @@ PhotonPipelineResult_TYPE_STRING = (

  class NTTopicSet:

- def __init__(self) -> None:
- self.subTable = nt.NetworkTableInstance.getDefault()
+ def __init__(self, tableName: str, cameraName: str) -> None:
+ instance = nt.NetworkTableInstance.getDefault()
+ photonvision_root_table = instance.getTable(tableName)
+ self.subTable = photonvision_root_table.getSubTable(cameraName)

  def updateEntries(self) -> None:
  options = nt.PubSubOptions()
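
Note: NTTopicSet is now told which root table and camera sub-table to publish under instead of holding the bare default NetworkTables instance. A minimal sketch of the new constructor (the camera name is illustrative):

    from photonlibpy.networktables.NTTopicSet import NTTopicSet

    # "photonvision" is the root table PhotonVision publishes under; the second
    # argument is the camera's sub-table name.
    ts = NTTopicSet("photonvision", "my_camera")
    ts.updateEntries()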

photonlibpy/photonCamera.py
@@ -231,7 +231,7 @@ class PhotonCamera:
  versionString = self.versionEntry.get(defaultValue="")
  localUUID = PhotonPipelineResult.photonStruct.MESSAGE_VERSION

- remoteUUID = self._rawBytesEntry.getTopic().getProperty("message_uuid")
+ remoteUUID = str(self._rawBytesEntry.getTopic().getProperty("message_uuid"))

  if not remoteUUID:
  wpilib.reportWarning(

photonlibpy/simulation/photonCameraSim.py
@@ -4,8 +4,8 @@ import typing
  import cscore as cs
  import cv2 as cv
  import numpy as np
- import robotpy_apriltag
  import wpilib
+ from robotpy_apriltag import AprilTagField, AprilTagFieldLayout
  from wpimath.geometry import Pose3d, Transform3d
  from wpimath.units import meters, seconds

@@ -31,7 +31,7 @@ class PhotonCameraSim:
  def __init__(
  self,
  camera: PhotonCamera,
- props: SimCameraProperties | None = None,
+ props: SimCameraProperties = SimCameraProperties.PERFECT_90DEG(),
  minTargetAreaPercent: float | None = None,
  maxSightRange: meters | None = None,
  ):
@@ -41,37 +41,12 @@ class PhotonCameraSim:
  self.videoSimRawEnabled: bool = False
  self.videoSimWireframeEnabled: bool = False
  self.videoSimWireframeResolution: float = 0.1
- self.videoSimProcEnabled: bool = True
- self.ts = NTTopicSet()
+ self.videoSimProcEnabled: bool = (
+ False # TODO switch this back to default True when the functionality is enabled
+ )
  self.heartbeatCounter: int = 0
  self.nextNtEntryTime = int(wpilib.Timer.getFPGATimestamp() * 1e6)
- self.tagLayout = robotpy_apriltag.loadAprilTagLayoutField(
- robotpy_apriltag.AprilTagField.k2024Crescendo
- )
-
- if (
- camera is not None
- and props is None
- and minTargetAreaPercent is None
- and maxSightRange is None
- ):
- props = SimCameraProperties.PERFECT_90DEG()
- elif (
- camera is not None
- and props is not None
- and minTargetAreaPercent is not None
- and maxSightRange is not None
- ):
- pass
- elif (
- camera is not None
- and props is not None
- and minTargetAreaPercent is None
- and maxSightRange is None
- ):
- pass
- else:
- raise Exception("Invalid Constructor Called")
+ self.tagLayout = AprilTagFieldLayout.loadField(AprilTagField.k2024Crescendo)

  self.cam = camera
  self.prop = props
@@ -101,16 +76,11 @@ class PhotonCameraSim:
  (self.prop.getResWidth(), self.prop.getResHeight())
  )

- self.ts.subTable = self.cam._cameraTable
+ self.ts = NTTopicSet("photonvision", self.cam.getName())
  self.ts.updateEntries()

  # Handle this last explicitly for this function signature because the other constructor is called in the initialiser list
- if (
- camera is not None
- and props is not None
- and minTargetAreaPercent is not None
- and maxSightRange is not None
- ):
+ if minTargetAreaPercent is not None and maxSightRange is not None:
  self.minTargetAreaPercent = minTargetAreaPercent
  self.maxSightRange = maxSightRange

@@ -194,19 +164,19 @@ class PhotonCameraSim:
  self.maxSightRange = range

  def enableRawStream(self, enabled: bool) -> None:
+ self.videoSimRawEnabled = enabled
  raise Exception("Raw stream not implemented")
- # self.videoSimRawEnabled = enabled

  def enableDrawWireframe(self, enabled: bool) -> None:
+ self.videoSimWireframeEnabled = enabled
  raise Exception("Wireframe not implemented")
- # self.videoSimWireframeEnabled = enabled

  def setWireframeResolution(self, resolution: float) -> None:
  self.videoSimWireframeResolution = resolution

  def enableProcessedStream(self, enabled: bool) -> None:
+ self.videoSimProcEnabled = enabled
  raise Exception("Processed stream not implemented")
- # self.videoSimProcEnabled = enabled

  def process(
  self, latency: seconds, cameraPose: Pose3d, targets: list[VisionTargetSim]
@@ -217,9 +187,7 @@

  targets.sort(key=distance, reverse=True)

- visibleTgts: list[
- typing.Tuple[VisionTargetSim, list[typing.Tuple[float, float]]]
- ] = []
+ visibleTgts: list[typing.Tuple[VisionTargetSim, np.ndarray]] = []
  detectableTgts: list[PhotonTrackedTarget] = []

  camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
@@ -258,6 +226,7 @@
  ] * 4
  t = (l + 1) % 4
  b = (l + 1) % 4
+ r = 0
  for i in range(4):
  if i == l:
  continue
@@ -271,14 +240,14 @@
  if i != t and i != l and i != b:
  r = i
  rect = cv.RotatedRect(
- center,
+ (center[0, 0], center[0, 1]),
  (
  imagePoints[r, 0, 0] - lc[0, 0],
  imagePoints[b, 0, 1] - imagePoints[t, 0, 1],
  ),
  -angles[r],
  )
- imagePoints = rect.points()
+ imagePoints = np.array([[p[0], p[1], p[2]] for p in rect.points()])

  visibleTgts.append((tgt, imagePoints))
  noisyTargetCorners = self.prop.estPixelNoise(imagePoints)
@@ -325,13 +294,13 @@
  )

  # Video streams disabled for now
- if self.enableRawStream:
+ if self.videoSimRawEnabled:
  # VideoSimUtil::UpdateVideoProp(videoSimRaw, prop);
  # cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()};
  # cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1);
  # blankFrame.assignTo(videoSimFrameRaw);
  pass
- if self.enableProcessedStream:
+ if self.videoSimProcEnabled:
  # VideoSimUtil::UpdateVideoProp(videoSimProcessed, prop);
  pass

@@ -385,6 +354,7 @@
  self.ts.targetSkewEntry.set(0.0, receiveTimestamp)
  else:
  bestTarget = result.getBestTarget()
+ assert bestTarget

  self.ts.targetPitchEntry.set(bestTarget.getPitch(), receiveTimestamp)
  self.ts.targetYawEntry.set(bestTarget.getYaw(), receiveTimestamp)
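
Note: with the overload checks removed, PhotonCameraSim defaults its properties to SimCameraProperties.PERFECT_90DEG() and applies the area/range limits only when both are supplied. A hedged construction sketch (camera name and limit values are illustrative):

    from photonlibpy.photonCamera import PhotonCamera
    from photonlibpy.simulation.photonCameraSim import PhotonCameraSim
    from photonlibpy.simulation.simCameraProperties import SimCameraProperties

    cam = PhotonCamera("my_camera")

    # Default: perfect 90-degree-FOV camera model.
    simCam = PhotonCameraSim(cam)

    # Explicit properties plus detection limits.
    props = SimCameraProperties.LL2_640_480()
    simCamTuned = PhotonCameraSim(cam, props, minTargetAreaPercent=0.1, maxSightRange=5.0)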

photonlibpy/simulation/simCameraProperties.py
@@ -11,7 +11,7 @@ from ..estimation import RotTrlTransform3d


  class SimCameraProperties:
- def __init__(self, path: str | None = None, width: int = 0, height: int = 0):
+ def __init__(self):
  self.resWidth: int = -1
  self.resHeight: int = -1
  self.camIntrinsics: np.ndarray = np.zeros((3, 3)) # [3,3]
@@ -24,57 +24,41 @@ class SimCameraProperties:
  self.latencyStdDev: seconds = 0.0
  self.viewplanes: list[np.ndarray] = [] # [3,1]

- if path is None:
- self.setCalibration(960, 720, fovDiag=Rotation2d(math.radians(90.0)))
- else:
- raise Exception("not yet implemented")
-
- def setCalibration(
- self,
- width: int,
- height: int,
- *,
- fovDiag: Rotation2d | None = None,
- newCamIntrinsics: np.ndarray | None = None,
- newDistCoeffs: np.ndarray | None = None,
- ):
- # Should be an inverted XOR on the args to differentiate between the signatures
-
- has_fov_args = fovDiag is not None
- has_matrix_args = newCamIntrinsics is not None and newDistCoeffs is not None
+ self.setCalibrationFromFOV(960, 720, fovDiag=Rotation2d(math.radians(90.0)))

- if (has_fov_args and has_matrix_args) or (
- not has_matrix_args and not has_fov_args
- ):
- raise Exception("not a correct function sig")
+ def setCalibrationFromFOV(
+ self, width: int, height: int, fovDiag: Rotation2d
+ ) -> None:
+ if fovDiag.degrees() < 1.0 or fovDiag.degrees() > 179.0:
+ fovDiag = Rotation2d.fromDegrees(max(min(fovDiag.degrees(), 179.0), 1.0))
+ logging.error("Requested invalid FOV! Clamping between (1, 179) degrees...")

- if has_fov_args:
- if fovDiag.degrees() < 1.0 or fovDiag.degrees() > 179.0:
- fovDiag = Rotation2d.fromDegrees(
- max(min(fovDiag.degrees(), 179.0), 1.0)
- )
- logging.error(
- "Requested invalid FOV! Clamping between (1, 179) degrees..."
- )
+ resDiag = math.sqrt(width * width + height * height)
+ diagRatio = math.tan(fovDiag.radians() / 2.0)
+ fovWidth = Rotation2d(math.atan((diagRatio * (width / resDiag)) * 2))
+ fovHeight = Rotation2d(math.atan(diagRatio * (height / resDiag)) * 2)

- resDiag = math.sqrt(width * width + height * height)
- diagRatio = math.tan(fovDiag.radians() / 2.0)
- fovWidth = Rotation2d(math.atan((diagRatio * (width / resDiag)) * 2))
- fovHeight = Rotation2d(math.atan(diagRatio * (height / resDiag)) * 2)
+ newDistCoeffs = np.zeros((8, 1))

- newDistCoeffs = np.zeros((8, 1))
+ cx = width / 2.0 - 0.5
+ cy = height / 2.0 - 0.5

- cx = width / 2.0 - 0.5
- cy = height / 2.0 - 0.5
+ fx = cx / math.tan(fovWidth.radians() / 2.0)
+ fy = cy / math.tan(fovHeight.radians() / 2.0)

- fx = cx / math.tan(fovWidth.radians() / 2.0)
- fy = cy / math.tan(fovHeight.radians() / 2.0)
+ newCamIntrinsics = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])

- newCamIntrinsics = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
+ self.setCalibrationFromIntrinsics(
+ width, height, newCamIntrinsics, newDistCoeffs
+ )

- # really convince python we are doing the right thing
- assert newCamIntrinsics is not None
- assert newDistCoeffs is not None
+ def setCalibrationFromIntrinsics(
+ self,
+ width: int,
+ height: int,
+ newCamIntrinsics: np.ndarray,
+ newDistCoeffs: np.ndarray,
+ ) -> None:

  self.resWidth = width
  self.resHeight = height
@@ -171,10 +155,8 @@ class SimCameraProperties:
  def getLatencyStdDev(self) -> seconds:
  return self.latencyStdDev

- def getContourAreaPercent(self, points: list[typing.Tuple[float, float]]) -> float:
- return (
- cv.contourArea(cv.convexHull(np.array(points))) / self.getResArea() * 100.0
- )
+ def getContourAreaPercent(self, points: np.ndarray) -> float:
+ return cv.contourArea(cv.convexHull(points)) / self.getResArea() * 100.0

  def getPixelYaw(self, pixelX: float) -> Rotation2d:
  fx = self.camIntrinsics[0, 0]
@@ -188,14 +170,14 @@ class SimCameraProperties:
  yOffset = cy - pixelY
  return Rotation2d(fy, -yOffset)

- def getPixelRot(self, point: typing.Tuple[int, int]) -> Rotation3d:
+ def getPixelRot(self, point: cv.typing.Point2f) -> Rotation3d:
  return Rotation3d(
  0.0,
  self.getPixelPitch(point[1]).radians(),
  self.getPixelYaw(point[0]).radians(),
  )

- def getCorrectedPixelRot(self, point: typing.Tuple[float, float]) -> Rotation3d:
+ def getCorrectedPixelRot(self, point: cv.typing.Point2f) -> Rotation3d:
  fx = self.camIntrinsics[0, 0]
  cx = self.camIntrinsics[0, 2]
  xOffset = cx - point[0]
@@ -226,8 +208,8 @@ class SimCameraProperties:
  def getVisibleLine(
  self, camRt: RotTrlTransform3d, a: Translation3d, b: Translation3d
  ) -> typing.Tuple[float | None, float | None]:
- relA = camRt.apply(a)
- relB = camRt.apply(b)
+ relA = camRt.applyTranslation(a)
+ relB = camRt.applyTranslation(b)

  if relA.X() <= 0.0 and relB.X() <= 0.0:
  return (None, None)
@@ -279,7 +261,7 @@ class SimCameraProperties:
  ipts[i] = None
  break

- if not ipts[i]:
+ if ipts[i] is None:
  continue

  for j in range(i - 1, 0 - 1):
@@ -357,7 +339,7 @@ class SimCameraProperties:
  @classmethod
  def PI4_LIFECAM_320_240(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  320,
  240,
  newCamIntrinsics=np.array(
@@ -391,7 +373,7 @@ class SimCameraProperties:
  @classmethod
  def PI4_LIFECAM_640_480(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  640,
  480,
  newCamIntrinsics=np.array(
@@ -425,7 +407,7 @@ class SimCameraProperties:
  @classmethod
  def LL2_640_480(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  640,
  480,
  newCamIntrinsics=np.array(
@@ -459,7 +441,7 @@ class SimCameraProperties:
  @classmethod
  def LL2_960_720(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  960,
  720,
  newCamIntrinsics=np.array(
@@ -493,7 +475,7 @@ class SimCameraProperties:
  @classmethod
  def LL2_1280_720(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  1280,
  720,
  newCamIntrinsics=np.array(
@@ -527,7 +509,7 @@ class SimCameraProperties:
  @classmethod
  def OV9281_640_480(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  640,
  480,
  newCamIntrinsics=np.array(
@@ -561,7 +543,7 @@ class SimCameraProperties:
  @classmethod
  def OV9281_800_600(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  800,
  600,
  newCamIntrinsics=np.array(
@@ -595,7 +577,7 @@ class SimCameraProperties:
  @classmethod
  def OV9281_1280_720(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  1280,
  720,
  newCamIntrinsics=np.array(
@@ -629,7 +611,7 @@ class SimCameraProperties:
  @classmethod
  def OV9281_1920_1080(cls) -> typing.Self:
  prop = cls()
- prop.setCalibration(
+ prop.setCalibrationFromIntrinsics(
  1920,
  1080,
  newCamIntrinsics=np.array(
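
Note: setCalibration is split into setCalibrationFromFOV and setCalibrationFromIntrinsics, so the keyword "inverted XOR" validation disappears. A short sketch of both paths (the intrinsics below are placeholders, not a real calibration):

    import math

    import numpy as np
    from wpimath.geometry import Rotation2d

    from photonlibpy.simulation.simCameraProperties import SimCameraProperties

    props = SimCameraProperties()  # defaults to 960x720 with a 90-degree diagonal FOV

    # Derive the camera matrix from a diagonal field of view...
    props.setCalibrationFromFOV(1280, 720, fovDiag=Rotation2d(math.radians(100.0)))

    # ...or hand over a camera matrix and distortion coefficients directly.
    camMatrix = np.array([[900.0, 0.0, 639.5], [0.0, 900.0, 359.5], [0.0, 0.0, 1.0]])
    distCoeffs = np.zeros((8, 1))
    props.setCalibrationFromIntrinsics(1280, 720, camMatrix, distCoeffs)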

photonlibpy/simulation/visionSystemSim.py
@@ -84,7 +84,11 @@ class VisionSystemSim:
  if robotToCamera is None:
  return None
  else:
- return self.getRobotPose(time) + robotToCamera
+ pose = self.getRobotPose(time)
+ if pose:
+ return pose + robotToCamera
+ else:
+ return None

  def adjustCamera(
  self, cameraSim: PhotonCameraSim, robotToCamera: Transform3d
@@ -167,7 +171,7 @@ class VisionSystemSim:

  def getRobotPose(
  self, timestamp: seconds = wpilib.Timer.getFPGATimestamp()
- ) -> Pose3d:
+ ) -> Pose3d | None:
  return self.robotPoseBuffer.sample(timestamp)

  def resetRobotPose(self, robotPose: Pose2d | Pose3d) -> None:
@@ -216,15 +220,16 @@ class VisionSystemSim:
  timestampCapture = timestampNt * 1.0e-6 - latency

  lateRobotPose = self.getRobotPose(timestampCapture)
- lateCameraPose = lateRobotPose + self.getRobotToCamera(
- camSim, timestampCapture
- )
+ robotToCamera = self.getRobotToCamera(camSim, timestampCapture)
+ if lateRobotPose is None or robotToCamera is None:
+ return None
+ lateCameraPose = lateRobotPose + robotToCamera
  cameraPoses2d.append(lateCameraPose.toPose2d())

  camResult = camSim.process(latency, lateCameraPose, allTargets)
  camSim.submitProcessedFrame(camResult, timestampNt)
- for target in camResult.getTargets():
- trf = target.getBestCameraToTarget()
+ for tgt in camResult.getTargets():
+ trf = tgt.getBestCameraToTarget()
  if trf == Transform3d():
  continue

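
Note: getRobotPose (and therefore the camera-pose helpers) can now return None when the pose buffer has no sample, so callers should guard for it. A minimal sketch, assuming the simulator is constructed with a system name as in the Java API:

    from wpimath.geometry import Pose2d

    from photonlibpy.simulation.visionSystemSim import VisionSystemSim

    visionSim = VisionSystemSim("main")
    visionSim.resetRobotPose(Pose2d())

    pose = visionSim.getRobotPose()
    if pose is not None:
        print("robot at", pose.translation())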

photonlibpy/targeting/TargetCorner.py
@@ -2,7 +2,7 @@ from dataclasses import dataclass
  from typing import TYPE_CHECKING, ClassVar

  if TYPE_CHECKING:
- from .. import generated
+ from ..generated.TargetCornerSerde import TargetCornerSerde


  @dataclass
@@ -10,4 +10,4 @@ class TargetCorner:
  x: float = 0
  y: float = 9

- photonStruct: ClassVar["generated.TargetCornerSerde"]
+ photonStruct: ClassVar["TargetCornerSerde"]

photonlibpy/targeting/multiTargetPNPResult.py
@@ -3,10 +3,9 @@ from typing import TYPE_CHECKING, ClassVar

  from wpimath.geometry import Transform3d

- from ..packet import Packet
-
  if TYPE_CHECKING:
- from .. import generated
+ from ..generated.MultiTargetPNPResultSerde import MultiTargetPNPResultSerde
+ from ..generated.PnpResultSerde import PnpResultSerde


  @dataclass
@@ -17,7 +16,7 @@ class PnpResult:
  bestReprojErr: float = 0.0
  altReprojErr: float = 0.0

- photonStruct: ClassVar["generated.PnpResultSerde"]
+ photonStruct: ClassVar["PnpResultSerde"]


  @dataclass
@@ -27,14 +26,4 @@ class MultiTargetPNPResult:
  estimatedPose: PnpResult = field(default_factory=PnpResult)
  fiducialIDsUsed: list[int] = field(default_factory=list)

- def createFromPacket(self, packet: Packet) -> Packet:
- self.estimatedPose = PnpResult()
- self.estimatedPose.createFromPacket(packet)
- self.fiducialIDsUsed = []
- for _ in range(MultiTargetPNPResult._MAX_IDS):
- fidId = packet.decode16()
- if fidId >= 0:
- self.fiducialIDsUsed.append(fidId)
- return packet
-
- photonStruct: ClassVar["generated.MultiTargetPNPResultSerde"]
+ photonStruct: ClassVar["MultiTargetPNPResultSerde"]

photonlibpy/targeting/photonPipelineResult.py
@@ -5,7 +5,8 @@ from .multiTargetPNPResult import MultiTargetPNPResult
  from .photonTrackedTarget import PhotonTrackedTarget

  if TYPE_CHECKING:
- from .. import generated
+ from ..generated.PhotonPipelineMetadataSerde import PhotonPipelineMetadataSerde
+ from ..generated.PhotonPipelineResultSerde import PhotonPipelineResultSerde


  @dataclass
@@ -20,7 +21,7 @@ class PhotonPipelineMetadata:

  timeSinceLastPong: int = -1

- photonStruct: ClassVar["generated.PhotonPipelineMetadataSerde"]
+ photonStruct: ClassVar["PhotonPipelineMetadataSerde"]


  @dataclass
@@ -69,4 +70,4 @@ class PhotonPipelineResult:
  return None
  return self.getTargets()[0]

- photonStruct: ClassVar["generated.PhotonPipelineResultSerde"]
+ photonStruct: ClassVar["PhotonPipelineResultSerde"]

photonlibpy/targeting/photonTrackedTarget.py
@@ -7,7 +7,7 @@ from ..packet import Packet
  from .TargetCorner import TargetCorner

  if TYPE_CHECKING:
- from .. import generated
+ from ..generated.PhotonTrackedTargetSerde import PhotonTrackedTargetSerde


  @dataclass
@@ -63,4 +63,4 @@ class PhotonTrackedTarget:
  retList.append(TargetCorner(cx, cy))
  return retList

- photonStruct: ClassVar["generated.PhotonTrackedTargetSerde"]
+ photonStruct: ClassVar["PhotonTrackedTargetSerde"]

photonlibpy-2025.0.0b3/photonlibpy/version.py (new file)
@@ -0,0 +1,2 @@
+ PHOTONLIB_VERSION="v2025.0.0.beta.3"
+ PHOTONVISION_VERSION="v2025.0.0-beta-3"

photonlibpy.egg-info/PKG-INFO
@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: photonlibpy
- Version: 2025.0.0b2
- Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2025.0.0-beta-2 .
+ Version: 2025.0.0b3
+ Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2025.0.0-beta-3 .
  Home-page: https://photonvision.org
  Author: Photonvision Development Team
  Description-Content-Type: text/markdown

photonlibpy.egg-info/SOURCES.txt
@@ -1,3 +1,4 @@
+ pyproject.toml
  setup.py
  photonlibpy/__init__.py
  photonlibpy/estimatedRobotPose.py

photonlibpy-2025.0.0b3/photonlibpy.egg-info/requires.txt (new file)
@@ -0,0 +1,12 @@
+ numpy~=2.1
+ wpilib<2026,>=2025.0.0b1
+ robotpy-wpimath<2026,>=2025.0.0b1
+ robotpy-apriltag<2026,>=2025.0.0b1
+ robotpy-cscore<2026,>=2025.0.0b1
+ pyntcore<2026,>=2025.0.0b1
+
+ [:platform_machine != "roborio"]
+ opencv-python
+
+ [:platform_machine == "roborio"]
+ robotpy-opencv

photonlibpy-2025.0.0b3/pyproject.toml (new file)
@@ -0,0 +1,2 @@
+ [tool.mypy]
+ exclude = ["build","setup.py"]

setup.py
@@ -57,12 +57,12 @@ setup(
  packages=find_packages(),
  version=versionString,
  install_requires=[
- "numpy~=1.25",
- "wpilib<2025,>=2024.0.0b2",
- "robotpy-wpimath<2025,>=2024.0.0b2",
- "robotpy-apriltag<2025,>=2024.0.0b2",
- "robotpy-cscore<2025,>=2024.0.0.b2",
- "pyntcore<2025,>=2024.0.0b2",
+ "numpy~=2.1",
+ "wpilib<2026,>=2025.0.0b1",
+ "robotpy-wpimath<2026,>=2025.0.0b1",
+ "robotpy-apriltag<2026,>=2025.0.0b1",
+ "robotpy-cscore<2026,>=2025.0.0b1",
+ "pyntcore<2026,>=2025.0.0b1",
  "robotpy-opencv;platform_machine=='roborio'",
  "opencv-python;platform_machine!='roborio'",
  ],

photonlibpy-2025.0.0b2/photonlibpy/estimation/rotTrlTransform3d.py (removed)
@@ -1,32 +0,0 @@
- from typing import Self
-
- from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d
-
-
- class RotTrlTransform3d:
- def __init__(
- self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
- ):
- self.rot = rot
- self.trl = trl
-
- def inverse(self) -> Self:
- invRot = -self.rot
- invTrl = -(self.trl.rotateBy(invRot))
- return type(self)(invRot, invTrl)
-
- def getTransform(self) -> Transform3d:
- return Transform3d(self.trl, self.rot)
-
- def getTranslation(self) -> Translation3d:
- return self.trl
-
- def getRotation(self) -> Rotation3d:
- return self.rot
-
- def apply(self, trlToApply: Translation3d) -> Translation3d:
- return trlToApply.rotateBy(self.rot) + self.trl
-
- @classmethod
- def makeRelativeTo(cls, pose: Pose3d) -> Self:
- return cls(pose.rotation(), pose.translation()).inverse()

photonlibpy-2025.0.0b2/photonlibpy/estimation/targetModel.py (removed)
@@ -1,137 +0,0 @@
- import math
- from typing import List, Self
-
- from wpimath.geometry import Pose3d, Rotation2d, Rotation3d, Translation3d
- from wpimath.units import meters
-
- from . import RotTrlTransform3d
-
-
- class TargetModel:
- def __init__(
- self,
- *,
- width: meters | None = None,
- height: meters | None = None,
- length: meters | None = None,
- diameter: meters | None = None,
- verts: List[Translation3d] | None = None
- ):
-
- if (
- width is not None
- and height is not None
- and length is None
- and diameter is None
- and verts is None
- ):
- self.isPlanar = True
- self.isSpherical = False
- self.vertices = [
- Translation3d(0.0, -width / 2.0, -height / 2.0),
- Translation3d(0.0, width / 2.0, -height / 2.0),
- Translation3d(0.0, width / 2.0, height / 2.0),
- Translation3d(0.0, -width / 2.0, height / 2.0),
- ]
-
- return
-
- elif (
- length is not None
- and width is not None
- and height is not None
- and diameter is None
- and verts is None
- ):
- verts = [
- Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
- Translation3d(length / 2.0, width / 2.0, -height / 2.0),
- Translation3d(length / 2.0, width / 2.0, height / 2.0),
- Translation3d(length / 2.0, -width / 2.0, height / 2.0),
- Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
- Translation3d(-length / 2.0, width / 2.0, height / 2.0),
- Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
- Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
- ]
- # Handle the rest of this in the "default" case
- elif (
- diameter is not None
- and width is None
- and height is None
- and length is None
- and verts is None
- ):
- self.isPlanar = False
- self.isSpherical = True
- self.vertices = [
- Translation3d(0.0, -diameter / 2.0, 0.0),
- Translation3d(0.0, 0.0, -diameter / 2.0),
- Translation3d(0.0, diameter / 2.0, 0.0),
- Translation3d(0.0, 0.0, diameter / 2.0),
- ]
- return
- elif (
- verts is not None
- and width is None
- and height is None
- and length is None
- and diameter is None
- ):
- # Handle this in the "default" case
- pass
- else:
- raise Exception("Not a valid overload")
-
- # TODO maybe remove this if there is a better/preferred way
- # make the python type checking gods happy
- assert verts is not None
-
- self.isSpherical = False
- if len(verts) <= 2:
- self.vertices: List[Translation3d] = []
- self.isPlanar = False
- else:
- cornersPlaner = True
- for corner in verts:
- if abs(corner.X() < 1e-4):
- cornersPlaner = False
- self.isPlanar = cornersPlaner
-
- self.vertices = verts
-
- def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
- basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())
-
- retVal = []
-
- for vert in self.vertices:
- retVal.append(basisChange.apply(vert))
-
- return retVal
-
- @classmethod
- def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
- relCam = cameraTrl - tgtTrl
- orientToCam = Rotation3d(
- 0.0,
- Rotation2d(math.hypot(relCam.X(), relCam.Y()), relCam.Z()).radians(),
- Rotation2d(relCam.X(), relCam.Y()).radians(),
- )
- return Pose3d(tgtTrl, orientToCam)
-
- def getVertices(self) -> List[Translation3d]:
- return self.vertices
-
- def getIsPlanar(self) -> bool:
- return self.isPlanar
-
- def getIsSpherical(self) -> bool:
- return self.isSpherical
-
- @classmethod
- def AprilTag36h11(cls) -> Self:
- return cls(width=6.5 * 0.0254, height=6.5 * 0.0254)
-
- @classmethod
- def AprilTag16h5(cls) -> Self:
- return cls(width=6.0 * 0.0254, height=6.0 * 0.0254)

photonlibpy-2025.0.0b2/photonlibpy/version.py (removed)
@@ -1,2 +0,0 @@
- PHOTONLIB_VERSION="v2025.0.0.beta.2"
- PHOTONVISION_VERSION="v2025.0.0-beta-2"

photonlibpy-2025.0.0b2/photonlibpy.egg-info/requires.txt (removed)
@@ -1,12 +0,0 @@
- numpy~=1.25
- wpilib<2025,>=2024.0.0b2
- robotpy-wpimath<2025,>=2024.0.0b2
- robotpy-apriltag<2025,>=2024.0.0b2
- robotpy-cscore<2025,>=2024.0.0.b2
- pyntcore<2025,>=2024.0.0b2
-
- [:platform_machine != "roborio"]
- opencv-python
-
- [:platform_machine == "roborio"]
- robotpy-opencv