photonlibpy 2024.2.7__tar.gz → 2026.1.1rc3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- photonlibpy-2026.1.1rc3/PKG-INFO +24 -0
- photonlibpy-2026.1.1rc3/photonlibpy/__init__.py +35 -0
- photonlibpy-2026.1.1rc3/photonlibpy/estimatedRobotPose.py +36 -0
- photonlibpy-2026.1.1rc3/photonlibpy/estimation/__init__.py +5 -0
- photonlibpy-2026.1.1rc3/photonlibpy/estimation/cameraTargetRelation.py +25 -0
- photonlibpy-2026.1.1rc3/photonlibpy/estimation/openCVHelp.py +314 -0
- photonlibpy-2026.1.1rc3/photonlibpy/estimation/rotTrlTransform3d.py +74 -0
- photonlibpy-2026.1.1rc3/photonlibpy/estimation/targetModel.py +183 -0
- photonlibpy-2026.1.1rc3/photonlibpy/estimation/visionEstimation.py +113 -0
- photonlibpy-2026.1.1rc3/photonlibpy/generated/MultiTargetPNPResultSerde.py +70 -0
- photonlibpy-2026.1.1rc3/photonlibpy/generated/PhotonPipelineMetadataSerde.py +81 -0
- photonlibpy-2026.1.1rc3/photonlibpy/generated/PhotonPipelineResultSerde.py +78 -0
- photonlibpy-2026.1.1rc3/photonlibpy/generated/PhotonTrackedTargetSerde.py +126 -0
- photonlibpy-2026.1.1rc3/photonlibpy/generated/PnpResultSerde.py +83 -0
- photonlibpy-2026.1.1rc3/photonlibpy/generated/TargetCornerSerde.py +69 -0
- photonlibpy-2026.1.1rc3/photonlibpy/generated/__init__.py +8 -0
- photonlibpy-2026.1.1rc3/photonlibpy/networktables/NTTopicSet.py +81 -0
- photonlibpy-2026.1.1rc3/photonlibpy/networktables/__init__.py +1 -0
- photonlibpy-2026.1.1rc3/photonlibpy/packet.py +315 -0
- photonlibpy-2026.1.1rc3/photonlibpy/photonCamera.py +375 -0
- photonlibpy-2026.1.1rc3/photonlibpy/photonPoseEstimator.py +272 -0
- photonlibpy-2026.1.1rc3/photonlibpy/py.typed +0 -0
- photonlibpy-2026.1.1rc3/photonlibpy/simulation/__init__.py +13 -0
- photonlibpy-2026.1.1rc3/photonlibpy/simulation/photonCameraSim.py +490 -0
- photonlibpy-2026.1.1rc3/photonlibpy/simulation/simCameraProperties.py +783 -0
- photonlibpy-2026.1.1rc3/photonlibpy/simulation/videoSimUtil.py +2 -0
- photonlibpy-2026.1.1rc3/photonlibpy/simulation/visionSystemSim.py +335 -0
- photonlibpy-2026.1.1rc3/photonlibpy/simulation/visionTargetSim.py +60 -0
- photonlibpy-2026.1.1rc3/photonlibpy/targeting/TargetCorner.py +13 -0
- photonlibpy-2026.1.1rc3/photonlibpy/targeting/__init__.py +13 -0
- photonlibpy-2026.1.1rc3/photonlibpy/targeting/multiTargetPNPResult.py +29 -0
- photonlibpy-2026.1.1rc3/photonlibpy/targeting/photonPipelineResult.py +70 -0
- {photonlibpy-2024.2.7/photonlibpy → photonlibpy-2026.1.1rc3/photonlibpy/targeting}/photonTrackedTarget.py +13 -29
- photonlibpy-2026.1.1rc3/photonlibpy/timesync/__init__.py +1 -0
- photonlibpy-2026.1.1rc3/photonlibpy/timesync/timeSyncServer.py +94 -0
- photonlibpy-2026.1.1rc3/photonlibpy/version.py +2 -0
- photonlibpy-2026.1.1rc3/photonlibpy.egg-info/PKG-INFO +24 -0
- photonlibpy-2026.1.1rc3/photonlibpy.egg-info/SOURCES.txt +50 -0
- photonlibpy-2026.1.1rc3/photonlibpy.egg-info/requires.txt +9 -0
- {photonlibpy-2024.2.7 → photonlibpy-2026.1.1rc3}/photonlibpy.egg-info/top_level.txt +1 -0
- photonlibpy-2026.1.1rc3/pyproject.toml +2 -0
- {photonlibpy-2024.2.7 → photonlibpy-2026.1.1rc3}/setup.py +21 -16
- photonlibpy-2026.1.1rc3/test/__init__.py +0 -0
- photonlibpy-2026.1.1rc3/test/openCVHelp_test.py +205 -0
- photonlibpy-2026.1.1rc3/test/photonCamera_test.py +36 -0
- photonlibpy-2026.1.1rc3/test/photonPoseEstimator_test.py +287 -0
- photonlibpy-2026.1.1rc3/test/photonlibpy_test.py +42 -0
- photonlibpy-2026.1.1rc3/test/simCameraProperties_test.py +99 -0
- photonlibpy-2026.1.1rc3/test/testUtil.py +65 -0
- photonlibpy-2026.1.1rc3/test/visionSystemSim_test.py +617 -0
- photonlibpy-2024.2.7/PKG-INFO +0 -9
- photonlibpy-2024.2.7/photonlibpy/__init__.py +0 -1
- photonlibpy-2024.2.7/photonlibpy/estimatedRobotPose.py +0 -26
- photonlibpy-2024.2.7/photonlibpy/multiTargetPNPResult.py +0 -49
- photonlibpy-2024.2.7/photonlibpy/packet.py +0 -143
- photonlibpy-2024.2.7/photonlibpy/photonCamera.py +0 -195
- photonlibpy-2024.2.7/photonlibpy/photonPipelineResult.py +0 -43
- photonlibpy-2024.2.7/photonlibpy/photonPoseEstimator.py +0 -321
- photonlibpy-2024.2.7/photonlibpy/version.py +0 -2
- photonlibpy-2024.2.7/photonlibpy.egg-info/PKG-INFO +0 -9
- photonlibpy-2024.2.7/photonlibpy.egg-info/SOURCES.txt +0 -15
- photonlibpy-2024.2.7/photonlibpy.egg-info/requires.txt +0 -4
- {photonlibpy-2024.2.7 → photonlibpy-2026.1.1rc3}/photonlibpy.egg-info/dependency_links.txt +0 -0
- {photonlibpy-2024.2.7 → photonlibpy-2026.1.1rc3}/setup.cfg +0 -0
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: photonlibpy
|
|
3
|
+
Version: 2026.1.1rc3
|
|
4
|
+
Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2026.1.1-rc-3.
|
|
5
|
+
Home-page: https://photonvision.org
|
|
6
|
+
Author: Photonvision Development Team
|
|
7
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
Requires-Dist: numpy~=2.3
|
|
10
|
+
Requires-Dist: wpilib==2026.1.1
|
|
11
|
+
Requires-Dist: robotpy-wpimath==2026.1.1
|
|
12
|
+
Requires-Dist: robotpy-apriltag==2026.1.1
|
|
13
|
+
Requires-Dist: robotpy-cscore==2026.1.1
|
|
14
|
+
Requires-Dist: pyntcore==2026.1.1
|
|
15
|
+
Requires-Dist: opencv-python; platform_machine != "roborio"
|
|
16
|
+
Dynamic: author
|
|
17
|
+
Dynamic: classifier
|
|
18
|
+
Dynamic: description
|
|
19
|
+
Dynamic: description-content-type
|
|
20
|
+
Dynamic: home-page
|
|
21
|
+
Dynamic: requires-dist
|
|
22
|
+
Dynamic: summary
|
|
23
|
+
|
|
24
|
+
A Pure-python implementation of PhotonLib
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
#
|
|
2
|
+
# MIT License
|
|
3
|
+
#
|
|
4
|
+
# Copyright (c) PhotonVision
|
|
5
|
+
#
|
|
6
|
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
7
|
+
# of this software and associated documentation files (the "Software"), to deal
|
|
8
|
+
# in the Software without restriction, including without limitation the rights
|
|
9
|
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
10
|
+
# copies of the Software, and to permit persons to whom the Software is
|
|
11
|
+
# furnished to do so, subject to the following conditions:
|
|
12
|
+
#
|
|
13
|
+
# The above copyright notice and this permission notice shall be included in all
|
|
14
|
+
# copies or substantial portions of the Software.
|
|
15
|
+
#
|
|
16
|
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
17
|
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
18
|
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
19
|
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
20
|
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
21
|
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
22
|
+
# SOFTWARE.
|
|
23
|
+
#
|
|
24
|
+
|
|
25
|
+
from .estimatedRobotPose import EstimatedRobotPose
|
|
26
|
+
from .packet import Packet
|
|
27
|
+
from .photonCamera import PhotonCamera
|
|
28
|
+
from .photonPoseEstimator import PhotonPoseEstimator
|
|
29
|
+
|
|
30
|
+
__all__ = (
|
|
31
|
+
"EstimatedRobotPose",
|
|
32
|
+
"Packet",
|
|
33
|
+
"PhotonCamera",
|
|
34
|
+
"PhotonPoseEstimator",
|
|
35
|
+
)
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
###############################################################################
|
|
2
|
+
## Copyright (C) Photon Vision.
|
|
3
|
+
###############################################################################
|
|
4
|
+
## This program is free software: you can redistribute it and/or modify
|
|
5
|
+
## it under the terms of the GNU General Public License as published by
|
|
6
|
+
## the Free Software Foundation, either version 3 of the License, or
|
|
7
|
+
## (at your option) any later version.
|
|
8
|
+
##
|
|
9
|
+
## This program is distributed in the hope that it will be useful,
|
|
10
|
+
## but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
11
|
+
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
12
|
+
## GNU General Public License for more details.
|
|
13
|
+
##
|
|
14
|
+
## You should have received a copy of the GNU General Public License
|
|
15
|
+
## along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
16
|
+
###############################################################################
|
|
17
|
+
|
|
18
|
+
from dataclasses import dataclass
|
|
19
|
+
|
|
20
|
+
from wpimath.geometry import Pose3d
|
|
21
|
+
|
|
22
|
+
from .targeting.photonTrackedTarget import PhotonTrackedTarget
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@dataclass
class EstimatedRobotPose:
    """An estimated robot pose derived from a single PhotonVision pipeline result."""

    estimatedPose: Pose3d
    """The estimated robot pose in the field coordinate system"""

    timestampSeconds: float
    """The estimated time (in seconds) the frame used to derive the robot pose was taken"""

    targetsUsed: list[PhotonTrackedTarget]
    """A list of the targets used to compute this pose"""
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
import math
|
|
2
|
+
|
|
3
|
+
from wpimath.geometry import Pose3d, Rotation2d, Transform3d
|
|
4
|
+
from wpimath.units import meters
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class CameraTargetRelation:
    """Precomputed geometric relations between a camera pose and a target pose.

    All quantities are derived once in the constructor from the two supplied
    field-relative poses and exposed as attributes.

    :param cameraPose: The camera's pose in the field frame
    :param targetPose: The target's pose in the field frame
    """

    def __init__(self, cameraPose: Pose3d, targetPose: Pose3d):
        self.camPose = cameraPose

        # Camera-to-target transform and its straight-line / ground-plane distances.
        camToTarg = Transform3d(cameraPose, targetPose)
        self.camToTarg = camToTarg
        trl = camToTarg.translation()
        self.camToTargDist = trl.norm()
        distXY: meters = math.hypot(trl.X(), trl.Y())
        self.camToTargDistXY = distXY

        # Direction of the target as seen from the camera (yaw about Z,
        # pitch from the XY-plane; Z is negated so "up" is positive pitch).
        self.camToTargYaw = Rotation2d(camToTarg.X(), camToTarg.Y())
        self.camToTargPitch = Rotation2d(distXY, -camToTarg.Z())
        # Combined angular offset of the target from the camera's forward axis.
        self.camToTargAngle = Rotation2d(
            math.hypot(self.camToTargYaw.radians(), self.camToTargPitch.radians())
        )

        # Same quantities in the opposite direction: camera as seen from the target.
        targToCam = Transform3d(targetPose, cameraPose)
        self.targToCam = targToCam
        self.targToCamYaw = Rotation2d(targToCam.X(), targToCam.Y())
        self.targToCamPitch = Rotation2d(distXY, -targToCam.Z())
        # NOTE(review): attribute name keeps the original lowercase-"to" spelling
        # ("targtoCamAngle") for compatibility — confirm whether callers depend on it.
        self.targtoCamAngle = Rotation2d(
            math.hypot(self.targToCamYaw.radians(), self.targToCamPitch.radians())
        )
|
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import math
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
import cv2 as cv
|
|
6
|
+
import numpy as np
|
|
7
|
+
from wpimath.geometry import Rotation3d, Transform3d, Translation3d
|
|
8
|
+
|
|
9
|
+
from ..targeting import PnpResult, TargetCorner
|
|
10
|
+
from .rotTrlTransform3d import RotTrlTransform3d
|
|
11
|
+
|
|
12
|
+
NWU_TO_EDN = Rotation3d(np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]]))
|
|
13
|
+
EDN_TO_NWU = Rotation3d(np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]))
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class OpenCVHelp:
    """Static helpers for converting between WPILib (NWU) geometry types and
    OpenCV's (EDN) conventions, and for solvePnP-based pose estimation."""

    @staticmethod
    def getMinAreaRect(points: np.ndarray) -> cv.RotatedRect:
        """Returns the minimum-area rotated rectangle enclosing the given 2d points."""
        return cv.RotatedRect(*cv.minAreaRect(points))

    @staticmethod
    def translationNWUtoEDN(trl: Translation3d) -> Translation3d:
        """Rotates a translation from the NWU frame into OpenCV's EDN frame."""
        return trl.rotateBy(NWU_TO_EDN)

    @staticmethod
    def rotationNWUtoEDN(rot: Rotation3d) -> Rotation3d:
        """Re-expresses a rotation given in the NWU frame in OpenCV's EDN frame."""
        return -NWU_TO_EDN + (rot + NWU_TO_EDN)

    @staticmethod
    def translationToTVec(translations: list[Translation3d]) -> np.ndarray:
        """Creates a new :class:`np.array` with these 3d translations. The opencv tvec is a vector with
        three elements representing {x, y, z} in the EDN coordinate system.

        :param translations: The translations to convert into a np.array
        """

        retVal: list[list] = []
        for translation in translations:
            trl = OpenCVHelp.translationNWUtoEDN(translation)
            retVal.append([trl.X(), trl.Y(), trl.Z()])
        return np.array(
            retVal,
            dtype=np.float32,
        )

    @staticmethod
    def rotationToRVec(rotation: Rotation3d) -> np.ndarray:
        """Creates a new :class:`.np.array` with this 3d rotation. The opencv rvec Mat is a vector with
        three elements representing the axis scaled by the angle in the EDN coordinate system. (angle =
        norm, and axis = rvec / norm)

        :param rotation: The rotation to convert into a np.array
        """

        retVal: list[np.ndarray] = []
        rot = OpenCVHelp.rotationNWUtoEDN(rotation)
        rotVec = rot.getQuaternion().toRotationVector()
        retVal.append(rotVec)
        return np.array(
            retVal,
            dtype=np.float32,
        )

    @staticmethod
    def avgPoint(points: np.ndarray) -> np.ndarray:
        """Returns the arithmetic mean of the given (N, 1, 2) image points as a (1, 2) array."""
        x = 0.0
        y = 0.0
        for p in points:
            x += p[0, 0]
            y += p[0, 1]
        return np.array([[x / len(points), y / len(points)]])

    @staticmethod
    def pointsToTargetCorners(points: np.ndarray) -> list[TargetCorner]:
        """Converts an (N, 1, 2) array of image points into a list of TargetCorner."""
        corners = [TargetCorner(p[0, 0], p[0, 1]) for p in points]
        return corners

    @staticmethod
    def cornersToPoints(corners: list[TargetCorner]) -> np.ndarray:
        """Converts a list of TargetCorner into an (N, 1, 2) array of image points."""
        points = [[[c.x, c.y]] for c in corners]
        return np.array(points)

    @staticmethod
    def projectPoints(
        cameraMatrix: np.ndarray,
        distCoeffs: np.ndarray,
        camRt: RotTrlTransform3d,
        objectTranslations: list[Translation3d],
    ) -> np.ndarray:
        """Projects 3d field-relative translations into 2d image points for a camera
        described by its intrinsics, distortion, and field-to-camera transform.

        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
        :param distCoeffs: The camera distortion matrix in standard opencv form
        :param camRt: Transform mapping field-frame quantities into the camera frame
        :param objectTranslations: The 3d points to project

        :returns: The projected 2d image points, in opencv (N, 1, 2) layout
        """
        objectPoints = OpenCVHelp.translationToTVec(objectTranslations)
        rvec = OpenCVHelp.rotationToRVec(camRt.getRotation())
        tvec = OpenCVHelp.translationToTVec(
            [
                camRt.getTranslation(),
            ]
        )

        pts, _ = cv.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs)
        return pts

    @staticmethod
    def reorderCircular(
        elements: list[Any] | np.ndarray, backwards: bool, shiftStart: int
    ) -> list[Any]:
        """Reorders the list, optionally indexing backwards and wrapping around to the last element after
        the first, and shifting all indices in the direction of indexing.

        e.g.

        ({1,2,3}, false, 1) == {2,3,1}

        ({1,2,3}, true, 0) == {1,3,2}

        ({1,2,3}, true, 1) == {3,2,1}

        :param elements: list elements
        :param backwards: If indexing should happen in reverse (0, size-1, size-2, ...)
        :param shiftStart: How much the initial index should be shifted (instead of starting at index 0,
            start at shiftStart, negated if backwards)

        :returns: Reordered list
        """

        size = len(elements)
        reordered = []
        # Renamed from `dir` to avoid shadowing the builtin.
        step = -1 if backwards else 1
        for i in range(size):
            index = (i * step + shiftStart * step) % size
            if index < 0:
                index += size
            reordered.append(elements[index])
        return reordered

    @staticmethod
    def translationEDNToNWU(trl: Translation3d) -> Translation3d:
        """Convert a translation from EDN to NWU. For example, if you have a translation X,Y,Z
        {1, 0, 0} in EDN, this would be {0, -1, 0} in NWU.
        """

        return trl.rotateBy(EDN_TO_NWU)

    @staticmethod
    def rotationEDNToNWU(rot: Rotation3d) -> Rotation3d:
        """Convert a rotation delta from EDN to NWU, re-expressing the rotation in the
        NWU frame.
        """

        return -EDN_TO_NWU + (rot + EDN_TO_NWU)

    @staticmethod
    def tVecToTranslation(tvecInput: np.ndarray) -> Translation3d:
        """Returns a new 3d translation from this :class:`.Mat`. The opencv tvec is a vector with three
        elements representing {x, y, z} in the EDN coordinate system.

        :param tvecInput: The tvec to create a Translation3d from
        """

        return OpenCVHelp.translationEDNToNWU(Translation3d(tvecInput))

    @staticmethod
    def rVecToRotation(rvecInput: np.ndarray) -> Rotation3d:
        """Returns a 3d rotation from this :class:`.Mat`. The opencv rvec Mat is a vector with three
        elements representing the axis scaled by the angle in the EDN coordinate system. (angle = norm,
        and axis = rvec / norm)

        :param rvecInput: The rvec to create a Rotation3d from
        """

        return OpenCVHelp.rotationEDNToNWU(Rotation3d(rvecInput))

    @staticmethod
    def solvePNP_Square(
        cameraMatrix: np.ndarray,
        distCoeffs: np.ndarray,
        modelTrls: list[Translation3d],
        imagePoints: np.ndarray,
    ) -> PnpResult | None:
        """Finds the transformation(s) that map the camera's pose to the target's pose. The camera's pose
        relative to the target is determined by the supplied 3d points of the target's model and their
        associated 2d points imaged by the camera. The supplied model translations must be relative to
        the target's pose.

        For planar targets, there may be an alternate solution which is plausible given the 2d image
        points. This has an associated "ambiguity" which describes the ratio of reprojection error
        between the "best" and "alternate" solution.

        This method is intended for use with individual AprilTags, and will not work unless 4 points
        are provided.

        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
        :param distCoeffs: The camera distortion matrix in standard opencv form
        :param modelTrls: The translations of the object corners. These should have the object pose as
            their origin. These must come in a specific, pose-relative order (in NWU):

            - Point 0: [0, -squareLength / 2, squareLength / 2]
            - Point 1: [0, squareLength / 2, squareLength / 2]
            - Point 2: [0, squareLength / 2, -squareLength / 2]
            - Point 3: [0, -squareLength / 2, -squareLength / 2]
        :param imagePoints: The projection of these 3d object points into the 2d camera image. The order
            should match the given object point translations.

        :returns: The resulting transformation that maps the camera pose to the target pose and the
            ambiguity if an alternate solution is available, or None if solvePnP failed.
        """
        modelTrls = OpenCVHelp.reorderCircular(modelTrls, True, -1)
        imagePoints = np.array(OpenCVHelp.reorderCircular(imagePoints, True, -1))
        objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))

        alt: Transform3d | None = None
        reprojectionError: cv.typing.MatLike | None = None
        best: Transform3d = Transform3d()

        # Retry once with slightly nudged image points if the first solve returns NaN.
        for _ in range(2):
            # calc rvecs/tvecs and associated reprojection error from image points
            _, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
                objectMat,
                imagePoints,
                cameraMatrix,
                distCoeffs,
                flags=cv.SOLVEPNP_IPPE_SQUARE,
            )

            # convert to wpilib coordinates
            best = Transform3d(
                OpenCVHelp.tVecToTranslation(tvecs[0]),
                OpenCVHelp.rVecToRotation(rvecs[0]),
            )
            if len(tvecs) > 1:
                alt = Transform3d(
                    OpenCVHelp.tVecToTranslation(tvecs[1]),
                    OpenCVHelp.rVecToRotation(rvecs[1]),
                )

            # check if we got a NaN result
            if reprojectionError is not None and not math.isnan(
                reprojectionError[0, 0]
            ):
                break
            else:
                # Perturb the first image point a tiny amount and try again.
                pt = imagePoints[0]
                pt[0, 0] -= 0.001
                pt[0, 1] -= 0.001
                imagePoints[0] = pt

        # solvePnP failed
        if reprojectionError is None or math.isnan(reprojectionError[0, 0]):
            logger.error("SolvePNP_Square failed!")
            return None

        if alt:
            return PnpResult(
                best=best,
                bestReprojErr=reprojectionError[0, 0],
                alt=alt,
                altReprojErr=reprojectionError[1, 0],
                # NOTE: assumes the alternate reprojection error is nonzero.
                ambiguity=reprojectionError[0, 0] / reprojectionError[1, 0],
            )
        else:
            # We have no alternative so set it to best as well.
            # Fix: index the scalar error [0, 0] (not the length-1 row [0]),
            # consistent with the branch above and with solvePNP_SQPNP.
            return PnpResult(
                best=best,
                bestReprojErr=reprojectionError[0, 0],
                alt=best,
                altReprojErr=reprojectionError[0, 0],
            )

    @staticmethod
    def solvePNP_SQPNP(
        cameraMatrix: np.ndarray,
        distCoeffs: np.ndarray,
        modelTrls: list[Translation3d],
        imagePoints: np.ndarray,
    ) -> PnpResult | None:
        """Finds the transformation that maps the camera's pose to the origin of the supplied object. An
        "object" is simply a set of known 3d translations that correspond to the given 2d points. If,
        for example, the object translations are given relative to close-right corner of the blue
        alliance(the default origin), a camera-to-origin transformation is returned. If the
        translations are relative to a target's pose, a camera-to-target transformation is returned.

        There must be at least 3 points to use this method. This does not return an alternate
        solution-- if you are intending to use solvePNP on a single AprilTag, see {@link
        #solvePNP_SQUARE} instead.

        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
        :param distCoeffs: The camera distortion matrix in standard opencv form
        :param modelTrls: The translations of the object corners, relative to the field.
        :param imagePoints: The projection of these 3d object points into the 2d camera image. The order
            should match the given object point translations.

        :returns: The resulting transformation that maps the camera pose to the target pose. If the 3d
            model points are supplied relative to the origin, this transformation brings the camera to
            the origin. Returns None if solvePnP failed.
        """

        objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))

        _, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
            objectMat, imagePoints, cameraMatrix, distCoeffs, flags=cv.SOLVEPNP_SQPNP
        )

        error = reprojectionError[0, 0]
        best = Transform3d(
            OpenCVHelp.tVecToTranslation(tvecs[0]), OpenCVHelp.rVecToRotation(rvecs[0])
        )

        if math.isnan(error):
            logger.error("SolvePNP_SQPNP failed!")
            return None

        # We have no alternative so set it to best as well
        result = PnpResult(best=best, bestReprojErr=error, alt=best, altReprojErr=error)
        return result
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
from typing import Self
|
|
2
|
+
|
|
3
|
+
from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class RotTrlTransform3d:
    """Represents a transformation that first rotates a pose around the origin, and then translates it."""

    def __init__(
        self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
    ):
        """A rotation-translation transformation.

        Applying this RotTrlTransform3d to poses will preserve their current origin-to-pose
        transform as if the origin was transformed by these components instead.

        :param rot: The rotation component
        :param trl: The translation component
        """
        self.rot = rot
        self.trl = trl

    def inverse(self) -> Self:
        """The inverse of this transformation. Applying the inverse will "undo" this transformation."""
        rotInv = -self.rot
        trlInv = -(self.trl.rotateBy(rotInv))
        return type(self)(rotInv, trlInv)

    def getTransform(self) -> Transform3d:
        """This transformation as a Transform3d (as if of the origin)"""
        return Transform3d(self.trl, self.rot)

    def getTranslation(self) -> Translation3d:
        """The translation component of this transformation"""
        return self.trl

    def getRotation(self) -> Rotation3d:
        """The rotation component of this transformation"""
        return self.rot

    def applyTranslation(self, trlToApply: Translation3d) -> Translation3d:
        """Applies this transformation to the given translation: rotate, then translate."""
        return trlToApply.rotateBy(self.rot) + self.trl

    def applyRotation(self, rotToApply: Rotation3d) -> Rotation3d:
        """Applies only the rotation component to the given rotation."""
        return rotToApply + self.rot

    def applyPose(self, poseToApply: Pose3d) -> Pose3d:
        """Applies this transformation to the translation and rotation of the given pose."""
        newTrl = self.applyTranslation(poseToApply.translation())
        newRot = self.applyRotation(poseToApply.rotation())
        return Pose3d(newTrl, newRot)

    def applyTrls(self, rots: list[Rotation3d]) -> list[Rotation3d]:
        # NOTE(review): despite the "Trls" name, this applies the rotation component
        # to a list of rotations — confirm the intended name/signature with callers.
        return [self.applyRotation(rot) for rot in rots]

    @classmethod
    def makeRelativeTo(cls, pose: Pose3d) -> Self:
        """The rotation-translation transformation that makes poses in the world consider this pose as the
        new origin, or change the basis to this pose.

        :param pose: The new origin
        """
        return cls(pose.rotation(), pose.translation()).inverse()

    @classmethod
    def makeBetweenPoses(cls, initial: Pose3d, last: Pose3d) -> Self:
        """The transformation carrying the initial pose to the last pose."""
        deltaRot = last.rotation() - initial.rotation()
        deltaTrl = last.translation() - initial.translation().rotateBy(deltaRot)
        return cls(deltaRot, deltaTrl)
|