photonlibpy-2025.0.0b2-py3-none-any.whl → photonlibpy-2025.0.0b3-py3-none-any.whl
- photonlibpy/estimation/openCVHelp.py +12 -7
- photonlibpy/estimation/rotTrlTransform3d.py +24 -1
- photonlibpy/estimation/targetModel.py +63 -80
- photonlibpy/estimation/visionEstimation.py +4 -4
- photonlibpy/generated/MultiTargetPNPResultSerde.py +7 -1
- photonlibpy/generated/PhotonPipelineMetadataSerde.py +6 -1
- photonlibpy/generated/PhotonPipelineResultSerde.py +9 -1
- photonlibpy/generated/PhotonTrackedTargetSerde.py +7 -1
- photonlibpy/generated/PnpResultSerde.py +6 -1
- photonlibpy/generated/TargetCornerSerde.py +6 -1
- photonlibpy/networktables/NTTopicSet.py +4 -2
- photonlibpy/photonCamera.py +1 -1
- photonlibpy/simulation/photonCameraSim.py +18 -48
- photonlibpy/simulation/simCameraProperties.py +44 -62
- photonlibpy/simulation/visionSystemSim.py +12 -7
- photonlibpy/targeting/TargetCorner.py +2 -2
- photonlibpy/targeting/multiTargetPNPResult.py +4 -15
- photonlibpy/targeting/photonPipelineResult.py +4 -3
- photonlibpy/targeting/photonTrackedTarget.py +2 -2
- photonlibpy/version.py +2 -2
- {photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b3.dist-info}/METADATA +8 -8
- photonlibpy-2025.0.0b3.dist-info/RECORD +36 -0
- photonlibpy-2025.0.0b2.dist-info/RECORD +0 -36
- {photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b3.dist-info}/WHEEL +0 -0
- {photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b3.dist-info}/top_level.txt +0 -0
photonlibpy/estimation/openCVHelp.py
CHANGED
@@ -1,5 +1,5 @@
 import math
-from typing import Any
+from typing import Any

 import cv2 as cv
 import numpy as np
@@ -48,13 +48,13 @@ class OpenCVHelp:
         )

     @staticmethod
-    def avgPoint(points:
+    def avgPoint(points: np.ndarray) -> np.ndarray:
         x = 0.0
         y = 0.0
         for p in points:
-            x += p[0]
-            y += p[1]
-        return (x / len(points), y / len(points))
+            x += p[0, 0]
+            y += p[0, 1]
+        return np.array([[x / len(points), y / len(points)]])

     @staticmethod
     def pointsToTargetCorners(points: np.ndarray) -> list[TargetCorner]:
@@ -126,6 +126,9 @@ class OpenCVHelp:
         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))

         alt: Transform3d | None = None
+        reprojectionError: cv.typing.MatLike | None = None
+        best: Transform3d = Transform3d()
+
         for tries in range(2):
             retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
                 objectMat,
@@ -145,7 +148,9 @@ class OpenCVHelp:
                     OpenCVHelp.rVecToRotation(rvecs[1]),
                 )

-            if not math.isnan(
+            if reprojectionError is not None and not math.isnan(
+                reprojectionError[0, 0]
+            ):
                 break
             else:
                 pt = imagePoints[0]
@@ -153,7 +158,7 @@ class OpenCVHelp:
                 pt[0, 1] -= 0.001
                 imagePoints[0] = pt

-        if math.isnan(reprojectionError[0, 0]):
+        if reprojectionError is None or math.isnan(reprojectionError[0, 0]):
             print("SolvePNP_Square failed!")
             return None

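
A minimal sketch of how the reworked avgPoint might be called after this change, assuming the module path below and OpenCV's (N, 1, 2) corner layout implied by the new p[0, 0] / p[0, 1] indexing; the example data is illustrative only.

```python
import numpy as np

from photonlibpy.estimation.openCVHelp import OpenCVHelp

# Four corners in OpenCV's (N, 1, 2) layout, matching the new indexing.
corners = np.array([[[0.0, 0.0]], [[10.0, 0.0]], [[10.0, 10.0]], [[0.0, 10.0]]])

center = OpenCVHelp.avgPoint(corners)
print(center)  # [[5. 5.]] -- a (1, 2) ndarray instead of the old (x, y) tuple
```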
photonlibpy/estimation/rotTrlTransform3d.py
CHANGED
@@ -24,9 +24,32 @@ class RotTrlTransform3d:
     def getRotation(self) -> Rotation3d:
         return self.rot

-    def
+    def applyTranslation(self, trlToApply: Translation3d) -> Translation3d:
         return trlToApply.rotateBy(self.rot) + self.trl

+    def applyRotation(self, rotToApply: Rotation3d) -> Rotation3d:
+        return rotToApply + self.rot
+
+    def applyPose(self, poseToApply: Pose3d) -> Pose3d:
+        return Pose3d(
+            self.applyTranslation(poseToApply.translation()),
+            self.applyRotation(poseToApply.rotation()),
+        )
+
+    def applyTrls(self, rots: list[Rotation3d]) -> list[Rotation3d]:
+        retVal: list[Rotation3d] = []
+        for rot in rots:
+            retVal.append(self.applyRotation(rot))
+        return retVal
+
     @classmethod
     def makeRelativeTo(cls, pose: Pose3d) -> Self:
         return cls(pose.rotation(), pose.translation()).inverse()
+
+    @classmethod
+    def makeBetweenPoses(cls, initial: Pose3d, last: Pose3d) -> Self:
+        return cls(
+            last.rotation() - initial.rotation(),
+            last.translation()
+            - initial.translation().rotateBy(last.rotation() - initial.rotation()),
+        )
photonlibpy/estimation/targetModel.py
CHANGED
@@ -8,87 +8,70 @@ from . import RotTrlTransform3d


 class TargetModel:
-    def __init__(
-        self,
-        *,
-        width: meters | None = None,
-        height: meters | None = None,
-        length: meters | None = None,
-        diameter: meters | None = None,
-        verts: List[Translation3d] | None = None
-    ):
-
-        if (
-            width is not None
-            and height is not None
-            and length is None
-            and diameter is None
-            and verts is None
-        ):
-            self.isPlanar = True
-            self.isSpherical = False
-            self.vertices = [
-                Translation3d(0.0, -width / 2.0, -height / 2.0),
-                Translation3d(0.0, width / 2.0, -height / 2.0),
-                Translation3d(0.0, width / 2.0, height / 2.0),
-                Translation3d(0.0, -width / 2.0, height / 2.0),
-            ]
-
-            return
-
-        elif (
-            length is not None
-            and width is not None
-            and height is not None
-            and diameter is None
-            and verts is None
-        ):
-            verts = [
-                Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
-                Translation3d(length / 2.0, width / 2.0, -height / 2.0),
-                Translation3d(length / 2.0, width / 2.0, height / 2.0),
-                Translation3d(length / 2.0, -width / 2.0, height / 2.0),
-                Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
-                Translation3d(-length / 2.0, width / 2.0, height / 2.0),
-                Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
-                Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
-            ]
-            # Handle the rest of this in the "default" case
-        elif (
-            diameter is not None
-            and width is None
-            and height is None
-            and length is None
-            and verts is None
-        ):
-            self.isPlanar = False
-            self.isSpherical = True
-            self.vertices = [
-                Translation3d(0.0, -diameter / 2.0, 0.0),
-                Translation3d(0.0, 0.0, -diameter / 2.0),
-                Translation3d(0.0, diameter / 2.0, 0.0),
-                Translation3d(0.0, 0.0, diameter / 2.0),
-            ]
-            return
-        elif (
-            verts is not None
-            and width is None
-            and height is None
-            and length is None
-            and diameter is None
-        ):
-            # Handle this in the "default" case
-            pass
-        else:
-            raise Exception("Not a valid overload")

-
-
-
+    def __init__(self):
+        self.vertices: List[Translation3d] = []
+        self.isPlanar = False
+        self.isSpherical = False
+
+    @classmethod
+    def createPlanar(cls, width: meters, height: meters) -> Self:
+        tm = cls()
+
+        tm.isPlanar = True
+        tm.isSpherical = False
+        tm.vertices = [
+            Translation3d(0.0, -width / 2.0, -height / 2.0),
+            Translation3d(0.0, width / 2.0, -height / 2.0),
+            Translation3d(0.0, width / 2.0, height / 2.0),
+            Translation3d(0.0, -width / 2.0, height / 2.0),
+        ]
+        return tm
+
+    @classmethod
+    def createCuboid(cls, length: meters, width: meters, height: meters) -> Self:
+        tm = cls()
+        verts = [
+            Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
+            Translation3d(length / 2.0, width / 2.0, -height / 2.0),
+            Translation3d(length / 2.0, width / 2.0, height / 2.0),
+            Translation3d(length / 2.0, -width / 2.0, height / 2.0),
+            Translation3d(-length / 2.0, -width / 2.0, height / 2.0),
+            Translation3d(-length / 2.0, width / 2.0, height / 2.0),
+            Translation3d(-length / 2.0, width / 2.0, -height / 2.0),
+            Translation3d(-length / 2.0, -width / 2.0, -height / 2.0),
+        ]
+
+        tm._common_construction(verts)
+
+        return tm
+
+    @classmethod
+    def createSpheroid(cls, diameter: meters) -> Self:
+        tm = cls()
+
+        tm.isPlanar = False
+        tm.isSpherical = True
+        tm.vertices = [
+            Translation3d(0.0, -diameter / 2.0, 0.0),
+            Translation3d(0.0, 0.0, -diameter / 2.0),
+            Translation3d(0.0, diameter / 2.0, 0.0),
+            Translation3d(0.0, 0.0, diameter / 2.0),
+        ]
+
+        return tm
+
+    @classmethod
+    def createArbitrary(cls, verts: List[Translation3d]) -> Self:
+        tm = cls()
+        tm._common_construction(verts)
+
+        return tm

+    def _common_construction(self, verts: List[Translation3d]) -> None:
         self.isSpherical = False
         if len(verts) <= 2:
-            self.vertices
+            self.vertices = []
             self.isPlanar = False
         else:
             cornersPlaner = True
@@ -105,7 +88,7 @@ class TargetModel:
         retVal = []

         for vert in self.vertices:
-            retVal.append(basisChange.
+            retVal.append(basisChange.applyTranslation(vert))

         return retVal

@@ -130,8 +113,8 @@ class TargetModel:

     @classmethod
     def AprilTag36h11(cls) -> Self:
-        return cls(width=6.5 * 0.0254, height=6.5 * 0.0254)
+        return cls.createPlanar(width=6.5 * 0.0254, height=6.5 * 0.0254)

     @classmethod
     def AprilTag16h5(cls) -> Self:
-        return cls(width=6.0 * 0.0254, height=6.0 * 0.0254)
+        return cls.createPlanar(width=6.0 * 0.0254, height=6.0 * 0.0254)
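
A hedged sketch of migrating from the removed keyword-overload constructor to the new factory classmethods, assuming the module path below; the classmethod names and parameters are taken from the diff and the dimensions are placeholders.

```python
from wpimath.geometry import Translation3d

from photonlibpy.estimation.targetModel import TargetModel

# Previously: TargetModel(width=0.2, height=0.2), TargetModel(diameter=0.15), ...
plane = TargetModel.createPlanar(width=0.2, height=0.2)
box = TargetModel.createCuboid(length=0.3, width=0.2, height=0.1)
ball = TargetModel.createSpheroid(diameter=0.15)
custom = TargetModel.createArbitrary(
    [
        Translation3d(0.0, -0.1, -0.1),
        Translation3d(0.0, 0.1, -0.1),
        Translation3d(0.0, 0.1, 0.1),
        Translation3d(0.0, -0.1, 0.1),
    ]
)
```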
photonlibpy/estimation/visionEstimation.py
CHANGED
@@ -16,10 +16,10 @@ class VisionEstimation:
             id = tag.getFiducialId()
             maybePose = layout.getTagPose(id)
             if maybePose:
-
-
-
-                retVal.append(
+                aprilTag = AprilTag()
+                aprilTag.ID = id
+                aprilTag.pose = maybePose
+                retVal.append(aprilTag)
         return retVal

     @staticmethod
photonlibpy/generated/MultiTargetPNPResultSerde.py
CHANGED
@@ -20,8 +20,14 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################

+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import MultiTargetPNPResult  # noqa
+    from ..targeting import PnpResult  # noqa


 class MultiTargetPNPResultSerde:
photonlibpy/generated/PhotonPipelineMetadataSerde.py
CHANGED
@@ -20,8 +20,13 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################

+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import PhotonPipelineMetadata  # noqa


 class PhotonPipelineMetadataSerde:
photonlibpy/generated/PhotonPipelineResultSerde.py
CHANGED
@@ -20,8 +20,16 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################

+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import MultiTargetPNPResult  # noqa
+    from ..targeting import PhotonPipelineMetadata  # noqa
+    from ..targeting import PhotonPipelineResult  # noqa
+    from ..targeting import PhotonTrackedTarget  # noqa


 class PhotonPipelineResultSerde:
photonlibpy/generated/PhotonTrackedTargetSerde.py
CHANGED
@@ -20,8 +20,14 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################

+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import PhotonTrackedTarget  # noqa
+    from ..targeting import TargetCorner  # noqa


 class PhotonTrackedTargetSerde:
photonlibpy/generated/PnpResultSerde.py
CHANGED
@@ -20,8 +20,13 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################

+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import PnpResult  # noqa


 class PnpResultSerde:
photonlibpy/generated/TargetCornerSerde.py
CHANGED
@@ -20,8 +20,13 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################

+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import TargetCorner  # noqa


 class TargetCornerSerde:
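
All six generated serde files gain the same TYPE_CHECKING guard. A generic sketch of the pattern is below; `mypackage` and `Thing` are hypothetical stand-ins, not photonlibpy names.

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by static type checkers; skipped at runtime, which avoids
    # circular imports while keeping precise names available for annotations.
    from mypackage.targeting import Thing  # noqa


def describe(obj: "Thing") -> str:
    # The quoted annotation is resolved lazily, so no runtime import is needed.
    return repr(obj)
```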
photonlibpy/networktables/NTTopicSet.py
CHANGED
@@ -10,8 +10,10 @@ PhotonPipelineResult_TYPE_STRING = (

 class NTTopicSet:

-    def __init__(self) -> None:
-
+    def __init__(self, tableName: str, cameraName: str) -> None:
+        instance = nt.NetworkTableInstance.getDefault()
+        photonvision_root_table = instance.getTable(tableName)
+        self.subTable = photonvision_root_table.getSubTable(cameraName)

     def updateEntries(self) -> None:
         options = nt.PubSubOptions()
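
A minimal sketch of the new constructor, assuming the module path below; the (tableName, cameraName) parameters come from the diff above and "my_camera" is a placeholder.

```python
from photonlibpy.networktables.NTTopicSet import NTTopicSet

# The topic set now derives its own camera subtable from the root table name.
ts = NTTopicSet("photonvision", "my_camera")
ts.updateEntries()
```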
photonlibpy/photonCamera.py
CHANGED
@@ -231,7 +231,7 @@ class PhotonCamera:
         versionString = self.versionEntry.get(defaultValue="")
         localUUID = PhotonPipelineResult.photonStruct.MESSAGE_VERSION

-        remoteUUID = self._rawBytesEntry.getTopic().getProperty("message_uuid")
+        remoteUUID = str(self._rawBytesEntry.getTopic().getProperty("message_uuid"))

         if not remoteUUID:
             wpilib.reportWarning(
photonlibpy/simulation/photonCameraSim.py
CHANGED
@@ -4,8 +4,8 @@ import typing
 import cscore as cs
 import cv2 as cv
 import numpy as np
-import robotpy_apriltag
 import wpilib
+from robotpy_apriltag import AprilTagField, AprilTagFieldLayout
 from wpimath.geometry import Pose3d, Transform3d
 from wpimath.units import meters, seconds

@@ -31,7 +31,7 @@ class PhotonCameraSim:
     def __init__(
         self,
         camera: PhotonCamera,
-        props: SimCameraProperties
+        props: SimCameraProperties = SimCameraProperties.PERFECT_90DEG(),
         minTargetAreaPercent: float | None = None,
         maxSightRange: meters | None = None,
     ):
@@ -41,37 +41,12 @@ class PhotonCameraSim:
         self.videoSimRawEnabled: bool = False
         self.videoSimWireframeEnabled: bool = False
         self.videoSimWireframeResolution: float = 0.1
-        self.videoSimProcEnabled: bool =
-
+        self.videoSimProcEnabled: bool = (
+            False  # TODO switch this back to default True when the functionality is enabled
+        )
         self.heartbeatCounter: int = 0
         self.nextNtEntryTime = int(wpilib.Timer.getFPGATimestamp() * 1e6)
-        self.tagLayout =
-            robotpy_apriltag.AprilTagField.k2024Crescendo
-        )
-
-        if (
-            camera is not None
-            and props is None
-            and minTargetAreaPercent is None
-            and maxSightRange is None
-        ):
-            props = SimCameraProperties.PERFECT_90DEG()
-        elif (
-            camera is not None
-            and props is not None
-            and minTargetAreaPercent is not None
-            and maxSightRange is not None
-        ):
-            pass
-        elif (
-            camera is not None
-            and props is not None
-            and minTargetAreaPercent is None
-            and maxSightRange is None
-        ):
-            pass
-        else:
-            raise Exception("Invalid Constructor Called")
+        self.tagLayout = AprilTagFieldLayout.loadField(AprilTagField.k2024Crescendo)

         self.cam = camera
         self.prop = props
@@ -101,16 +76,11 @@ class PhotonCameraSim:
             (self.prop.getResWidth(), self.prop.getResHeight())
         )

-        self.ts
+        self.ts = NTTopicSet("photonvision", self.cam.getName())
         self.ts.updateEntries()

         # Handle this last explicitly for this function signature because the other constructor is called in the initialiser list
-        if
-            camera is not None
-            and props is not None
-            and minTargetAreaPercent is not None
-            and maxSightRange is not None
-        ):
+        if minTargetAreaPercent is not None and maxSightRange is not None:
             self.minTargetAreaPercent = minTargetAreaPercent
             self.maxSightRange = maxSightRange

@@ -194,19 +164,19 @@ class PhotonCameraSim:
         self.maxSightRange = range

     def enableRawStream(self, enabled: bool) -> None:
+        self.videoSimRawEnabled = enabled
         raise Exception("Raw stream not implemented")
-        # self.videoSimRawEnabled = enabled

     def enableDrawWireframe(self, enabled: bool) -> None:
+        self.videoSimWireframeEnabled = enabled
         raise Exception("Wireframe not implemented")
-        # self.videoSimWireframeEnabled = enabled

     def setWireframeResolution(self, resolution: float) -> None:
         self.videoSimWireframeResolution = resolution

     def enableProcessedStream(self, enabled: bool) -> None:
+        self.videoSimProcEnabled = enabled
         raise Exception("Processed stream not implemented")
-        # self.videoSimProcEnabled = enabled

     def process(
         self, latency: seconds, cameraPose: Pose3d, targets: list[VisionTargetSim]
@@ -217,9 +187,7 @@ class PhotonCameraSim:

         targets.sort(key=distance, reverse=True)

-        visibleTgts: list[
-            typing.Tuple[VisionTargetSim, list[typing.Tuple[float, float]]]
-        ] = []
+        visibleTgts: list[typing.Tuple[VisionTargetSim, np.ndarray]] = []
         detectableTgts: list[PhotonTrackedTarget] = []

         camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
@@ -258,6 +226,7 @@ class PhotonCameraSim:
                 ] * 4
                 t = (l + 1) % 4
                 b = (l + 1) % 4
+                r = 0
                 for i in range(4):
                     if i == l:
                         continue
@@ -271,14 +240,14 @@ class PhotonCameraSim:
                     if i != t and i != l and i != b:
                         r = i
                 rect = cv.RotatedRect(
-                    center,
+                    (center[0, 0], center[0, 1]),
                     (
                         imagePoints[r, 0, 0] - lc[0, 0],
                         imagePoints[b, 0, 1] - imagePoints[t, 0, 1],
                     ),
                     -angles[r],
                 )
-                imagePoints = rect.points()
+                imagePoints = np.array([[p[0], p[1], p[2]] for p in rect.points()])

             visibleTgts.append((tgt, imagePoints))
             noisyTargetCorners = self.prop.estPixelNoise(imagePoints)
@@ -325,13 +294,13 @@ class PhotonCameraSim:
         )

         # Video streams disabled for now
-        if self.
+        if self.videoSimRawEnabled:
             # VideoSimUtil::UpdateVideoProp(videoSimRaw, prop);
             # cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()};
             # cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1);
             # blankFrame.assignTo(videoSimFrameRaw);
             pass
-        if self.
+        if self.videoSimProcEnabled:
             # VideoSimUtil::UpdateVideoProp(videoSimProcessed, prop);
             pass

@@ -385,6 +354,7 @@ class PhotonCameraSim:
             self.ts.targetSkewEntry.set(0.0, receiveTimestamp)
         else:
             bestTarget = result.getBestTarget()
+            assert bestTarget

             self.ts.targetPitchEntry.set(bestTarget.getPitch(), receiveTimestamp)
             self.ts.targetYawEntry.set(bestTarget.getYaw(), receiveTimestamp)
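
A hedged sketch of constructing PhotonCameraSim after this change, assuming the module paths below; "my_camera" is a placeholder camera name and the filter values are illustrative.

```python
from photonlibpy.photonCamera import PhotonCamera
from photonlibpy.simulation.photonCameraSim import PhotonCameraSim
from photonlibpy.simulation.simCameraProperties import SimCameraProperties

cam = PhotonCamera("my_camera")

# PERFECT_90DEG properties now arrive via the keyword default rather than the
# removed overload-dispatch block in the constructor.
sim_default = PhotonCameraSim(cam)

# Explicit properties plus both filter arguments still work together.
sim_custom = PhotonCameraSim(
    cam,
    SimCameraProperties.OV9281_1280_720(),
    minTargetAreaPercent=0.1,
    maxSightRange=6.0,
)
```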
photonlibpy/simulation/simCameraProperties.py
CHANGED
@@ -11,7 +11,7 @@ from ..estimation import RotTrlTransform3d


 class SimCameraProperties:
-    def __init__(self
+    def __init__(self):
         self.resWidth: int = -1
         self.resHeight: int = -1
         self.camIntrinsics: np.ndarray = np.zeros((3, 3))  # [3,3]
@@ -24,57 +24,41 @@ class SimCameraProperties:
         self.latencyStdDev: seconds = 0.0
         self.viewplanes: list[np.ndarray] = []  # [3,1]

-
-            self.setCalibration(960, 720, fovDiag=Rotation2d(math.radians(90.0)))
-        else:
-            raise Exception("not yet implemented")
-
-    def setCalibration(
-        self,
-        width: int,
-        height: int,
-        *,
-        fovDiag: Rotation2d | None = None,
-        newCamIntrinsics: np.ndarray | None = None,
-        newDistCoeffs: np.ndarray | None = None,
-    ):
-        # Should be an inverted XOR on the args to differentiate between the signatures
-
-        has_fov_args = fovDiag is not None
-        has_matrix_args = newCamIntrinsics is not None and newDistCoeffs is not None
+        self.setCalibrationFromFOV(960, 720, fovDiag=Rotation2d(math.radians(90.0)))

-
-
-
-
+    def setCalibrationFromFOV(
+        self, width: int, height: int, fovDiag: Rotation2d
+    ) -> None:
+        if fovDiag.degrees() < 1.0 or fovDiag.degrees() > 179.0:
+            fovDiag = Rotation2d.fromDegrees(max(min(fovDiag.degrees(), 179.0), 1.0))
+            logging.error("Requested invalid FOV! Clamping between (1, 179) degrees...")

-
-
-
-
-            )
-            logging.error(
-                "Requested invalid FOV! Clamping between (1, 179) degrees..."
-            )
+        resDiag = math.sqrt(width * width + height * height)
+        diagRatio = math.tan(fovDiag.radians() / 2.0)
+        fovWidth = Rotation2d(math.atan((diagRatio * (width / resDiag)) * 2))
+        fovHeight = Rotation2d(math.atan(diagRatio * (height / resDiag)) * 2)

-
-        diagRatio = math.tan(fovDiag.radians() / 2.0)
-        fovWidth = Rotation2d(math.atan((diagRatio * (width / resDiag)) * 2))
-        fovHeight = Rotation2d(math.atan(diagRatio * (height / resDiag)) * 2)
+        newDistCoeffs = np.zeros((8, 1))

-
+        cx = width / 2.0 - 0.5
+        cy = height / 2.0 - 0.5

-
-
+        fx = cx / math.tan(fovWidth.radians() / 2.0)
+        fy = cy / math.tan(fovHeight.radians() / 2.0)

-
-        fy = cy / math.tan(fovHeight.radians() / 2.0)
+        newCamIntrinsics = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])

-
+        self.setCalibrationFromIntrinsics(
+            width, height, newCamIntrinsics, newDistCoeffs
+        )

-
-
-
+    def setCalibrationFromIntrinsics(
+        self,
+        width: int,
+        height: int,
+        newCamIntrinsics: np.ndarray,
+        newDistCoeffs: np.ndarray,
+    ) -> None:

         self.resWidth = width
         self.resHeight = height
@@ -171,10 +155,8 @@ class SimCameraProperties:
     def getLatencyStdDev(self) -> seconds:
         return self.latencyStdDev

-    def getContourAreaPercent(self, points:
-        return (
-            cv.contourArea(cv.convexHull(np.array(points))) / self.getResArea() * 100.0
-        )
+    def getContourAreaPercent(self, points: np.ndarray) -> float:
+        return cv.contourArea(cv.convexHull(points)) / self.getResArea() * 100.0

     def getPixelYaw(self, pixelX: float) -> Rotation2d:
         fx = self.camIntrinsics[0, 0]
@@ -188,14 +170,14 @@ class SimCameraProperties:
         yOffset = cy - pixelY
         return Rotation2d(fy, -yOffset)

-    def getPixelRot(self, point: typing.
+    def getPixelRot(self, point: cv.typing.Point2f) -> Rotation3d:
         return Rotation3d(
             0.0,
             self.getPixelPitch(point[1]).radians(),
             self.getPixelYaw(point[0]).radians(),
         )

-    def getCorrectedPixelRot(self, point: typing.
+    def getCorrectedPixelRot(self, point: cv.typing.Point2f) -> Rotation3d:
         fx = self.camIntrinsics[0, 0]
         cx = self.camIntrinsics[0, 2]
         xOffset = cx - point[0]
@@ -226,8 +208,8 @@ class SimCameraProperties:
     def getVisibleLine(
         self, camRt: RotTrlTransform3d, a: Translation3d, b: Translation3d
     ) -> typing.Tuple[float | None, float | None]:
-        relA = camRt.
-        relB = camRt.
+        relA = camRt.applyTranslation(a)
+        relB = camRt.applyTranslation(b)

         if relA.X() <= 0.0 and relB.X() <= 0.0:
             return (None, None)
@@ -279,7 +261,7 @@ class SimCameraProperties:
                     ipts[i] = None
                     break

-                if
+                if ipts[i] is None:
                     continue

                 for j in range(i - 1, 0 - 1):
@@ -357,7 +339,7 @@ class SimCameraProperties:
     @classmethod
     def PI4_LIFECAM_320_240(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             320,
             240,
             newCamIntrinsics=np.array(
@@ -391,7 +373,7 @@ class SimCameraProperties:
     @classmethod
     def PI4_LIFECAM_640_480(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             640,
             480,
             newCamIntrinsics=np.array(
@@ -425,7 +407,7 @@ class SimCameraProperties:
     @classmethod
     def LL2_640_480(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             640,
             480,
             newCamIntrinsics=np.array(
@@ -459,7 +441,7 @@ class SimCameraProperties:
     @classmethod
     def LL2_960_720(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             960,
             720,
             newCamIntrinsics=np.array(
@@ -493,7 +475,7 @@ class SimCameraProperties:
     @classmethod
     def LL2_1280_720(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             1280,
             720,
             newCamIntrinsics=np.array(
@@ -527,7 +509,7 @@ class SimCameraProperties:
     @classmethod
     def OV9281_640_480(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             640,
             480,
             newCamIntrinsics=np.array(
@@ -561,7 +543,7 @@ class SimCameraProperties:
     @classmethod
     def OV9281_800_600(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             800,
             600,
             newCamIntrinsics=np.array(
@@ -595,7 +577,7 @@ class SimCameraProperties:
     @classmethod
     def OV9281_1280_720(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             1280,
             720,
             newCamIntrinsics=np.array(
@@ -629,7 +611,7 @@ class SimCameraProperties:
     @classmethod
     def OV9281_1920_1080(cls) -> typing.Self:
         prop = cls()
-        prop.
+        prop.setCalibrationFromIntrinsics(
             1920,
             1080,
             newCamIntrinsics=np.array(
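
A hedged sketch of the split calibration API, assuming the module path below; the method names and parameters come from the diff above, and the resolution, FOV, and intrinsics values are placeholders.

```python
import math

import numpy as np
from wpimath.geometry import Rotation2d

from photonlibpy.simulation.simCameraProperties import SimCameraProperties

prop = SimCameraProperties()

# FOV-based path (replaces setCalibration(..., fovDiag=...)).
prop.setCalibrationFromFOV(1280, 720, fovDiag=Rotation2d(math.radians(100.0)))

# Intrinsics-based path (replaces setCalibration(..., newCamIntrinsics=..., newDistCoeffs=...)).
prop.setCalibrationFromIntrinsics(
    1280,
    720,
    newCamIntrinsics=np.array(
        [[900.0, 0.0, 639.5], [0.0, 900.0, 359.5], [0.0, 0.0, 1.0]]
    ),
    newDistCoeffs=np.zeros((8, 1)),
)
```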
photonlibpy/simulation/visionSystemSim.py
CHANGED
@@ -84,7 +84,11 @@ class VisionSystemSim:
         if robotToCamera is None:
             return None
         else:
-
+            pose = self.getRobotPose(time)
+            if pose:
+                return pose + robotToCamera
+            else:
+                return None

     def adjustCamera(
         self, cameraSim: PhotonCameraSim, robotToCamera: Transform3d
@@ -167,7 +171,7 @@ class VisionSystemSim:

     def getRobotPose(
         self, timestamp: seconds = wpilib.Timer.getFPGATimestamp()
-    ) -> Pose3d:
+    ) -> Pose3d | None:
         return self.robotPoseBuffer.sample(timestamp)

     def resetRobotPose(self, robotPose: Pose2d | Pose3d) -> None:
@@ -216,15 +220,16 @@ class VisionSystemSim:
             timestampCapture = timestampNt * 1.0e-6 - latency

             lateRobotPose = self.getRobotPose(timestampCapture)
-
-
-
+            robotToCamera = self.getRobotToCamera(camSim, timestampCapture)
+            if lateRobotPose is None or robotToCamera is None:
+                return None
+            lateCameraPose = lateRobotPose + robotToCamera
             cameraPoses2d.append(lateCameraPose.toPose2d())

             camResult = camSim.process(latency, lateCameraPose, allTargets)
             camSim.submitProcessedFrame(camResult, timestampNt)
-            for
-            trf =
+            for tgt in camResult.getTargets():
+                trf = tgt.getBestCameraToTarget()
                 if trf == Transform3d():
                     continue

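
A hedged sketch of handling the new Optional return type, assuming VisionSystemSim still takes a vision system name in its constructor (the constructor is not shown in this diff).

```python
from photonlibpy.simulation.visionSystemSim import VisionSystemSim

vision_sim = VisionSystemSim("main")  # assumed constructor argument

pose = vision_sim.getRobotPose()  # now typed Pose3d | None
if pose is not None:
    print(pose.translation())
```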
photonlibpy/targeting/TargetCorner.py
CHANGED
@@ -2,7 +2,7 @@ from dataclasses import dataclass
 from typing import TYPE_CHECKING, ClassVar

 if TYPE_CHECKING:
-    from .. import
+    from ..generated.TargetCornerSerde import TargetCornerSerde


 @dataclass
@@ -10,4 +10,4 @@ class TargetCorner:
     x: float = 0
     y: float = 9

-    photonStruct: ClassVar["
+    photonStruct: ClassVar["TargetCornerSerde"]
photonlibpy/targeting/multiTargetPNPResult.py
CHANGED
@@ -3,10 +3,9 @@ from typing import TYPE_CHECKING, ClassVar

 from wpimath.geometry import Transform3d

-from ..packet import Packet
-
 if TYPE_CHECKING:
-    from .. import
+    from ..generated.MultiTargetPNPResultSerde import MultiTargetPNPResultSerde
+    from ..generated.PnpResultSerde import PnpResultSerde


 @dataclass
@@ -17,7 +16,7 @@ class PnpResult:
     bestReprojErr: float = 0.0
     altReprojErr: float = 0.0

-    photonStruct: ClassVar["
+    photonStruct: ClassVar["PnpResultSerde"]


 @dataclass
@@ -27,14 +26,4 @@ class MultiTargetPNPResult:
     estimatedPose: PnpResult = field(default_factory=PnpResult)
     fiducialIDsUsed: list[int] = field(default_factory=list)

-
-        self.estimatedPose = PnpResult()
-        self.estimatedPose.createFromPacket(packet)
-        self.fiducialIDsUsed = []
-        for _ in range(MultiTargetPNPResult._MAX_IDS):
-            fidId = packet.decode16()
-            if fidId >= 0:
-                self.fiducialIDsUsed.append(fidId)
-        return packet
-
-    photonStruct: ClassVar["generated.MultiTargetPNPResultSerde"]
+    photonStruct: ClassVar["MultiTargetPNPResultSerde"]
photonlibpy/targeting/photonPipelineResult.py
CHANGED
@@ -5,7 +5,8 @@ from .multiTargetPNPResult import MultiTargetPNPResult
 from .photonTrackedTarget import PhotonTrackedTarget

 if TYPE_CHECKING:
-    from .. import
+    from ..generated.PhotonPipelineMetadataSerde import PhotonPipelineMetadataSerde
+    from ..generated.PhotonPipelineResultSerde import PhotonPipelineResultSerde


 @dataclass
@@ -20,7 +21,7 @@ class PhotonPipelineMetadata:

     timeSinceLastPong: int = -1

-    photonStruct: ClassVar["
+    photonStruct: ClassVar["PhotonPipelineMetadataSerde"]


 @dataclass
@@ -69,4 +70,4 @@ class PhotonPipelineResult:
             return None
         return self.getTargets()[0]

-    photonStruct: ClassVar["
+    photonStruct: ClassVar["PhotonPipelineResultSerde"]
photonlibpy/targeting/photonTrackedTarget.py
CHANGED
@@ -7,7 +7,7 @@ from ..packet import Packet
 from .TargetCorner import TargetCorner

 if TYPE_CHECKING:
-    from .. import
+    from ..generated.PhotonTrackedTargetSerde import PhotonTrackedTargetSerde


 @dataclass
@@ -63,4 +63,4 @@ class PhotonTrackedTarget:
             retList.append(TargetCorner(cx, cy))
         return retList

-    photonStruct: ClassVar["
+    photonStruct: ClassVar["PhotonTrackedTargetSerde"]
photonlibpy/version.py
CHANGED
@@ -1,2 +1,2 @@
-PHOTONLIB_VERSION="v2025.0.0.beta.
-PHOTONVISION_VERSION="v2025.0.0-beta-
+PHOTONLIB_VERSION="v2025.0.0.beta.3"
+PHOTONVISION_VERSION="v2025.0.0-beta-3"
{photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b3.dist-info}/METADATA
CHANGED
@@ -1,16 +1,16 @@
 Metadata-Version: 2.1
 Name: photonlibpy
-Version: 2025.0.
-Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2025.0.0-beta-
+Version: 2025.0.0b3
+Summary: Pure-python implementation of PhotonLib for interfacing with PhotonVision on coprocessors. Implemented with PhotonVision version v2025.0.0-beta-3 .
 Home-page: https://photonvision.org
 Author: Photonvision Development Team
 Description-Content-Type: text/markdown
-Requires-Dist: numpy~=1
-Requires-Dist: wpilib<2025
-Requires-Dist: robotpy-wpimath<2025
-Requires-Dist: robotpy-apriltag<2025
-Requires-Dist: robotpy-cscore<2025
-Requires-Dist: pyntcore<2025
+Requires-Dist: numpy~=2.1
+Requires-Dist: wpilib<2026,>=2025.0.0b1
+Requires-Dist: robotpy-wpimath<2026,>=2025.0.0b1
+Requires-Dist: robotpy-apriltag<2026,>=2025.0.0b1
+Requires-Dist: robotpy-cscore<2026,>=2025.0.0b1
+Requires-Dist: pyntcore<2026,>=2025.0.0b1
 Requires-Dist: opencv-python; platform_machine != "roborio"
 Requires-Dist: robotpy-opencv; platform_machine == "roborio"

photonlibpy-2025.0.0b3.dist-info/RECORD
ADDED
@@ -0,0 +1,36 @@
+photonlibpy/__init__.py,sha256=WW1OGrrcNXwwxaHSZlkxmhH2GYiQIHHxSxGVTJZhZbY,1136
+photonlibpy/estimatedRobotPose.py,sha256=X7wF9xdPXGKSVy0MY0qrWZJOEbuZPd721lYp0KXKlP0,1603
+photonlibpy/packet.py,sha256=5YomViVFwOljL2FGOetWM9FbPc_yCQ15ylzkYlgLIs8,9724
+photonlibpy/photonCamera.py,sha256=061meWm5G36_9vByAUBFCor0fR4G24baw306xt_cWB8,10181
+photonlibpy/photonPoseEstimator.py,sha256=2iMqxPFsQHTsq95yv-WCSv1a6wXNcHPqOyMc4Bu6IG0,12584
+photonlibpy/version.py,sha256=A48GfU-qwEM5vulc7mQd7EH8j_mm-7dL2tuMzcswSqg,77
+photonlibpy/estimation/__init__.py,sha256=pZ-d6fN1DJvT-lRl4FfIos5HAvlzmetIOrGIinrdv7k,223
+photonlibpy/estimation/cameraTargetRelation.py,sha256=i7DPBXtkZve4ToXQscEIe-5F1oGQ1Qmf5QBaE__EeMQ,1158
+photonlibpy/estimation/openCVHelp.py,sha256=6ynO0WbccVxbUCeDu4QgISgipqiqs-erLd14rnSSH-c,6726
+photonlibpy/estimation/rotTrlTransform3d.py,sha256=_5q3oMdbYRVBhzxQt5UwyRE7AXkhT3Fth5Ljrj3gANY,1728
+photonlibpy/estimation/targetModel.py,sha256=ZfuiB0ZDNM8gMum5nuq6KWyEeFts49jYgMo7-oojjY0,3726
+photonlibpy/estimation/visionEstimation.py,sha256=v4WkVtEQd5Q9OWxrcJAseIjP-trMHj_MDzMj4fcfjtw,3019
+photonlibpy/generated/MultiTargetPNPResultSerde.py,sha256=CzsCosHUnzxAoSC_sSAqpsXsDp54KW-BClnIXdzfMPc,2506
+photonlibpy/generated/PhotonPipelineMetadataSerde.py,sha256=9pq5XUEDEoRowCd7sRnuGV7xtugu5HZCRrnwqZG2xXc,2887
+photonlibpy/generated/PhotonPipelineResultSerde.py,sha256=jTf1obU2jPNDsxvt-tl9Ty_qzJws9Wqron3C1QiHkP8,3137
+photonlibpy/generated/PhotonTrackedTargetSerde.py,sha256=-6vKir_ABDVBGbg8ktM48IKm_nBMFBbiyuZLiO_fP9U,4437
+photonlibpy/generated/PnpResultSerde.py,sha256=YoTKdQ51oSdxC-7Poy6hunL0-zkMKvP5uedqaHWPudY,2693
+photonlibpy/generated/TargetCornerSerde.py,sha256=kziD_rQIwyhzPfgOaDgn-3d87tvtXiAYbBjzu76biYU,2190
+photonlibpy/generated/__init__.py,sha256=mElM8M88---wxTWO-SRqIJ4EfxN0fdIUwZBZ-UIGuRw,428
+photonlibpy/networktables/NTTopicSet.py,sha256=NduTI3c3fG4MuVcY2cuLIoV1i1PSxxYzqBOt24ngS9k,2708
+photonlibpy/networktables/__init__.py,sha256=o_LxTdyIylAszMy_zhUtTkXHyu6jqxccccj78d44OrI,35
+photonlibpy/simulation/__init__.py,sha256=HKJV02of5d8bOnuI7syLzSYtOYge7XUrHSaLvawh99M,227
+photonlibpy/simulation/photonCameraSim.py,sha256=BJ870459FdaO1HLkAMfA23W2S8S9axn3rZUPyxQfH40,14078
+photonlibpy/simulation/simCameraProperties.py,sha256=gcRfMOH5zQhv8mBElSo8GT7yOC9dA0EsQoJz-1WsCCw,19960
+photonlibpy/simulation/videoSimUtil.py,sha256=xMuTvJ2Jx9IoQqmAJi_zUm06MdEwhVpIz9OyzYQp0k4,29
+photonlibpy/simulation/visionSystemSim.py,sha256=WZKYdu89IVchcCsXR04AXVQv78F_KgZIQRZ00ocusWE,8814
+photonlibpy/simulation/visionTargetSim.py,sha256=AN7jXW3guQfNUu2EiQl23Jj8Mkgn4gYmRphGXL0-Dqk,1734
+photonlibpy/targeting/TargetCorner.py,sha256=ouKj3E5uD76OZSNHHuSDzKOY65a8HqtcOsuejH-MVsU,276
+photonlibpy/targeting/__init__.py,sha256=YzINSpq6A0cjr-yAQcFqHoiYdLGKPFXThlVYlMjY11w,295
+photonlibpy/targeting/multiTargetPNPResult.py,sha256=Y9rweHtMzoCZ6mv6F8CutQi2Thq5pHN0ydBWvTCsOwY,806
+photonlibpy/targeting/photonPipelineResult.py,sha256=MbaSyHZTJpoKTtLOZztpSGSt9xWWFqhzgwj8medObVA,2732
+photonlibpy/targeting/photonTrackedTarget.py,sha256=zCoFp32hX-3GmBYEmsYBQieBoMzXtP2F_55_q0zPOXA,1956
+photonlibpy-2025.0.0b3.dist-info/METADATA,sha256=C6jySUa1aCRwXFf3ZKZS8l8C_MV4pnnfBS6Ueho98WM,750
+photonlibpy-2025.0.0b3.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
+photonlibpy-2025.0.0b3.dist-info/top_level.txt,sha256=T8Xc6U6he2VjKUAca6zawSkHdUZuLanxYIc4nxw2ctc,12
+photonlibpy-2025.0.0b3.dist-info/RECORD,,
photonlibpy-2025.0.0b2.dist-info/RECORD
REMOVED
@@ -1,36 +0,0 @@
-photonlibpy/__init__.py,sha256=WW1OGrrcNXwwxaHSZlkxmhH2GYiQIHHxSxGVTJZhZbY,1136
-photonlibpy/estimatedRobotPose.py,sha256=X7wF9xdPXGKSVy0MY0qrWZJOEbuZPd721lYp0KXKlP0,1603
-photonlibpy/packet.py,sha256=5YomViVFwOljL2FGOetWM9FbPc_yCQ15ylzkYlgLIs8,9724
-photonlibpy/photonCamera.py,sha256=wz55_9XoKzGX6UQ-2Oa3kjrz9KM6R7-wExiaUPkUeto,10176
-photonlibpy/photonPoseEstimator.py,sha256=2iMqxPFsQHTsq95yv-WCSv1a6wXNcHPqOyMc4Bu6IG0,12584
-photonlibpy/version.py,sha256=gk9q-HkZ3pUpfERPbTZbPNGvNy0gR9d01A_Db5oMkZ4,77
-photonlibpy/estimation/__init__.py,sha256=pZ-d6fN1DJvT-lRl4FfIos5HAvlzmetIOrGIinrdv7k,223
-photonlibpy/estimation/cameraTargetRelation.py,sha256=i7DPBXtkZve4ToXQscEIe-5F1oGQ1Qmf5QBaE__EeMQ,1158
-photonlibpy/estimation/openCVHelp.py,sha256=ThJDVuJ_QFteuuSeo7wQHoE4YzPQOx60_bmFRnILS1Y,6544
-photonlibpy/estimation/rotTrlTransform3d.py,sha256=nX60k9rgei7bkLRR3Ykl2lhnjpsiSNuImk6fERFcy2w,915
-photonlibpy/estimation/targetModel.py,sha256=e5IaDKFdNtek6pJLrzw2poit5wxvraIRtHPDmN-rqUk,4416
-photonlibpy/estimation/visionEstimation.py,sha256=jCOBHHOjhzjNOD-_kn-jpTOXqlGxek24oKL51GiRSKU,2999
-photonlibpy/generated/MultiTargetPNPResultSerde.py,sha256=WoCdln8kkciBltG-YdFn5OjdLGMD3VwwHymfe95QF4s,2342
-photonlibpy/generated/PhotonPipelineMetadataSerde.py,sha256=0beAVUFeQ1Shq5ku4q5IgE-sYkMzk3XoemRnC5Vx8Hg,2767
-photonlibpy/generated/PhotonPipelineResultSerde.py,sha256=q2w8ei-8X1ZJgHnYsTfX7NHTybUboP-DqQkdGY5bj1M,2847
-photonlibpy/generated/PhotonTrackedTargetSerde.py,sha256=TT18q7jb2WzcPDOf26NOo1l23W9gY80CyrNYUBXbHYs,4271
-photonlibpy/generated/PnpResultSerde.py,sha256=FVvv34GzQ856uGobpRSUO8TbV-h7IVipkTqgqfRa35Y,2586
-photonlibpy/generated/TargetCornerSerde.py,sha256=rUJUBfs02gTWgkHvSjuaGLQdSzZcfpRRcSLDlFo8-_E,2080
-photonlibpy/generated/__init__.py,sha256=mElM8M88---wxTWO-SRqIJ4EfxN0fdIUwZBZ-UIGuRw,428
-photonlibpy/networktables/NTTopicSet.py,sha256=9jagxRAxO6nN7LvHZti86qA9V_d9_FStoDz5C3siM7Y,2545
-photonlibpy/networktables/__init__.py,sha256=o_LxTdyIylAszMy_zhUtTkXHyu6jqxccccj78d44OrI,35
-photonlibpy/simulation/__init__.py,sha256=HKJV02of5d8bOnuI7syLzSYtOYge7XUrHSaLvawh99M,227
-photonlibpy/simulation/photonCameraSim.py,sha256=xul2eXUe1XgJ-RWALG6cgU4zDPcDOvHHvjFjAASS188,14706
-photonlibpy/simulation/simCameraProperties.py,sha256=LAmxwm9FSyx-CTsVO-TRhfn4Pz5oanJV0Jh7KUnCNgM,20557
-photonlibpy/simulation/videoSimUtil.py,sha256=xMuTvJ2Jx9IoQqmAJi_zUm06MdEwhVpIz9OyzYQp0k4,29
-photonlibpy/simulation/visionSystemSim.py,sha256=q4ifF4q_XkCJ9jIkHBX6lYY6INc-uvFBi4c2OLQR3WA,8615
-photonlibpy/simulation/visionTargetSim.py,sha256=AN7jXW3guQfNUu2EiQl23Jj8Mkgn4gYmRphGXL0-Dqk,1734
-photonlibpy/targeting/TargetCorner.py,sha256=5a7RdK_WLCgui7BVE3KnHmb3jHbXeM5_SfFSDbnmd0M,251
-photonlibpy/targeting/__init__.py,sha256=YzINSpq6A0cjr-yAQcFqHoiYdLGKPFXThlVYlMjY11w,295
-photonlibpy/targeting/multiTargetPNPResult.py,sha256=zcUF1rIAgovURXu9saiLHIjuspSpRn3r7-J6tB_YlU4,1125
-photonlibpy/targeting/photonPipelineResult.py,sha256=Sy_KBvI0ps1pQOJ80XYHTrz50aqUDBcuvIC_b1ku3G4,2617
-photonlibpy/targeting/photonTrackedTarget.py,sha256=m6y-dBqP8iVt5J0WcGDxYpmgctVy6SNL358ZY1YgjCc,1917
-photonlibpy-2025.0.0b2.dist-info/METADATA,sha256=EdH_l3RSbxbU8Z2s7SIBpIwca7DCOPRaOwVuniX1fro,752
-photonlibpy-2025.0.0b2.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
-photonlibpy-2025.0.0b2.dist-info/top_level.txt,sha256=T8Xc6U6he2VjKUAca6zawSkHdUZuLanxYIc4nxw2ctc,12
-photonlibpy-2025.0.0b2.dist-info/RECORD,,
{photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b3.dist-info}/WHEEL
File without changes

{photonlibpy-2025.0.0b2.dist-info → photonlibpy-2025.0.0b3.dist-info}/top_level.txt
File without changes