photonlibpy 2025.0.0b3__py3-none-any.whl → 2025.0.0b4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -27,6 +27,12 @@ class OpenCVHelp:
 
     @staticmethod
     def translationToTVec(translations: list[Translation3d]) -> np.ndarray:
+        """Creates a new :class:`np.ndarray` with these 3d translations. The opencv tvec is a vector
+        with three elements representing {x, y, z} in the EDN coordinate system.
+
+        :param translations: The translations to convert into a np.array
+        """
+
         retVal: list[list] = []
         for translation in translations:
             trl = OpenCVHelp.translationNWUtoEDN(translation)
@@ -38,6 +44,13 @@ class OpenCVHelp:
 
     @staticmethod
     def rotationToRVec(rotation: Rotation3d) -> np.ndarray:
+        """Creates a new :class:`np.ndarray` with this 3d rotation. The opencv rvec is a vector with
+        three elements representing the axis scaled by the angle in the EDN coordinate system
+        (angle = norm, and axis = rvec / norm).
+
+        :param rotation: The rotation to convert into a np.array
+        """
+
         retVal: list[np.ndarray] = []
         rot = OpenCVHelp.rotationNWUtoEDN(rotation)
         rotVec = rot.getQuaternion().toRotationVector()
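
As a usage sketch of the two converters above (the module path in the import is an assumption about photonlibpy's layout):

    from wpimath.geometry import Rotation3d, Translation3d
    from photonlibpy.estimation import OpenCVHelp  # assumed import path

    # One meter north in NWU lands on +Z (north) in EDN.
    tvec = OpenCVHelp.translationToTVec([Translation3d(1.0, 0.0, 0.0)])
    # The zero rotation maps to the zero rotation vector.
    rvec = OpenCVHelp.rotationToRVec(Rotation3d())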
@@ -88,6 +101,25 @@ class OpenCVHelp:
     def reorderCircular(
         elements: list[Any] | np.ndarray, backwards: bool, shiftStart: int
     ) -> list[Any]:
+        """Reorders the list, optionally indexing backwards and wrapping around to the last element
+        after the first, and shifting all indices in the direction of indexing.
+
+        e.g.
+
+        ([1, 2, 3], False, 1) == [2, 3, 1]
+
+        ([1, 2, 3], True, 0) == [1, 3, 2]
+
+        ([1, 2, 3], True, 1) == [3, 2, 1]
+
+        :param elements: list elements
+        :param backwards: If indexing should happen in reverse (0, size-1, size-2, ...)
+        :param shiftStart: How much the initial index should be shifted (instead of starting at
+            index 0, start at shiftStart, negated if backwards)
+
+        :returns: Reordered list
+        """
+
         size = len(elements)
         reordered = []
         dir = -1 if backwards else 1
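
The examples above can be sanity-checked against a pure-Python sketch of the same wrap-around semantics (an illustration, not the shipped implementation):

    def reorder_circular(elements, backwards, shift_start):
        # Wrap-around indexing, optionally reversed; shiftStart is negated when backwards.
        size = len(elements)
        d = -1 if backwards else 1
        return [elements[(d * (shift_start + i)) % size] for i in range(size)]

    assert reorder_circular([1, 2, 3], False, 1) == [2, 3, 1]
    assert reorder_circular([1, 2, 3], True, 0) == [1, 3, 2]
    assert reorder_circular([1, 2, 3], True, 1) == [3, 2, 1]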
@@ -100,18 +132,39 @@ class OpenCVHelp:
 
     @staticmethod
     def translationEDNToNWU(trl: Translation3d) -> Translation3d:
+        """Convert a translation from EDN to NWU. For example, a translation X,Y,Z {1, 0, 0}
+        in EDN is {0, -1, 0} in NWU.
+        """
+
         return trl.rotateBy(EDN_TO_NWU)
 
     @staticmethod
     def rotationEDNToNWU(rot: Rotation3d) -> Rotation3d:
+        """Convert a rotation delta from EDN to NWU. For example, a rotation X,Y,Z {1, 0, 0}
+        in EDN is {0, -1, 0} in NWU.
+        """
+
         return -EDN_TO_NWU + (rot + EDN_TO_NWU)
 
     @staticmethod
     def tVecToTranslation(tvecInput: np.ndarray) -> Translation3d:
+        """Returns a new 3d translation from this tvec :class:`np.ndarray`. The opencv tvec is a
+        vector with three elements representing {x, y, z} in the EDN coordinate system.
+
+        :param tvecInput: The tvec to create a Translation3d from
+        """
+
         return OpenCVHelp.translationEDNToNWU(Translation3d(tvecInput))
 
     @staticmethod
     def rVecToRotation(rvecInput: np.ndarray) -> Rotation3d:
+        """Returns a 3d rotation from this rvec :class:`np.ndarray`. The opencv rvec is a vector with
+        three elements representing the axis scaled by the angle in the EDN coordinate system
+        (angle = norm, and axis = rvec / norm).
+
+        :param rvecInput: The rvec to create a Rotation3d from
+        """
+
         return OpenCVHelp.rotationEDNToNWU(Rotation3d(rvecInput))
 
     @staticmethod
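
The EDN/NWU docstring examples above reduce to a fixed axis permutation; here is a self-contained numpy sketch of that mapping (photonlibpy itself goes through wpimath rotations instead):

    import numpy as np

    # NWU (x=north, y=west, z=up) -> EDN (x=east, y=down, z=north)
    def nwu_to_edn(v: np.ndarray) -> np.ndarray:
        x, y, z = v
        return np.array([-y, -z, x])  # east = -west, down = -up, north = north

    # Matches the docstrings: NWU {1, 0, 0} is EDN {0, 0, 1}, and going the
    # other way, EDN {1, 0, 0} is NWU {0, -1, 0}.
    assert (nwu_to_edn(np.array([1.0, 0.0, 0.0])) == np.array([0.0, 0.0, 1.0])).all()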
@@ -121,6 +174,33 @@ class OpenCVHelp:
         modelTrls: list[Translation3d],
         imagePoints: np.ndarray,
     ) -> PnpResult | None:
+        """Finds the transformation(s) that map the camera's pose to the target's pose. The camera's
+        pose relative to the target is determined by the supplied 3d points of the target's model and
+        their associated 2d points imaged by the camera. The supplied model translations must be
+        relative to the target's pose.
+
+        For planar targets, there may be an alternate solution which is plausible given the 2d image
+        points. This has an associated "ambiguity" which describes the ratio of reprojection error
+        between the "best" and "alternate" solutions.
+
+        This method is intended for use with individual AprilTags, and will not work unless exactly
+        4 points are provided.
+
+        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
+        :param distCoeffs: The camera distortion matrix in standard opencv form
+        :param modelTrls: The translations of the object corners. These should have the object pose
+            as their origin. These must come in a specific, pose-relative order (in NWU):
+
+            - Point 0: [0, -squareLength / 2, squareLength / 2]
+            - Point 1: [0, squareLength / 2, squareLength / 2]
+            - Point 2: [0, squareLength / 2, -squareLength / 2]
+            - Point 3: [0, -squareLength / 2, -squareLength / 2]
+        :param imagePoints: The projection of these 3d object points into the 2d camera image. The
+            order should match the given object point translations.
+
+        :returns: The resulting transformation that maps the camera pose to the target pose, and the
+            ambiguity if an alternate solution is available.
+        """
         modelTrls = OpenCVHelp.reorderCircular(modelTrls, True, -1)
         imagePoints = np.array(OpenCVHelp.reorderCircular(imagePoints, True, -1))
         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
@@ -130,6 +210,7 @@ class OpenCVHelp:
         best: Transform3d = Transform3d()
 
         for tries in range(2):
+            # calc rvecs/tvecs and associated reprojection error from image points
             retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
                 objectMat,
                 imagePoints,
@@ -138,6 +219,7 @@ class OpenCVHelp:
                 flags=cv.SOLVEPNP_IPPE_SQUARE,
             )
 
+            # convert to wpilib coordinates
             best = Transform3d(
                 OpenCVHelp.tVecToTranslation(tvecs[0]),
                 OpenCVHelp.rVecToRotation(rvecs[0]),
@@ -148,6 +230,7 @@ class OpenCVHelp:
                 OpenCVHelp.rVecToRotation(rvecs[1]),
             )
 
+            # check if we got a NaN result
             if reprojectionError is not None and not math.isnan(
                 reprojectionError[0, 0]
             ):
@@ -158,6 +241,7 @@ class OpenCVHelp:
                 pt[0, 1] -= 0.001
             imagePoints[0] = pt
 
+        # solvePnP failed
         if reprojectionError is None or math.isnan(reprojectionError[0, 0]):
             print("SolvePNP_Square failed!")
             return None
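
A hedged call sketch for solvePNP_Square follows; the intrinsics, tag edge length, and pixel corners are made-up placeholder values, and the import path and PnpResult field names are assumptions:

    import numpy as np
    from wpimath.geometry import Translation3d
    from photonlibpy.estimation import OpenCVHelp  # assumed import path

    half = 0.1651 / 2  # assumed 16.51 cm tag edge
    modelTrls = [
        Translation3d(0, -half, half),
        Translation3d(0, half, half),
        Translation3d(0, half, -half),
        Translation3d(0, -half, -half),
    ]
    K = np.array([[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]])
    dist = np.zeros((8, 1))
    # 4x2 pixel corners, normally supplied by the tag detector, same order as modelTrls
    imagePoints = np.array([[300.0, 200.0], [340.0, 200.0], [340.0, 240.0], [300.0, 240.0]])

    pnp = OpenCVHelp.solvePNP_Square(K, dist, modelTrls, imagePoints)
    if pnp is not None:
        camToTag, ambiguity = pnp.best, pnp.ambiguity  # assumed PnpResult fields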
@@ -186,6 +270,27 @@ class OpenCVHelp:
         modelTrls: list[Translation3d],
         imagePoints: np.ndarray,
     ) -> PnpResult | None:
+        """Finds the transformation that maps the camera's pose to the origin of the supplied object.
+        An "object" is simply a set of known 3d translations that correspond to the given 2d points.
+        If, for example, the object translations are given relative to the close-right corner of the
+        blue alliance (the default origin), a camera-to-origin transformation is returned. If the
+        translations are relative to a target's pose, a camera-to-target transformation is returned.
+
+        There must be at least 3 points to use this method. This does not return an alternate
+        solution; if you intend to use solvePNP on a single AprilTag, see
+        :meth:`.solvePNP_Square` instead.
+
+        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
+        :param distCoeffs: The camera distortion matrix in standard opencv form
+        :param modelTrls: The translations of the object corners, relative to the field.
+        :param imagePoints: The projection of these 3d object points into the 2d camera image. The
+            order should match the given object point translations.
+
+        :returns: The resulting transformation that maps the camera pose to the target pose. If the
+            3d model points are supplied relative to the origin, this transformation brings the
+            camera to the origin.
+        """
+
         objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
 
         retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
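
For the multi-tag path, callers stack every tag's field-relative corner translations before invoking solvePNP_SQPNP, mirroring what estimateCamPosePNP does later in this diff (a sketch; knownTags, tagModel, K, dist, and points are assumed to exist in scope):

    # Build 3d-2d correspondences across all visible tags.
    objectTrls: list[Translation3d] = []
    for tag in knownTags:  # AprilTag entries whose .pose comes from the field layout
        objectTrls.extend(tagModel.getFieldVertices(tag.pose))
    pnp = OpenCVHelp.solvePNP_SQPNP(K, dist, objectTrls, points)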
@@ -4,24 +4,38 @@ from wpimath.geometry import Pose3d, Rotation3d, Transform3d, Translation3d
 
 
 class RotTrlTransform3d:
+    """Represents a transformation that first rotates a pose around the origin, and then translates it."""
+
     def __init__(
         self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
     ):
+        """A rotation-translation transformation.
+
+        Applying this RotTrlTransform3d to poses will preserve their current origin-to-pose
+        transform, as if the origin was transformed by these components instead.
+
+        :param rot: The rotation component
+        :param trl: The translation component
+        """
         self.rot = rot
         self.trl = trl
 
     def inverse(self) -> Self:
+        """The inverse of this transformation. Applying the inverse will "undo" this transformation."""
         invRot = -self.rot
         invTrl = -(self.trl.rotateBy(invRot))
         return type(self)(invRot, invTrl)
 
     def getTransform(self) -> Transform3d:
+        """This transformation as a Transform3d (as if applied to the origin)"""
         return Transform3d(self.trl, self.rot)
 
     def getTranslation(self) -> Translation3d:
+        """The translation component of this transformation"""
         return self.trl
 
     def getRotation(self) -> Rotation3d:
+        """The rotation component of this transformation"""
         return self.rot
 
     def applyTranslation(self, trlToApply: Translation3d) -> Translation3d:
@@ -44,6 +58,11 @@ class RotTrlTransform3d:
 
     @classmethod
     def makeRelativeTo(cls, pose: Pose3d) -> Self:
+        """The rotation-translation transformation that makes poses in the world consider this pose
+        as the new origin, or changes the basis to this pose.
+
+        :param pose: The new origin
+        """
         return cls(pose.rotation(), pose.translation()).inverse()
 
     @classmethod
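
A small usage sketch of makeRelativeTo (the import path is an assumption):

    from wpimath.geometry import Pose3d, Rotation3d, Translation3d
    from photonlibpy.estimation import RotTrlTransform3d  # assumed import path

    # Re-express a world translation relative to a camera pose one meter north of the origin.
    camera = Pose3d(Translation3d(1.0, 0.0, 0.0), Rotation3d())
    toCameraBasis = RotTrlTransform3d.makeRelativeTo(camera)
    inCam = toCameraBasis.applyTranslation(Translation3d(2.0, 0.0, 0.0))  # ~(1, 0, 0)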
@@ -8,14 +8,27 @@ from . import RotTrlTransform3d
 
 
 class TargetModel:
+    """Describes the 3d model of a target."""
 
     def __init__(self):
+        """Default constructor for initializing internal class members. DO NOT USE THIS! Use
+        createPlanar, createCuboid, createSpheroid, or createArbitrary instead.
+        """
         self.vertices: List[Translation3d] = []
         self.isPlanar = False
         self.isSpherical = False
 
     @classmethod
     def createPlanar(cls, width: meters, height: meters) -> Self:
+        """Creates a rectangular, planar target model given the width and height. The model has four
+        vertices:
+
+        - Point 0: [0, -width/2, -height/2]
+        - Point 1: [0, width/2, -height/2]
+        - Point 2: [0, width/2, height/2]
+        - Point 3: [0, -width/2, height/2]
+        """
+
         tm = cls()
 
         tm.isPlanar = True
@@ -30,6 +43,18 @@ class TargetModel:
 
     @classmethod
     def createCuboid(cls, length: meters, width: meters, height: meters) -> Self:
+        """Creates a cuboid target model given the length, width, and height. The model has eight
+        vertices:
+
+        - Point 0: [length/2, -width/2, -height/2]
+        - Point 1: [length/2, width/2, -height/2]
+        - Point 2: [length/2, width/2, height/2]
+        - Point 3: [length/2, -width/2, height/2]
+        - Point 4: [-length/2, -width/2, height/2]
+        - Point 5: [-length/2, width/2, height/2]
+        - Point 6: [-length/2, width/2, -height/2]
+        - Point 7: [-length/2, -width/2, -height/2]
+        """
+
         tm = cls()
         verts = [
             Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
@@ -48,6 +73,20 @@ class TargetModel:
 
     @classmethod
     def createSpheroid(cls, diameter: meters) -> Self:
+        """Creates a spherical target model which has similar dimensions regardless of its rotation.
+        This model has four vertices:
+
+        - Point 0: [0, -radius, 0]
+        - Point 1: [0, 0, -radius]
+        - Point 2: [0, radius, 0]
+        - Point 3: [0, 0, radius]
+
+        *Q: Why these vertices?* A: This target should be oriented to the camera every frame, much
+        like a sprite/decal, and these vertices represent the ellipse vertices (maxima). These
+        vertices are used for drawing the image of this sphere, but do not match the corners that
+        will be published by photonvision.
+        """
+
         tm = cls()
 
         tm.isPlanar = False
@@ -63,6 +102,14 @@ class TargetModel:
 
     @classmethod
     def createArbitrary(cls, verts: List[Translation3d]) -> Self:
+        """Creates a target model from arbitrary 3d vertices. Automatically determines if the given
+        vertices are planar (x == 0). More than 2 vertices must be given. If this is a planar model,
+        the vertices should define a non-intersecting contour.
+
+        :param verts: Translations representing the vertices of this target model relative to its
+            pose.
+        """
+
         tm = cls()
         tm._common_construction(verts)
 
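A short construction sketch for these factories (the import path is an assumption):

    from wpimath.geometry import Pose3d
    from photonlibpy.estimation import TargetModel  # assumed import path

    # A 20 cm square planar target; corners follow createPlanar's documented order.
    model = TargetModel.createPlanar(width=0.2, height=0.2)
    corners = model.getFieldVertices(Pose3d())  # four Translation3d at the field origin
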
@@ -83,6 +130,12 @@ class TargetModel:
         self.vertices = verts
 
     def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
+        """This target's vertices offset from its field pose.
+
+        Note: If this target is spherical, use :meth:`.getOrientedPose` with this method.
+        """
+
         basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())
 
         retVal = []
@@ -94,6 +147,16 @@ class TargetModel:
 
     @classmethod
     def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
+        """Returns a Pose3d with the given target translation oriented (with its relative x-axis
+        aligned) to the camera translation. This is used for spherical targets which should not have
+        their projection change regardless of their own rotation.
+
+        :param tgtTrl: This target's translation
+        :param cameraTrl: Camera's translation
+
+        :returns: This target's pose oriented to the camera
+        """
+
         relCam = cameraTrl - tgtTrl
         orientToCam = Rotation3d(
             0.0,
@@ -11,6 +11,7 @@ class VisionEstimation:
     def getVisibleLayoutTags(
         visTags: list[PhotonTrackedTarget], layout: AprilTagFieldLayout
     ) -> list[AprilTag]:
+        """Get the visible :class:`.AprilTag`s which are in the tag layout using the visible tag IDs."""
         retVal: list[AprilTag] = []
         for tag in visTags:
             id = tag.getFiducialId()
@@ -30,12 +31,31 @@ class VisionEstimation:
         layout: AprilTagFieldLayout,
         tagModel: TargetModel,
     ) -> PnpResult | None:
+        """Performs solvePNP using 3d-2d point correspondences of visible AprilTags to estimate the
+        field-to-camera transformation. If only one tag is visible, the result may have an alternate
+        solution.
+
+        **Note:** The returned transformation is from the field origin to the camera pose!
+
+        With only one tag: :meth:`.OpenCVHelp.solvePNP_Square`
+
+        With multiple tags: :meth:`.OpenCVHelp.solvePNP_SQPNP`
+
+        :param cameraMatrix: The camera intrinsics matrix in standard opencv form
+        :param distCoeffs: The camera distortion matrix in standard opencv form
+        :param visTags: The visible tags reported by PV. Non-tag targets are automatically excluded.
+        :param layout: The known tag layout on the field
+        :param tagModel: The 3d model of the tags being detected
+
+        :returns: The transformation that maps the field origin to the camera pose. Ensure the
+            :class:`.PnpResult` is present before using it.
+        """
         if len(visTags) == 0:
             return None
 
         corners: list[TargetCorner] = []
         knownTags: list[AprilTag] = []
 
+        # ensure these are AprilTags in our layout
         for tgt in visTags:
             id = tgt.getFiducialId()
             maybePose = layout.getTagPose(id)
@@ -53,6 +73,7 @@ class VisionEstimation:
 
         points = OpenCVHelp.cornersToPoints(corners)
 
+        # single-tag pnp
        if len(knownTags) == 1:
            camToTag = OpenCVHelp.solvePNP_Square(
                cameraMatrix, distCoeffs, tagModel.getVertices(), points
@@ -74,6 +95,7 @@ class VisionEstimation:
                 altReprojErr=camToTag.altReprojErr,
             )
             return result
+        # multi-tag pnp
         else:
             objectTrls: list[Translation3d] = []
             for tag in knownTags:
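
An end-to-end sketch of estimateCamPosePNP; the intrinsics, field layout, tag model, and pipeline result are assumed to come from elsewhere in a robot program:

    pnp = VisionEstimation.estimateCamPosePNP(
        cameraMatrix=K,  # 3x3 intrinsics, standard opencv form
        distCoeffs=dist,  # distortion coefficients
        visTags=pipelineResult.getTargets(),
        layout=fieldLayout,  # AprilTagFieldLayout loaded elsewhere
        tagModel=tagModel,  # e.g. a square AprilTag model
    )
    if pnp is not None:
        fieldToCamera = pnp.best  # field origin -> camera pose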
@@ -9,6 +9,13 @@ PhotonPipelineResult_TYPE_STRING = (
 
 
 class NTTopicSet:
+    """This class is a wrapper around all per-pipeline NT topics that PhotonVision should be
+    publishing. It's split here so the sim and real-camera implementations can share a common
+    implementation of the naming and registration of the NT content.
+
+    However, we do expect that the actual logic which fills out values in the entries will be
+    different for sim vs. real camera.
+    """
 
     def __init__(self, tableName: str, cameraName: str) -> None:
         instance = nt.NetworkTableInstance.getDefault()
@@ -48,6 +48,10 @@ def setVersionCheckEnabled(enabled: bool):
 
 class PhotonCamera:
     def __init__(self, cameraName: str):
+        """Constructs a PhotonCamera from the name of the camera.
+
+        :param cameraName: The nickname of the camera (found in the PhotonVision UI).
+        """
         instance = ntcore.NetworkTableInstance.getDefault()
         self._name = cameraName
         self._tableName = "photonvision"
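
A minimal consumer sketch (the camera nickname is a placeholder, and the result accessors are assumed to mirror the Java API's hasTargets/getBestTarget):

    from photonlibpy import PhotonCamera  # assumed import path

    camera = PhotonCamera("front_cam")  # placeholder nickname from the PhotonVision UI
    result = camera.getLatestResult()
    if result.hasTargets():
        best = result.getBestTarget()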
@@ -132,6 +136,14 @@ class PhotonCamera:
         return ret
 
     def getLatestResult(self) -> PhotonPipelineResult:
+        """Returns the latest pipeline result. This is simply the most recent result received via NT.
+        Calling this multiple times will always return the most recent result.
+
+        Prefer :meth:`.getAllUnreadResults` over getLatestResult, as this function can miss
+        results or provide duplicate ones!
+        TODO: implement the thing that will take this one's place...
+        """
+
         self._versionCheck()
 
         now = RobotController.getFPGATime()
@@ -149,34 +161,85 @@ class PhotonCamera:
         return retVal
 
     def getDriverMode(self) -> bool:
+        """Returns whether the camera is in driver mode.
+
+        :returns: Whether the camera is in driver mode.
+        """
+
         return self._driverModeSubscriber.get()
 
     def setDriverMode(self, driverMode: bool) -> None:
+        """Sets driver mode.
+
+        :param driverMode: Whether to set driver mode.
+        """
+
         self._driverModePublisher.set(driverMode)
 
     def takeInputSnapshot(self) -> None:
+        """Request the camera to save a new image file from the input camera stream with overlays.
+        Images take up space in the filesystem of the PhotonCamera. Calling it frequently will fill
+        up disk space and eventually cause the system to stop working. Clear out images in
+        /opt/photonvision/photonvision_config/imgSaves frequently to prevent issues.
+        """
+
         self._inputSaveImgEntry.set(self._inputSaveImgEntry.get() + 1)
 
     def takeOutputSnapshot(self) -> None:
+        """Request the camera to save a new image file from the output stream with overlays. Images
+        take up space in the filesystem of the PhotonCamera. Calling it frequently will fill up disk
+        space and eventually cause the system to stop working. Clear out images in
+        /opt/photonvision/photonvision_config/imgSaves frequently to prevent issues.
+        """
         self._outputSaveImgEntry.set(self._outputSaveImgEntry.get() + 1)
 
     def getPipelineIndex(self) -> int:
+        """Returns the active pipeline index.
+
+        :returns: The active pipeline index.
+        """
+
         return self._pipelineIndexState.get(0)
 
     def setPipelineIndex(self, index: int) -> None:
+        """Allows the user to select the active pipeline index.
+
+        :param index: The active pipeline index.
+        """
         self._pipelineIndexRequest.set(index)
 
     def getLEDMode(self) -> VisionLEDMode:
+        """Returns the current LED mode.
+
+        :returns: The current LED mode.
+        """
+
         mode = self._ledModeState.get()
         return VisionLEDMode(mode)
 
     def setLEDMode(self, led: VisionLEDMode) -> None:
+        """Sets the LED mode.
+
+        :param led: The mode to set to.
+        """
+
         self._ledModeRequest.set(led.value)
 
     def getName(self) -> str:
+        """Returns the name of the camera. This will return the same value that was given to the
+        constructor as cameraName.
+
+        :returns: The name of the camera.
+        """
         return self._name
 
     def isConnected(self) -> bool:
+        """Returns whether the camera is connected and actively returning new data. Connection status
+        is debounced.
+
+        :returns: True if the camera is actively sending frame data, false otherwise.
+        """
+
         curHeartbeat = self._heartbeatEntry.get()
         now = Timer.getFPGATimestamp()
@@ -197,6 +260,8 @@ class PhotonCamera:
 
         _lastVersionTimeCheck = Timer.getFPGATimestamp()
 
+        # Heartbeat entry is assumed to always be present. If it's not present, we
+        # assume that a camera with that name was never connected in the first place.
         if not self._heartbeatEntry.exists():
             cameraNames = (
                 self._cameraTable.getInstance().getTable(self._tableName).getSubTables()
@@ -222,6 +287,7 @@ class PhotonCamera:
             True,
         )
 
+        # Check for connection status. Warn if disconnected.
         elif not self.isConnected():
             wpilib.reportWarning(
                 f"PhotonVision coprocessor at path {self._path} is not sending new data.",
@@ -229,8 +295,9 @@ class PhotonCamera:
         )
 
         versionString = self.versionEntry.get(defaultValue="")
-        localUUID = PhotonPipelineResult.photonStruct.MESSAGE_VERSION
 
+        # Check mdef UUID
+        localUUID = PhotonPipelineResult.photonStruct.MESSAGE_VERSION
         remoteUUID = str(self._rawBytesEntry.getTopic().getProperty("message_uuid"))
 
         if not remoteUUID: