photonlibpy 2025.0.0b2__py3-none-any.whl → 2025.0.0b4__py3-none-any.whl

@@ -20,8 +20,13 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################
 
+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import PnpResult  # noqa
 
 
 class PnpResultSerde:
@@ -20,8 +20,13 @@
 ## --> DO NOT MODIFY <--
 ###############################################################################
 
+from typing import TYPE_CHECKING
+
 from ..packet import Packet
-from ..targeting import *
+from ..targeting import *  # noqa
+
+if TYPE_CHECKING:
+    from ..targeting import TargetCorner  # noqa
 
 
 class TargetCornerSerde:
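Both serde hunks above apply the same `TYPE_CHECKING` pattern: the wildcard import stays the only runtime dependency, while the guarded import hands static type checkers a concrete name without risking a circular import at runtime. A minimal, self-contained sketch of the pattern (the `Fraction` stand-in is illustrative, not from the diff):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers (mypy/pyright); never executed,
    # so it cannot create an import cycle at runtime.
    from fractions import Fraction  # stand-in for PnpResult / TargetCorner

def decode(text: str) -> "Fraction":
    # Deferred runtime import keeps module load cheap and cycle-free.
    from fractions import Fraction
    return Fraction(text)

print(decode("3/4"))  # Fraction(3, 4)
```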
@@ -9,9 +9,18 @@ PhotonPipelineResult_TYPE_STRING = (
 
 
 class NTTopicSet:
+    """This class is a wrapper around all per-pipeline NT topics that PhotonVision should be
+    publishing. It's split out here so the sim and real-camera implementations can share a common
+    implementation of the naming and registration of the NT content.
 
-    def __init__(self) -> None:
-        self.subTable = nt.NetworkTableInstance.getDefault()
+    However, we do expect that the actual logic which fills out values in the entries will be
+    different for sim vs. real camera.
+    """
+
+    def __init__(self, tableName: str, cameraName: str) -> None:
+        instance = nt.NetworkTableInstance.getDefault()
+        photonvision_root_table = instance.getTable(tableName)
+        self.subTable = photonvision_root_table.getSubTable(cameraName)
 
     def updateEntries(self) -> None:
         options = nt.PubSubOptions()
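The new constructor replaces a bare default-instance handle with an explicit `<tableName>/<cameraName>` subtable. A hedged usage sketch; the import path is not shown in this diff and is an assumption:

```python
# Hypothetical import path; adjust to wherever NTTopicSet lives in your tree.
from photonlibpy.networktables.NTTopicSet import NTTopicSet

# Registers publishers for all per-pipeline topics under /photonvision/leftcam/...
ts = NTTopicSet(tableName="photonvision", cameraName="leftcam")
ts.updateEntries()
```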
@@ -48,6 +48,10 @@ def setVersionCheckEnabled(enabled: bool):
 
 class PhotonCamera:
     def __init__(self, cameraName: str):
+        """Constructs a PhotonCamera from the name of the camera.
+
+        :param cameraName: The nickname of the camera (found in the PhotonVision UI).
+        """
         instance = ntcore.NetworkTableInstance.getDefault()
         self._name = cameraName
         self._tableName = "photonvision"
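For context, constructing the camera this docstring describes is a one-liner (a hedged sketch; the top-level import path is assumed):

```python
from photonlibpy.photonCamera import PhotonCamera  # assumed import path

# "leftcam" must match the camera nickname configured in the PhotonVision UI.
camera = PhotonCamera("leftcam")
```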
@@ -132,6 +136,14 @@ class PhotonCamera:
         return ret
 
     def getLatestResult(self) -> PhotonPipelineResult:
+        """Returns the latest pipeline result. This is simply the most recent result received via
+        NT; calling this multiple times will always return the most recent result.
+
+        Prefer :meth:`.getAllUnreadResults` over getLatestResult, as this function can miss
+        results, or provide duplicate ones!
+        TODO implement the thing that will take this one's place...
+        """
+
         self._versionCheck()
 
         now = RobotController.getFPGATime()
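Until the replacement API lands (see the TODO above), a typical periodic loop polls the latest result. A hedged sketch; `hasTargets` follows the PhotonPipelineResult API, and `getBestTarget`/`getYaw`/`getPitch` appear later in this diff:

```python
# Inside a robot's periodic loop, with `camera` a PhotonCamera instance:
result = camera.getLatestResult()
if result.hasTargets():
    best = result.getBestTarget()
    yaw, pitch = best.getYaw(), best.getPitch()
```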
@@ -149,34 +161,85 @@ class PhotonCamera:
         return retVal
 
     def getDriverMode(self) -> bool:
+        """Returns whether the camera is in driver mode.
+
+        :returns: Whether the camera is in driver mode.
+        """
+
         return self._driverModeSubscriber.get()
 
     def setDriverMode(self, driverMode: bool) -> None:
+        """Toggles driver mode.
+
+        :param driverMode: Whether to set driver mode.
+        """
+
         self._driverModePublisher.set(driverMode)
 
     def takeInputSnapshot(self) -> None:
+        """Request the camera to save a new image file from the input camera stream with overlays.
+        Images take up space in the filesystem of the PhotonCamera. Calling it frequently will fill
+        up disk space and eventually cause the system to stop working. Clear out images in
+        /opt/photonvision/photonvision_config/imgSaves frequently to prevent issues.
+        """
+
         self._inputSaveImgEntry.set(self._inputSaveImgEntry.get() + 1)
 
     def takeOutputSnapshot(self) -> None:
+        """Request the camera to save a new image file from the output stream with overlays. Images
+        take up space in the filesystem of the PhotonCamera. Calling it frequently will fill up
+        disk space and eventually cause the system to stop working. Clear out images in
+        /opt/photonvision/photonvision_config/imgSaves frequently to prevent issues.
+        """
         self._outputSaveImgEntry.set(self._outputSaveImgEntry.get() + 1)
 
     def getPipelineIndex(self) -> int:
+        """Returns the active pipeline index.
+
+        :returns: The active pipeline index.
+        """
+
         return self._pipelineIndexState.get(0)
 
     def setPipelineIndex(self, index: int) -> None:
+        """Allows the user to select the active pipeline index.
+
+        :param index: The active pipeline index.
+        """
         self._pipelineIndexRequest.set(index)
 
     def getLEDMode(self) -> VisionLEDMode:
+        """Returns the current LED mode.
+
+        :returns: The current LED mode.
+        """
+
         mode = self._ledModeState.get()
         return VisionLEDMode(mode)
 
     def setLEDMode(self, led: VisionLEDMode) -> None:
+        """Sets the LED mode.
+
+        :param led: The mode to set to.
+        """
+
         self._ledModeRequest.set(led.value)
 
     def getName(self) -> str:
+        """Returns the name of the camera. This will return the same value that was given to the
+        constructor as cameraName.
+
+        :returns: The name of the camera.
+        """
         return self._name
 
     def isConnected(self) -> bool:
+        """Returns whether the camera is connected and actively returning new data. Connection
+        status is debounced.
+
+        :returns: True if the camera is actively sending frame data, false otherwise.
+        """
+
         curHeartbeat = self._heartbeatEntry.get()
         now = Timer.getFPGATimestamp()
 
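Taken together, the newly documented accessors support a control flow like this hedged sketch (the `VisionLEDMode.kDefault` member name is an assumption mirroring the Java API):

```python
if camera.isConnected():
    camera.setDriverMode(False)
    camera.setPipelineIndex(1)                 # a pipeline configured in the UI
    camera.setLEDMode(VisionLEDMode.kDefault)  # assumed enum member name
    print(camera.getName(), camera.getPipelineIndex(), camera.getLEDMode())
```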
@@ -197,6 +260,8 @@ class PhotonCamera:
 
         _lastVersionTimeCheck = Timer.getFPGATimestamp()
 
+        # Heartbeat entry is assumed to always be present. If it's not present, we
+        # assume that a camera with that name was never connected in the first place.
         if not self._heartbeatEntry.exists():
             cameraNames = (
                 self._cameraTable.getInstance().getTable(self._tableName).getSubTables()
@@ -222,6 +287,7 @@ class PhotonCamera:
                 True,
             )
 
+        # Check for connection status. Warn if disconnected.
        elif not self.isConnected():
            wpilib.reportWarning(
                f"PhotonVision coprocessor at path {self._path} is not sending new data.",
@@ -229,9 +295,10 @@ class PhotonCamera:
             )
 
         versionString = self.versionEntry.get(defaultValue="")
-        localUUID = PhotonPipelineResult.photonStruct.MESSAGE_VERSION
 
-        remoteUUID = self._rawBytesEntry.getTopic().getProperty("message_uuid")
+        # Check mdef UUID
+        localUUID = PhotonPipelineResult.photonStruct.MESSAGE_VERSION
+        remoteUUID = str(self._rawBytesEntry.getTopic().getProperty("message_uuid"))
 
         if not remoteUUID:
             wpilib.reportWarning(
@@ -4,8 +4,8 @@ import typing
 import cscore as cs
 import cv2 as cv
 import numpy as np
-import robotpy_apriltag
 import wpilib
+from robotpy_apriltag import AprilTagField, AprilTagFieldLayout
 from wpimath.geometry import Pose3d, Transform3d
 from wpimath.units import meters, seconds
 
@@ -26,52 +26,46 @@ from .visionTargetSim import VisionTargetSim
 
 
 class PhotonCameraSim:
+    """A handle for simulating :class:`.PhotonCamera` values. Processing simulated targets through
+    this class will change the associated PhotonCamera's results.
+    """
+
     kDefaultMinAreaPx: float = 100
 
     def __init__(
         self,
         camera: PhotonCamera,
-        props: SimCameraProperties | None = None,
+        props: SimCameraProperties = SimCameraProperties.PERFECT_90DEG(),
         minTargetAreaPercent: float | None = None,
         maxSightRange: meters | None = None,
     ):
+        """Constructs a handle for simulating :class:`.PhotonCamera` values. Processing simulated
+        targets through this class will change the associated PhotonCamera's results.
+
+        By default, the simulated camera has a 90 deg FOV with no simulated lag if props is not
+        provided; the minimum target area is 100 pixels and there is no maximum sight range unless
+        both of those params are passed to override them.
+
+        :param camera: The camera to be simulated
+        :param props: Properties of this camera such as FOV and FPS
+        :param minTargetAreaPercent: The minimum percentage (0 - 100) a detected target must take
+            up of the camera's image to be processed. Match this with your contour filtering
+            settings in the PhotonVision GUI.
+        :param maxSightRange: Maximum distance at which the target is illuminated to your camera.
+            Note that minimum target area of the image is separate from this.
+        """
 
         self.minTargetAreaPercent: float = 0.0
         self.maxSightRange: float = 1.0e99
         self.videoSimRawEnabled: bool = False
         self.videoSimWireframeEnabled: bool = False
         self.videoSimWireframeResolution: float = 0.1
-        self.videoSimProcEnabled: bool = True
-        self.ts = NTTopicSet()
+        self.videoSimProcEnabled: bool = (
+            False  # TODO switch this back to default True when the functionality is enabled
+        )
         self.heartbeatCounter: int = 0
         self.nextNtEntryTime = int(wpilib.Timer.getFPGATimestamp() * 1e6)
-        self.tagLayout = robotpy_apriltag.loadAprilTagLayoutField(
-            robotpy_apriltag.AprilTagField.k2024Crescendo
-        )
-
-        if (
-            camera is not None
-            and props is None
-            and minTargetAreaPercent is None
-            and maxSightRange is None
-        ):
-            props = SimCameraProperties.PERFECT_90DEG()
-        elif (
-            camera is not None
-            and props is not None
-            and minTargetAreaPercent is not None
-            and maxSightRange is not None
-        ):
-            pass
-        elif (
-            camera is not None
-            and props is not None
-            and minTargetAreaPercent is None
-            and maxSightRange is None
-        ):
-            pass
-        else:
-            raise Exception("Invalid Constructor Called")
+        self.tagLayout = AprilTagFieldLayout.loadField(AprilTagField.k2024Crescendo)
 
         self.cam = camera
         self.prop = props
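The constructor rewrite drops the four-way validation chain in favor of signature defaults, so all of the following are now valid (a sketch using names from this diff; `camera` construction was shown earlier):

```python
# Default: PERFECT_90DEG() props, 100 px minimum area, unlimited sight range.
simDefault = PhotonCameraSim(camera)

# Override filtering: 5% minimum area, 4 m maximum sight range.
simCustom = PhotonCameraSim(
    camera,
    SimCameraProperties.PERFECT_90DEG(),
    minTargetAreaPercent=5.0,
    maxSightRange=4.0,
)
```

One design consequence worth noting: as with any Python default argument, `SimCameraProperties.PERFECT_90DEG()` is evaluated once at definition time, so every sim constructed without explicit props shares that one properties object; that is only safe if it is treated as read-only.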
@@ -101,16 +95,11 @@ class PhotonCameraSim:
             (self.prop.getResWidth(), self.prop.getResHeight())
         )
 
-        self.ts.subTable = self.cam._cameraTable
+        self.ts = NTTopicSet("photonvision", self.cam.getName())
         self.ts.updateEntries()
 
         # Handle this last explicitly for this function signature because the other constructor is called in the initialiser list
-        if (
-            camera is not None
-            and props is not None
-            and minTargetAreaPercent is not None
-            and maxSightRange is not None
-        ):
+        if minTargetAreaPercent is not None and maxSightRange is not None:
             self.minTargetAreaPercent = minTargetAreaPercent
             self.maxSightRange = maxSightRange
 
@@ -133,22 +122,39 @@ class PhotonCameraSim:
         return self.videoSimFrameRaw
 
     def canSeeTargetPose(self, camPose: Pose3d, target: VisionTargetSim) -> bool:
+        """Determines if this target's pose should be visible to the camera without considering its
+        projected image points. Does not account for image area.
+
+        :param camPose: Camera's 3d pose
+        :param target: Vision target containing pose and shape
+
+        :returns: If this vision target can be seen before image projection.
+        """
+
         rel = CameraTargetRelation(camPose, target.getPose())
         return (
             (
+                # target translation is outside of camera's FOV
                 abs(rel.camToTargYaw.degrees())
                 < self.prop.getHorizFOV().degrees() / 2.0
                 and abs(rel.camToTargPitch.degrees())
                 < self.prop.getVertFOV().degrees() / 2.0
             )
             and (
+                # camera is behind planar target and it should be occluded
                 not target.getModel().getIsPlanar()
                 or abs(rel.targtoCamAngle.degrees()) < 90
             )
+            # target is too far
             and rel.camToTarg.translation().norm() <= self.maxSightRange
         )
 
     def canSeeCorner(self, points: np.ndarray) -> bool:
+        """Determines if all target points are inside the camera's image.
+
+        :param points: The target's 2d image points
+        """
+
         assert points.shape[1] == 1
         assert points.shape[2] == 2
         for pt in points:
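The visibility test above combines three gates: angular offset within half of each FOV, a facing check for planar targets, and a range cap. A self-contained numeric sketch of the first and last gates (planar-facing omitted for brevity; values illustrative):

```python
def visible(yaw_deg: float, pitch_deg: float, h_fov: float, v_fov: float,
            dist_m: float, max_range_m: float) -> bool:
    # inside both half-FOVs...
    in_fov = abs(yaw_deg) < h_fov / 2.0 and abs(pitch_deg) < v_fov / 2.0
    # ...and not beyond the sight range
    return in_fov and dist_m <= max_range_m

print(visible(20.0, 5.0, 90.0, 60.0, 3.0, 10.0))  # True
print(visible(50.0, 5.0, 90.0, 60.0, 3.0, 10.0))  # False: 50 > 45 deg half-FOV
```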
@@ -160,53 +166,90 @@ class PhotonCameraSim:
                 or y < 0
                 or y > self.prop.getResHeight()
             ):
-                return False
+                return False  # point is outside of resolution
 
         return True
 
     def consumeNextEntryTime(self) -> float | None:
+        """Determine if this camera should process a new frame based on performance metrics and the
+        time since the last update. This returns an Optional which is either empty if no update
+        should occur, or the timestamp in microseconds at which the frame should be received by NT.
+        If a timestamp is returned, the last frame update time becomes that timestamp.
+
+        :returns: Optional long which is empty while blocked or the NT entry timestamp in
+            microseconds if ready
+        """
+        # check if this camera is ready for another frame update
         now = int(wpilib.Timer.getFPGATimestamp() * 1e6)
         timestamp = 0
         iter = 0
+        # prepare next latest update
         while now >= self.nextNtEntryTime:
             timestamp = int(self.nextNtEntryTime)
             frameTime = int(self.prop.estSecUntilNextFrame() * 1e6)
             self.nextNtEntryTime += frameTime
 
+            # if frame time is very small, avoid blocking
             iter += 1
             if iter > 50:
                 timestamp = now
                 self.nextNtEntryTime = now + frameTime
                 break
 
+        # return the timestamp of the latest update
         if timestamp != 0:
             return timestamp
 
+        # or this camera isn't ready to process yet
         return None
 
     def setMinTargetAreaPercent(self, areaPercent: float) -> None:
+        """The minimum percentage (0 - 100) a detected target must take up of the camera's image to
+        be processed.
+        """
         self.minTargetAreaPercent = areaPercent
 
     def setMinTargetAreaPixels(self, areaPx: float) -> None:
+        """The minimum number of pixels a detected target must take up in the camera's image to be
+        processed.
+        """
         self.minTargetAreaPercent = areaPx / self.prop.getResArea() * 100.0
 
     def setMaxSightRange(self, range: meters) -> None:
+        """Maximum distance at which the target is illuminated to your camera. Note that minimum
+        target area of the image is separate from this.
+        """
         self.maxSightRange = range
 
     def enableRawStream(self, enabled: bool) -> None:
+        """Sets whether the raw video stream simulation is enabled.
+
+        Note: This may increase loop times.
+        """
+        self.videoSimRawEnabled = enabled
         raise Exception("Raw stream not implemented")
-        # self.videoSimRawEnabled = enabled
 
     def enableDrawWireframe(self, enabled: bool) -> None:
+        """Sets whether a wireframe of the field is drawn to the raw video stream.
+
+        Note: This will dramatically increase loop times.
+        """
+        self.videoSimWireframeEnabled = enabled
         raise Exception("Wireframe not implemented")
-        # self.videoSimWireframeEnabled = enabled
 
     def setWireframeResolution(self, resolution: float) -> None:
+        """Sets the resolution of the drawn wireframe if enabled. Drawn line segments will be
+        subdivided into smaller segments based on a threshold set by the resolution.
+
+        :param resolution: Resolution as a fraction (0 - 1) of the video frame's diagonal length
+            in pixels
+        """
         self.videoSimWireframeResolution = resolution
 
     def enableProcessedStream(self, enabled: bool) -> None:
+        """Sets whether the processed video stream simulation is enabled."""
+        self.videoSimProcEnabled = enabled
         raise Exception("Processed stream not implemented")
-        # self.videoSimProcEnabled = enabled
 
     def process(
         self, latency: seconds, cameraPose: Pose3d, targets: list[VisionTargetSim]
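The pacing loop in consumeNextEntryTime can be exercised standalone; this sketch reproduces its arithmetic with plain integer microseconds, including the 50-iteration guard against very small frame times:

```python
def consume(now_us: int, next_us: int, frame_us: int) -> tuple[int | None, int]:
    """Return (timestamp or None, new next-entry deadline)."""
    timestamp, iters = 0, 0
    while now_us >= next_us:
        timestamp = next_us
        next_us += frame_us          # prepare next latest update
        iters += 1
        if iters > 50:               # avoid blocking on tiny frame times
            timestamp, next_us = now_us, now_us + frame_us
            break
    return (timestamp if timestamp != 0 else None), next_us

# Camera due for a frame: returns the most recent deadline that has passed.
print(consume(1_000_000, 900_000, 33_000))    # (999000, 1032000)
# Not due yet: returns None and leaves the deadline unchanged.
print(consume(1_000_000, 1_100_000, 33_000))  # (None, 1100000)
```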
@@ -217,27 +260,32 @@ class PhotonCameraSim:
 
         targets.sort(key=distance, reverse=True)
 
-        visibleTgts: list[
-            typing.Tuple[VisionTargetSim, list[typing.Tuple[float, float]]]
-        ] = []
+        # all targets visible before noise
+        visibleTgts: list[typing.Tuple[VisionTargetSim, np.ndarray]] = []
+        # all targets actually detected by camera (after noise)
         detectableTgts: list[PhotonTrackedTarget] = []
 
+        # basis change from world coordinates to camera coordinates
         camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
 
         for tgt in targets:
+            # pose isn't visible, skip to next
             if not self.canSeeTargetPose(cameraPose, tgt):
                 continue
 
+            # find target's 3d corner points
             fieldCorners = tgt.getFieldVertices()
             isSpherical = tgt.getModel().getIsSpherical()
-            if isSpherical:
+            if isSpherical:  # target is spherical
                 model = tgt.getModel()
+                # orient the model to the camera (like a sprite/decal) so it appears similar regardless of view
                 fieldCorners = model.getFieldVertices(
                     TargetModel.getOrientedPose(
                         tgt.getPose().translation(), cameraPose.translation()
                     )
                 )
 
+            # project 3d target points into 2d image points
             imagePoints = OpenCVHelp.projectPoints(
                 self.prop.getIntrinsics(),
                 self.prop.getDistCoeffs(),
@@ -245,9 +293,11 @@ class PhotonCameraSim:
                 fieldCorners,
             )
 
+            # spherical targets need a rotated rectangle of their midpoints for visualization
             if isSpherical:
                 center = OpenCVHelp.avgPoint(imagePoints)
                 l: int = 0
+                # reference point (left side midpoint)
                 for i in range(4):
                     if imagePoints[i, 0, 0] < imagePoints[l, 0, 0].x:
                         l = i
@@ -258,6 +308,7 @@ class PhotonCameraSim:
             ] * 4
             t = (l + 1) % 4
             b = (l + 1) % 4
+            r = 0
             for i in range(4):
                 if i == l:
                     continue
@@ -270,24 +321,32 @@ class PhotonCameraSim:
             for i in range(4):
                 if i != t and i != l and i != b:
                     r = i
+            # create RotatedRect from midpoints
             rect = cv.RotatedRect(
-                center,
+                (center[0, 0], center[0, 1]),
                 (
                     imagePoints[r, 0, 0] - lc[0, 0],
                     imagePoints[b, 0, 1] - imagePoints[t, 0, 1],
                 ),
                 -angles[r],
             )
-            imagePoints = rect.points()
+            # set target corners to rect corners
+            imagePoints = np.array([[p[0], p[1], p[2]] for p in rect.points()])
 
+            # save visible targets for raw video stream simulation
             visibleTgts.append((tgt, imagePoints))
+            # estimate pixel noise
             noisyTargetCorners = self.prop.estPixelNoise(imagePoints)
+            # find the minimum area rectangle of target corners
             minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners)
             minAreaRectPts = minAreaRect.points()
+            # find the (naive) 2d yaw/pitch
             centerPt = minAreaRect.center
             centerRot = self.prop.getPixelRot(centerPt)
+            # find contour area
             areaPercent = self.prop.getContourAreaPercent(noisyTargetCorners)
 
+            # projected target can't be detected, skip to next
             if (
                 not self.canSeeCorner(noisyTargetCorners)
                 or not areaPercent >= self.minTargetAreaPercent
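For reference, `OpenCVHelp.getMinAreaRect` and the corner extraction above presumably sit on top of the standard cv2 calls; a runnable sketch of that assumption:

```python
import cv2 as cv
import numpy as np

# Four noisy corner points in the Nx1x2 layout used above.
pts = np.array([[[10, 10]], [[40, 12]], [[38, 30]], [[12, 28]]], dtype=np.float32)

rect = cv.minAreaRect(pts)    # ((cx, cy), (w, h), angle_degrees)
corners = cv.boxPoints(rect)  # 4x2 array of the rectangle's corners
print(rect[0], corners.shape) # center, (4, 2)
```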
@@ -296,6 +355,7 @@ class PhotonCameraSim:
 
             pnpSim: PnpResult | None = None
             if tgt.fiducialId >= 0 and len(tgt.getFieldVertices()) == 4:
+                # single AprilTag solvePNP
                 pnpSim = OpenCVHelp.solvePNP_Square(
                     self.prop.getIntrinsics(),
                     self.prop.getDistCoeffs(),
@@ -325,13 +385,14 @@ class PhotonCameraSim:
             )
 
         # Video streams disabled for now
-        if self.enableRawStream:
+        if self.videoSimRawEnabled:
+            # TODO: video streams disabled for now; port and uncomment when implemented
             # VideoSimUtil::UpdateVideoProp(videoSimRaw, prop);
             # cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()};
             # cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1);
             # blankFrame.assignTo(videoSimFrameRaw);
             pass
-        if self.enableProcessedStream:
+        if self.videoSimProcEnabled:
             # VideoSimUtil::UpdateVideoProp(videoSimProcessed, prop);
             pass
 
@@ -343,6 +404,7 @@ class PhotonCameraSim:
 
         if len(visibleLayoutTags) > 1:
             usedIds = [tag.ID for tag in visibleLayoutTags]
+            # sort() sorts in ascending order by default
             usedIds.sort()
             pnpResult = VisionEstimation.estimateCamPosePNP(
                 self.prop.getIntrinsics(),
@@ -354,10 +416,16 @@ class PhotonCameraSim:
         if pnpResult is not None:
             multiTagResults = MultiTargetPNPResult(pnpResult, usedIds)
 
+        # put this simulated data to NT
         self.heartbeatCounter += 1
+        now_micros = wpilib.Timer.getFPGATimestamp() * 1e6
         return PhotonPipelineResult(
             metadata=PhotonPipelineMetadata(
-                self.heartbeatCounter, int(latency * 1e6), 1000000
+                self.heartbeatCounter,
+                int(now_micros - latency * 1e6),
+                int(now_micros),
+                # Pretend like we heard a pong recently
+                int(np.random.uniform(950, 1050)),
             ),
             targets=detectableTgts,
             multitagResult=multiTagResults,
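The metadata change replaces the fixed `1000000` with real capture and publish times, so downstream latency math works in sim. A quick arithmetic check of the two timestamps (values illustrative):

```python
latency_s = 0.02                # 20 ms of simulated pipeline latency
now_micros = 5_000_000          # pretend FPGA time, in microseconds

capture_us = int(now_micros - latency_s * 1e6)  # 4_980_000
publish_us = int(now_micros)                    # 5_000_000
assert publish_us - capture_us == 20_000        # latency survives the round trip
```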
@@ -366,6 +434,13 @@ class PhotonCameraSim:
     def submitProcessedFrame(
         self, result: PhotonPipelineResult, receiveTimestamp: float | None
     ):
+        """Simulate one processed frame of vision data, putting one result to NT. The image capture
+        timestamp overrides :meth:`.PhotonPipelineResult.getTimestampSeconds` for more precise latency simulation.
+
+        :param result: The pipeline result to submit
+        :param receiveTimestamp: The (sim) timestamp when this result was read by NT, in microseconds.
+            If not passed, the image capture time is assumed to be (current time - latency).
+        """
         if receiveTimestamp is None:
             receiveTimestamp = wpilib.Timer.getFPGATimestamp() * 1e6
         receiveTimestamp = int(receiveTimestamp)
@@ -385,6 +460,7 @@ class PhotonCameraSim:
             self.ts.targetSkewEntry.set(0.0, receiveTimestamp)
         else:
             bestTarget = result.getBestTarget()
+            assert bestTarget
 
             self.ts.targetPitchEntry.set(bestTarget.getPitch(), receiveTimestamp)
             self.ts.targetYawEntry.set(bestTarget.getYaw(), receiveTimestamp)
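End to end, the simulated-frame flow documented above looks roughly like the following hedged sketch; `camSim`, `cameraPose`, and `targets` are placeholders for objects built elsewhere in the sim:

```python
# Placeholders: `camSim` (PhotonCameraSim), `cameraPose` (Pose3d), and
# `targets` (list[VisionTargetSim]) are assumed to be constructed elsewhere.
result = camSim.process(0.02, cameraPose, targets)  # 20 ms simulated latency

timestamp = camSim.consumeNextEntryTime()
if timestamp is not None:
    camSim.submitProcessedFrame(result, timestamp)  # explicit NT receive time (us)
```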