photonlibpy 2025.0.0b3__py3-none-any.whl → 2025.0.0b4__py3-none-any.whl

@@ -26,6 +26,10 @@ from .visionTargetSim import VisionTargetSim
 
 
 class PhotonCameraSim:
+    """A handle for simulating :class:`.PhotonCamera` values. Processing simulated targets through this
+    class will change the associated PhotonCamera's results.
+    """
+
     kDefaultMinAreaPx: float = 100
 
     def __init__(
@@ -35,6 +39,21 @@ class PhotonCameraSim:
         minTargetAreaPercent: float | None = None,
         maxSightRange: meters | None = None,
     ):
+        """Constructs a handle for simulating :class:`.PhotonCamera` values. Processing simulated
+        targets through this class will change the associated PhotonCamera's results.
+
+        By default, the constructed camera has a 90 deg FOV with no simulated lag if `prop` is not
+        provided! By default, the minimum target area is 100 pixels and there is no maximum sight
+        range unless both parameters are passed to override them.
+
+        :param camera: The camera to be simulated
+        :param prop: Properties of this camera such as FOV and FPS
+        :param minTargetAreaPercent: The minimum percentage (0 - 100) a detected target must take up of
+            the camera's image to be processed. Match this with your contour filtering settings in the
+            PhotonVision GUI.
+        :param maxSightRange: Maximum distance at which the target is illuminated to your camera.
+            Note that the minimum target area of the image is separate from this.
+        """
 
         self.minTargetAreaPercent: float = 0.0
         self.maxSightRange: float = 1.0e99
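
For orientation, a minimal construction sketch of the documented constructor; the import paths and the camera name here are assumptions for illustration, not taken from this diff:

    from photonlibpy.photonCamera import PhotonCamera  # import path assumed
    from photonlibpy.simulation import PhotonCameraSim, SimCameraProperties  # import path assumed

    camera = PhotonCamera("frontCam")  # hypothetical camera name
    props = SimCameraProperties.PERFECT_90DEG()
    # both parameters must be passed to override the 100 px / unlimited-range defaults
    camSim = PhotonCameraSim(camera, props, minTargetAreaPercent=5.0, maxSightRange=4.0)
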
@@ -103,22 +122,39 @@ class PhotonCameraSim:
         return self.videoSimFrameRaw
 
     def canSeeTargetPose(self, camPose: Pose3d, target: VisionTargetSim) -> bool:
+        """Determines if this target's pose should be visible to the camera without considering its
+        projected image points. Does not account for image area.
+
+        :param camPose: Camera's 3d pose
+        :param target: Vision target containing pose and shape
+
+        :returns: If this vision target can be seen before image projection.
+        """
+
         rel = CameraTargetRelation(camPose, target.getPose())
         return (
             (
+                # target translation is within the camera's FOV
                 abs(rel.camToTargYaw.degrees())
                 < self.prop.getHorizFOV().degrees() / 2.0
                 and abs(rel.camToTargPitch.degrees())
                 < self.prop.getVertFOV().degrees() / 2.0
             )
             and (
+                # a planar target is occluded if the camera is behind it
                 not target.getModel().getIsPlanar()
                 or abs(rel.targtoCamAngle.degrees()) < 90
             )
+            # and the target is within sight range
             and rel.camToTarg.translation().norm() <= self.maxSightRange
         )
 
     def canSeeCorner(self, points: np.ndarray) -> bool:
+        """Determines if all target points are inside the camera's image.
+
+        :param points: The target's 2d image points
+        """
+
         assert points.shape[1] == 1
         assert points.shape[2] == 2
         for pt in points:
@@ -130,51 +166,88 @@ class PhotonCameraSim:
                 or y < 0
                 or y > self.prop.getResHeight()
             ):
-                return False
+                return False  # point is outside of resolution
 
         return True
 
     def consumeNextEntryTime(self) -> float | None:
+        """Determine if this camera should process a new frame based on performance metrics and the
+        time since the last update. If a timestamp is returned, the last frame update time becomes
+        that timestamp.
+
+        :returns: None while blocked, or the NT entry timestamp in microseconds if the camera is
+            ready to process a new frame
+        """
+        # check if this camera is ready for another frame update
         now = int(wpilib.Timer.getFPGATimestamp() * 1e6)
         timestamp = 0
         iter = 0
+        # prepare next latest update
         while now >= self.nextNtEntryTime:
             timestamp = int(self.nextNtEntryTime)
             frameTime = int(self.prop.estSecUntilNextFrame() * 1e6)
             self.nextNtEntryTime += frameTime
 
+            # if frame time is very small, avoid blocking
             iter += 1
             if iter > 50:
                 timestamp = now
                 self.nextNtEntryTime = now + frameTime
                 break
 
+        # return the timestamp of the latest update
         if timestamp != 0:
             return timestamp
 
+        # or this camera isn't ready to process yet
         return None
 
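
To make the frame-gating behavior concrete, a hedged sketch of a periodic sim loop; `process`'s exact argument list is assumed from the hunks below, and the surrounding wiring is invented for illustration:

    timestampMicros = camSim.consumeNextEntryTime()
    if timestampMicros is not None:
        # the camera is ready: produce a result and publish it at that timestamp
        result = camSim.process(latency, cameraPose, targets)  # signature assumed
        camSim.submitProcessedFrame(result, timestampMicros)
    # otherwise the camera is still blocked until its next frame time
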
     def setMinTargetAreaPercent(self, areaPercent: float) -> None:
+        """The minimum percentage (0 - 100) a detected target must take up of the camera's image to
+        be processed.
+        """
         self.minTargetAreaPercent = areaPercent
 
     def setMinTargetAreaPixels(self, areaPx: float) -> None:
+        """The minimum number of pixels a detected target must take up in the camera's image to be
+        processed.
+        """
         self.minTargetAreaPercent = areaPx / self.prop.getResArea() * 100.0
 
     def setMaxSightRange(self, range: meters) -> None:
+        """Maximum distance at which the target is illuminated to your camera. Note that the minimum
+        target area of the image is separate from this.
+        """
         self.maxSightRange = range
 
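
The two area setters are equivalent up to a resolution-dependent conversion; a quick worked example at 640x480 (numbers are illustrative):

    resArea = 640 * 480                       # 307200 px
    areaPx = 100                              # kDefaultMinAreaPx
    areaPercent = areaPx / resArea * 100.0    # ~0.0326 % of the image
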
     def enableRawStream(self, enabled: bool) -> None:
+        """Sets whether the raw video stream simulation is enabled.
+
+        Note: This may increase loop times.
+        """
         self.videoSimRawEnabled = enabled
         raise Exception("Raw stream not implemented")
 
     def enableDrawWireframe(self, enabled: bool) -> None:
+        """Sets whether a wireframe of the field is drawn to the raw video stream.
+
+        Note: This will dramatically increase loop times.
+        """
         self.videoSimWireframeEnabled = enabled
         raise Exception("Wireframe not implemented")
 
     def setWireframeResolution(self, resolution: float) -> None:
+        """Sets the resolution of the drawn wireframe if enabled. Drawn line segments will be
+        subdivided into smaller segments based on a threshold set by the resolution.
+
+        :param resolution: Resolution as a fraction (0 - 1) of the video frame's diagonal length in
+            pixels
+        """
         self.videoSimWireframeResolution = resolution
 
     def enableProcessedStream(self, enabled: bool) -> None:
+        """Sets whether the processed video stream simulation is enabled."""
         self.videoSimProcEnabled = enabled
         raise Exception("Processed stream not implemented")
 
@@ -187,25 +260,32 @@ class PhotonCameraSim:
 
         targets.sort(key=distance, reverse=True)
 
+        # all targets visible before noise
         visibleTgts: list[typing.Tuple[VisionTargetSim, np.ndarray]] = []
+        # all targets actually detected by camera (after noise)
         detectableTgts: list[PhotonTrackedTarget] = []
 
+        # basis change from world coordinates to camera coordinates
         camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
 
         for tgt in targets:
+            # pose isn't visible, skip to next
             if not self.canSeeTargetPose(cameraPose, tgt):
                 continue
 
+            # find target's 3d corner points
             fieldCorners = tgt.getFieldVertices()
             isSpherical = tgt.getModel().getIsSpherical()
-            if isSpherical:
+            if isSpherical:  # target is spherical
                 model = tgt.getModel()
+                # orient the model to the camera (like a sprite/decal) so it appears similar regardless of view
                 fieldCorners = model.getFieldVertices(
                     TargetModel.getOrientedPose(
                         tgt.getPose().translation(), cameraPose.translation()
                     )
                 )
 
+            # project 3d target points into 2d image points
             imagePoints = OpenCVHelp.projectPoints(
                 self.prop.getIntrinsics(),
                 self.prop.getDistCoeffs(),
@@ -213,9 +293,11 @@ class PhotonCameraSim:
                 fieldCorners,
             )
 
+            # spherical targets need a rotated rectangle of their midpoints for visualization
             if isSpherical:
                 center = OpenCVHelp.avgPoint(imagePoints)
                 l: int = 0
+                # reference point (left side midpoint)
                 for i in range(4):
                     if imagePoints[i, 0, 0] < imagePoints[l, 0, 0]:
                         l = i
@@ -239,6 +321,7 @@ class PhotonCameraSim:
                 for i in range(4):
                     if i != t and i != l and i != b:
                         r = i
+                # create RotatedRect from midpoints
                 rect = cv.RotatedRect(
                     (center[0, 0], center[0, 1]),
                     (
@@ -247,16 +330,23 @@ class PhotonCameraSim:
                     ),
                     -angles[r],
                 )
+                # set target corners to rect corners
                 imagePoints = np.array([[[p[0], p[1]]] for p in rect.points()])
 
+            # save visible targets for raw video stream simulation
             visibleTgts.append((tgt, imagePoints))
+            # estimate pixel noise
             noisyTargetCorners = self.prop.estPixelNoise(imagePoints)
+            # find the minimum area rectangle of target corners
             minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners)
             minAreaRectPts = minAreaRect.points()
+            # find the (naive) 2d yaw/pitch
             centerPt = minAreaRect.center
             centerRot = self.prop.getPixelRot(centerPt)
+            # find contour area
             areaPercent = self.prop.getContourAreaPercent(noisyTargetCorners)
 
+            # projected target can't be detected, skip to next
             if (
                 not self.canSeeCorner(noisyTargetCorners)
                 or not areaPercent >= self.minTargetAreaPercent
@@ -265,6 +355,7 @@ class PhotonCameraSim:
 
             pnpSim: PnpResult | None = None
             if tgt.fiducialId >= 0 and len(tgt.getFieldVertices()) == 4:
+                # single AprilTag solvePnP
                 pnpSim = OpenCVHelp.solvePNP_Square(
                     self.prop.getIntrinsics(),
                     self.prop.getDistCoeffs(),
@@ -295,6 +386,7 @@ class PhotonCameraSim:
 
         # Video streams disabled for now
         if self.videoSimRawEnabled:
+            # TODO: port this when the raw video stream is implemented
             # VideoSimUtil::UpdateVideoProp(videoSimRaw, prop);
             # cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()};
             # cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1);
@@ -312,6 +404,7 @@ class PhotonCameraSim:
 
         if len(visibleLayoutTags) > 1:
             usedIds = [tag.ID for tag in visibleLayoutTags]
+            # sort the tag IDs; sort() is ascending by default
             usedIds.sort()
             pnpResult = VisionEstimation.estimateCamPosePNP(
                 self.prop.getIntrinsics(),
@@ -323,10 +416,16 @@ class PhotonCameraSim:
             if pnpResult is not None:
                 multiTagResults = MultiTargetPNPResult(pnpResult, usedIds)
 
+        # put this simulated data to NT
         self.heartbeatCounter += 1
+        now_micros = wpilib.Timer.getFPGATimestamp() * 1e6
         return PhotonPipelineResult(
             metadata=PhotonPipelineMetadata(
-                self.heartbeatCounter, int(latency * 1e6), 1000000
+                self.heartbeatCounter,
+                int(now_micros - latency * 1e6),
+                int(now_micros),
+                # Pretend like we heard a pong recently
+                int(np.random.uniform(950, 1050)),
             ),
             targets=detectableTgts,
             multitagResult=multiTagResults,
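
With the new metadata shape, capture and publish times are stored separately, so a consumer can recover the simulated latency as their difference; a small sketch (attribute names assumed, not shown in this diff):

    meta = result.metadata  # a result produced by the code above
    latencyMicros = meta.publishTimestampMicros - meta.captureTimestampMicros  # names assumed
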
@@ -335,6 +434,13 @@ class PhotonCameraSim:
     def submitProcessedFrame(
         self, result: PhotonPipelineResult, receiveTimestamp: float | None
     ):
+        """Simulate one processed frame of vision data, putting one result to NT. The image capture
+        timestamp overrides :meth:`.PhotonPipelineResult.getTimestampSeconds` for more precise
+        latency simulation.
+
+        :param result: The pipeline result to submit
+        :param receiveTimestamp: The (sim) timestamp when this result was read by NT, in microseconds.
+            If None, the image capture time is assumed to be (current time - latency).
+        """
         if receiveTimestamp is None:
             receiveTimestamp = wpilib.Timer.getFPGATimestamp() * 1e6
         receiveTimestamp = int(receiveTimestamp)
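
A hedged usage sketch of the two call patterns this docstring describes:

    # let the sim assume the result was received "now" (capture time = now - latency)
    camSim.submitProcessedFrame(result, None)
    # or pin the NT receive time to an explicit sim timestamp in microseconds
    camSim.submitProcessedFrame(result, timestampMicros)
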
@@ -11,7 +11,22 @@ from ..estimation import RotTrlTransform3d
 
 
 class SimCameraProperties:
+    """Calibration and performance values for this camera.
+
+    The resolution will affect the accuracy of projected (3d to 2d) target corners and, similarly,
+    the severity of image noise on estimation (2d to 3d).
+
+    The camera intrinsics and distortion coefficients describe the results of calibration and how
+    to map between 3d field points and 2d image points.
+
+    The performance values (framerate/exposure time, latency) determine how often results should
+    be updated and with how much latency in simulation. High exposure time causes motion blur which
+    can inhibit target detection while moving. Note that latency estimation does not account for
+    network latency, and the latency reported will always be perfect.
+    """
+
     def __init__(self):
+        """Default constructor; equivalent to :meth:`.PERFECT_90DEG`"""
         self.resWidth: int = -1
         self.resHeight: int = -1
         self.camIntrinsics: np.ndarray = np.zeros((3, 3))  # [3,3]
@@ -38,14 +53,18 @@ class SimCameraProperties:
         fovWidth = Rotation2d(math.atan(diagRatio * (width / resDiag)) * 2)
         fovHeight = Rotation2d(math.atan(diagRatio * (height / resDiag)) * 2)
 
+        # assume no distortion
         newDistCoeffs = np.zeros((8, 1))
 
+        # assume centered principal point (pixels)
         cx = width / 2.0 - 0.5
         cy = height / 2.0 - 0.5
 
+        # use the given fov to determine focal lengths (pixels)
         fx = cx / math.tan(fovWidth.radians() / 2.0)
         fy = cy / math.tan(fovHeight.radians() / 2.0)
 
+        # create camera intrinsics matrix
         newCamIntrinsics = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
 
         self.setCalibrationFromIntrinsics(
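
A worked example of the focal-length math above for a 960x720 sensor with a 90 degree diagonal FOV (standalone arithmetic mirroring the hunk, not calling the library):

    import math

    width, height, fovDiagDeg = 960, 720, 90.0
    resDiag = math.sqrt(width * width + height * height)      # 1200.0
    diagRatio = math.tan(math.radians(fovDiagDeg) / 2.0)      # 1.0
    fovWidth = math.atan(diagRatio * (width / resDiag)) * 2   # ~1.3495 rad (~77.3 deg)
    cx = width / 2.0 - 0.5                                    # 479.5
    fx = cx / math.tan(fovWidth / 2.0)                        # 599.375 px
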
@@ -65,6 +84,7 @@ class SimCameraProperties:
         self.camIntrinsics = newCamIntrinsics
         self.distCoeffs = newDistCoeffs
 
+        # left, right, up, and down view planes
         p = [
             Translation3d(
                 1.0,
@@ -110,16 +130,33 @@ class SimCameraProperties:
         self.errorStdDevPx = newErrorStdDevPx
 
     def setFPS(self, fps: hertz):
+        """
+        :param fps: The average frames per second the camera should process at. :strong:`Exposure time
+            limits FPS if set!`
+        """
+
         self.frameSpeed = max(1.0 / fps, self.exposureTime)
 
     def setExposureTime(self, newExposureTime: seconds):
+        """
+        :param newExposureTime: The amount of time the "shutter" is open for one frame. Affects motion
+            blur. **Frame speed (from FPS) is limited to this!**
+        """
+
         self.exposureTime = newExposureTime
         self.frameSpeed = max(self.frameSpeed, self.exposureTime)
 
     def setAvgLatency(self, newAvgLatency: seconds):
+        """
+        :param newAvgLatency: The average latency (from image capture to data published) in seconds a
+            frame should have
+        """
         self.avgLatency = newAvgLatency
 
     def setLatencyStdDev(self, newLatencyStdDev: seconds):
+        """
+        :param newLatencyStdDev: The standard deviation in seconds of the latency
+        """
         self.latencyStdDev = newLatencyStdDev
 
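
A hedged sketch of dialing in the performance values documented above (all durations in seconds, matching the type hints):

    props = SimCameraProperties.PERFECT_90DEG()
    props.setFPS(30)               # ~33 ms frame speed, unless exposure time dominates
    props.setExposureTime(0.010)   # frame speed is clamped to at least this
    props.setAvgLatency(0.030)     # average capture-to-publish latency
    props.setLatencyStdDev(0.005)  # latency jitter
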
     def getResWidth(self) -> int:
@@ -156,21 +193,43 @@ class SimCameraProperties:
         return self.latencyStdDev
 
     def getContourAreaPercent(self, points: np.ndarray) -> float:
+        """The percentage (0 - 100) of this camera's resolution that the contour takes up in the
+        image.
+
+        :param points: Points of the contour
+        """
+
         return cv.contourArea(cv.convexHull(points)) / self.getResArea() * 100.0
 
     def getPixelYaw(self, pixelX: float) -> Rotation2d:
+        """The yaw from the principal point of this camera to the pixel x value. Positive values left."""
         fx = self.camIntrinsics[0, 0]
+        # account for principal point not being centered
         cx = self.camIntrinsics[0, 2]
         xOffset = cx - pixelX
         return Rotation2d(fx, xOffset)
 
     def getPixelPitch(self, pixelY: float) -> Rotation2d:
+        """The pitch from the principal point of this camera to the pixel y value. Pitch is positive
+        down.
+
+        Note that this angle is naively computed and may be incorrect. See
+        :meth:`.getCorrectedPixelRot`.
+        """
+
         fy = self.camIntrinsics[1, 1]
+        # account for principal point not being centered
         cy = self.camIntrinsics[1, 2]
         yOffset = cy - pixelY
         return Rotation2d(fy, -yOffset)
 
     def getPixelRot(self, point: cv.typing.Point2f) -> Rotation3d:
+        """Finds the yaw and pitch to the given image point. Yaw is positive left, and pitch is
+        positive down.
+
+        Note that pitch is naively computed and may be incorrect. See
+        :meth:`.getCorrectedPixelRot`.
+        """
+
         return Rotation3d(
             0.0,
             self.getPixelPitch(point[1]).radians(),
@@ -178,6 +237,27 @@ class SimCameraProperties:
         )
 
     def getCorrectedPixelRot(self, point: cv.typing.Point2f) -> Rotation3d:
+        """Gives the yaw and pitch of the line intersecting the camera lens and the given pixel
+        coordinates on the sensor. Yaw is positive left, and pitch is positive down.
+
+        The pitch traditionally calculated from pixel offsets does not correctly account for non-zero
+        values of yaw because of perspective distortion (not to be confused with lens distortion).
+        For example, the pitch angle is naively calculated as::
+
+            pitch = arctan(pixel y offset / focal length y)
+
+        However, using the focal length as a side of the associated right triangle is not correct when
+        the pixel x value is not 0, because the distance from this pixel (projected on the x-axis) to
+        the camera lens increases. Projecting a line back out of the camera with these naive angles
+        will not intersect the 3d point that was originally projected into this 2d pixel. Instead,
+        this length should be::
+
+            focal length y ⟶ (focal length y / cos(arctan(pixel x offset / focal length x)))
+
+        :returns: Rotation3d with the yaw and pitch of the line projected out of the camera from the
+            given pixel (roll is zero).
+        """
+
         fx = self.camIntrinsics[0, 0]
         cx = self.camIntrinsics[0, 2]
         xOffset = cx - point[0]
@@ -191,11 +271,13 @@ class SimCameraProperties:
         return Rotation3d(0.0, pitch.radians(), yaw.radians())
 
     def getHorizFOV(self) -> Rotation2d:
+        # sum of the FOV to the left and right of the principal point
         left = self.getPixelYaw(0)
         right = self.getPixelYaw(self.resWidth)
         return left - right
 
     def getVertFOV(self) -> Rotation2d:
+        # sum of the FOV above and below the principal point
         above = self.getPixelPitch(0)
         below = self.getPixelPitch(self.resHeight)
         return below - above
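
A numeric sketch of the perspective correction described in the docstring above (illustrative values, standalone arithmetic):

    import math

    fx, fy = 600.0, 600.0             # focal lengths in pixels
    xOffset, yOffset = 300.0, 200.0   # pixel offsets from the principal point

    naivePitch = math.atan(yOffset / fy)                         # ~18.4 deg, ignores yaw
    yaw = math.atan(xOffset / fx)                                # ~26.6 deg
    correctedPitch = math.atan(yOffset / (fy / math.cos(yaw)))   # ~16.6 deg, uses the longer leg
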
@@ -208,9 +290,34 @@ class SimCameraProperties:
     def getVisibleLine(
         self, camRt: RotTrlTransform3d, a: Translation3d, b: Translation3d
     ) -> typing.Tuple[float | None, float | None]:
+        """Determines where the line segment defined by the two given translations intersects the
+        camera's frustum/field-of-vision, if at all.
+
+        The line is parametrized so any of its points ``p = t * (b - a) + a``. This method returns
+        these values of t, minimum first, defining the region of the line segment which is visible in
+        the frustum. If both ends of the line segment are visible, this simply returns (0, 1). If, for
+        example, point b is visible while a is not, and half of the line segment is inside the camera
+        frustum, (0.5, 1) would be returned.
+
+        :param camRt: The change in basis from world coordinates to camera coordinates. See
+            :meth:`.RotTrlTransform3d.makeRelativeTo`.
+        :param a: The initial translation of the line
+        :param b: The final translation of the line
+
+        :returns: A tuple of two floats, either of which may be None:
+
+            - (float, float): Two parametrized values (t), minimum first, representing which segment
+              of the line is visible in the camera frustum.
+            - (float, None): One value (t) representing a single intersection point. For example, the
+              line only intersects the intersection of two adjacent viewplanes.
+            - (None, None): No values. The line segment is not visible in the camera frustum.
+        """
+
+        # translations relative to the camera
         relA = camRt.applyTranslation(a)
         relB = camRt.applyTranslation(b)
 
+        # check if both ends are behind the camera
         if relA.X() <= 0.0 and relB.X() <= 0.0:
             return (None, None)
 
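
To make the parametrization concrete, a tiny standalone illustration (values invented):

    a = (0.0, 0.0, 0.0)
    b = (2.0, 0.0, 0.0)

    def p(t):
        # p(t) = t * (b - a) + a
        return tuple(t * (bi - ai) + ai for ai, bi in zip(a, b))

    p(0.5)  # (1.0, 0.0, 0.0); a return of (0.5, 1.0) means only the half nearer b is visible
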
@@ -221,6 +328,7 @@ class SimCameraProperties:
         aVisible = True
         bVisible = True
 
+        # check if the ends of the line segment are visible
         for normal in self.viewplanes:
             aVisibility = av.dot(normal)
             if aVisibility < 0:
@@ -229,38 +337,54 @@ class SimCameraProperties:
             bVisibility = bv.dot(normal)
             if bVisibility < 0:
                 bVisible = False
+            # both ends are outside at least one of the same viewplanes
             if aVisibility <= 0 and bVisibility <= 0:
                 return (None, None)
 
+        # both ends are inside the frustum
         if aVisible and bVisible:
             return (0.0, 1.0)
 
+        # parametrized (t=0 at a, t=1 at b) intersections with viewplanes
         intersections = [float("nan"), float("nan"), float("nan"), float("nan")]
 
         # Optionally 3x1 vector
         ipts: typing.List[np.ndarray | None] = [None, None, None, None]
 
+        # find intersections
         for i, normal in enumerate(self.viewplanes):
+            # we want to know the value of t when the line intercepts this plane,
+            # parametrized: v = t * ab + a, where v lies on the plane.
+            # we can find the projection of a onto the plane normal:
+            # a_projn = normal.times(av.dot(normal) / normal.dot(normal));
             a_projn = (av.dot(normal) / normal.dot(normal)) * normal
 
+            # this projection lets us determine the scalar multiple t of ab where
+            # (t * ab + a) is a vector which lies on the plane
             if abs(abv.dot(normal)) < 1.0e-5:
                 continue
             intersections[i] = a_projn.dot(a_projn) / -(abv.dot(a_projn))
 
+            # vector from a to the viewplane
             apv = intersections[i] * abv
+            # av + apv = intersection point
             intersectpt = av + apv
             ipts[i] = intersectpt
 
+            # discard intersections outside the camera frustum
             for j in range(1, len(self.viewplanes)):
                 oi = (i + j) % len(self.viewplanes)
                 onormal = self.viewplanes[oi]
+                # if the dot of the intersection point with any plane normal is negative, it is outside
                 if intersectpt.dot(onormal) < 0:
                     intersections[i] = float("nan")
                     ipts[i] = None
                     break
 
+            # discard duplicate intersections
             if ipts[i] is None:
                 continue
 
@@ -275,6 +399,7 @@ class SimCameraProperties:
                 ipts[i] = None
                 break
 
+        # determine the visible segment (minimum and maximum t)
         inter1 = float("nan")
         inter2 = float("nan")
         for inter in intersections:
@@ -284,6 +409,7 @@ class SimCameraProperties:
             else:
                 inter2 = inter
 
+        # two viewplane intersections
         if not math.isnan(inter2):
             max_ = max(inter1, inter2)
             min_ = min(inter1, inter2)
@@ -292,16 +418,19 @@ class SimCameraProperties:
             if bVisible:
                 max_ = 1
             return (min_, max_)
+        # one viewplane intersection
         elif not math.isnan(inter1):
             if aVisible:
                 return (0, inter1)
             if bVisible:
                 return (inter1, 1)
             return (inter1, None)
+        # no intersections
         else:
             return (None, None)
 
     def estPixelNoise(self, points: np.ndarray) -> np.ndarray:
+        """Returns these points after applying this camera's estimated noise."""
         assert points.shape[1] == 1, points.shape
         assert points.shape[2] == 2, points.shape
         if self.avgErrorPx == 0 and self.errorStdDevPx == 0:
@@ -309,6 +438,7 @@ class SimCameraProperties:
 
         noisyPts: list[list] = []
         for p in points:
+            # error pixels in random direction
             error = np.random.normal(self.avgErrorPx, self.errorStdDevPx, 1)[0]
             errorAngle = np.random.uniform(-math.pi, math.pi)
             noisyPts.append(
@@ -324,16 +454,25 @@ class SimCameraProperties:
         return retval
 
     def estLatency(self) -> seconds:
+        """
+        :returns: Noisy estimation of a frame's processing latency
+        """
+
         return max(
             float(np.random.normal(self.avgLatency, self.latencyStdDev, 1)[0]),
             0.0,
         )
 
     def estSecUntilNextFrame(self) -> seconds:
+        """
+        :returns: Estimate of how long until the next frame should be processed, in seconds
+        """
+        # exceptional processing latency blocks the next frame
         return self.frameSpeed + max(0.0, self.estLatency() - self.frameSpeed)
 
     @classmethod
     def PERFECT_90DEG(cls) -> typing.Self:
+        """960x720 resolution, 90 degree FOV, "perfect" lagless camera"""
         return cls()
 
     @classmethod
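
Tying the performance model together, a hedged sketch of sampling one frame's timing; the PERFECT_90DEG camera is lagless until latency values are set:

    props = SimCameraProperties.PERFECT_90DEG()
    props.setFPS(50)               # frameSpeed = 0.02 s
    props.setAvgLatency(0.035)
    props.setLatencyStdDev(0.005)
    # at least one frame period, pushed later when latency exceeds it
    dt = props.estSecUntilNextFrame()
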