photonlibpy 2025.0.0b3-py3-none-any.whl → 2025.0.0b5-py3-none-any.whl

@@ -26,6 +26,10 @@ from .visionTargetSim import VisionTargetSim
 
 
 class PhotonCameraSim:
+    """A handle for simulating :class:`.PhotonCamera` values. Processing simulated
+    targets through this class will change the associated PhotonCamera's results.
+    """
+
     kDefaultMinAreaPx: float = 100
 
     def __init__(
@@ -35,6 +39,21 @@ class PhotonCameraSim:
         minTargetAreaPercent: float | None = None,
         maxSightRange: meters | None = None,
     ):
+        """Constructs a handle for simulating :class:`.PhotonCamera` values.
+        Processing simulated targets through this class will change the associated
+        PhotonCamera's results.
+
+        By default, if no properties are provided, the camera has a 90 deg FOV with
+        no simulated lag. The minimum target area defaults to 100 pixels, and there
+        is no maximum sight range unless both params are passed to override these
+        defaults.
+
+        :param camera: The camera to be simulated
+        :param prop: Properties of this camera such as FOV and FPS
+        :param minTargetAreaPercent: The minimum percentage (0 - 100) a detected
+            target must take up of the camera's image to be processed. Match this
+            with your contour filtering settings in the PhotonVision GUI.
+        :param maxSightRange: Maximum distance at which the target is illuminated to
+            your camera. Note that the minimum target area of the image is separate
+            from this.
+        """
 
         self.minTargetAreaPercent: float = 0.0
         self.maxSightRange: float = 1.0e99
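For orientation, a minimal construction sketch against the signature above; the import paths and the no-argument SimCameraProperties default are assumptions based on this diff, not verified against the released wheel:

    from photonlibpy.photonCamera import PhotonCamera
    from photonlibpy.simulation import PhotonCameraSim, SimCameraProperties

    camera = PhotonCamera("frontCam")
    prop = SimCameraProperties()  # defaults to the "perfect" lagless 90 deg camera
    camSim = PhotonCameraSim(
        camera, prop, minTargetAreaPercent=5.0, maxSightRange=4.0
    )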
@@ -103,22 +122,39 @@ class PhotonCameraSim:
         return self.videoSimFrameRaw
 
     def canSeeTargetPose(self, camPose: Pose3d, target: VisionTargetSim) -> bool:
+        """Determines if this target's pose should be visible to the camera without
+        considering its projected image points. Does not account for image area.
+
+        :param camPose: Camera's 3d pose
+        :param target: Vision target containing pose and shape
+
+        :returns: If this vision target can be seen before image projection.
+        """
+
         rel = CameraTargetRelation(camPose, target.getPose())
         return (
             (
+                # target translation is outside of camera's FOV
                 abs(rel.camToTargYaw.degrees())
                 < self.prop.getHorizFOV().degrees() / 2.0
                 and abs(rel.camToTargPitch.degrees())
                 < self.prop.getVertFOV().degrees() / 2.0
             )
             and (
+                # camera is behind planar target and it should be occluded
                 not target.getModel().getIsPlanar()
                 or abs(rel.targtoCamAngle.degrees()) < 90
             )
+            # target is too far
            and rel.camToTarg.translation().norm() <= self.maxSightRange
        )
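The FOV gate in canSeeTargetPose reduces to plain angle comparisons. A standalone sketch of the same math with hypothetical numbers (no library calls):

    import math

    # hypothetical target 3 m ahead, 1 m left, 0.5 m above the camera
    x, y, z = 3.0, 1.0, 0.5
    yaw = math.degrees(math.atan2(y, x))                    # ~18.4 deg
    pitch = math.degrees(math.atan2(z, math.hypot(x, y)))   # ~9.0 deg
    horizFOV, vertFOV = 90.0, 70.0
    inFov = abs(yaw) < horizFOV / 2 and abs(pitch) < vertFOV / 2
    inRange = math.sqrt(x * x + y * y + z * z) <= 10.0      # assumed maxSightRange
    print(inFov and inRange)                                # True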
 
     def canSeeCorner(self, points: np.ndarray) -> bool:
+        """Determines if all target points are inside the camera's image.
+
+        :param points: The target's 2d image points
+        """
+
         assert points.shape[1] == 1
         assert points.shape[2] == 2
         for pt in points:
@@ -130,51 +166,88 @@ class PhotonCameraSim:
                 or y < 0
                 or y > self.prop.getResHeight()
             ):
-                return False
+                return False  # point is outside of resolution
 
         return True
 
     def consumeNextEntryTime(self) -> float | None:
+        """Determine if this camera should process a new frame based on performance
+        metrics and the time since the last update. Returns None if no update should
+        occur, or the timestamp in microseconds of when the frame should be received
+        by NT. If a timestamp is returned, the last frame update time becomes that
+        timestamp.
+
+        :returns: None while blocked, or the NT entry timestamp in microseconds if
+            ready
+        """
+        # check if this camera is ready for another frame update
         now = int(wpilib.Timer.getFPGATimestamp() * 1e6)
         timestamp = 0
         iter = 0
+        # prepare next latest update
         while now >= self.nextNtEntryTime:
             timestamp = int(self.nextNtEntryTime)
             frameTime = int(self.prop.estSecUntilNextFrame() * 1e6)
             self.nextNtEntryTime += frameTime
 
+            # if frame time is very small, avoid blocking
             iter += 1
             if iter > 50:
                 timestamp = now
                 self.nextNtEntryTime = now + frameTime
                 break
 
+        # return the timestamp of the latest update
        if timestamp != 0:
            return timestamp
 
+        # or this camera isn't ready to process yet
        return None
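The catch-up loop above advances nextNtEntryTime until it passes "now". A standalone numeric sketch of that behavior (values are hypothetical):

    now = 1_000_000            # current FPGA time, microseconds
    nextNtEntryTime = 880_000  # last scheduled frame time, already in the past
    frameTime = 50_000         # microseconds between frames

    timestamp = 0
    while now >= nextNtEntryTime:
        timestamp = nextNtEntryTime
        nextNtEntryTime += frameTime
    print(timestamp)           # 980000, the most recent frame due before "now"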
 
     def setMinTargetAreaPercent(self, areaPercent: float) -> None:
+        """The minimum percentage (0 - 100) a detected target must take up of the
+        camera's image to be processed.
+        """
         self.minTargetAreaPercent = areaPercent
 
     def setMinTargetAreaPixels(self, areaPx: float) -> None:
+        """The minimum number of pixels a detected target must take up in the
+        camera's image to be processed.
+        """
         self.minTargetAreaPercent = areaPx / self.prop.getResArea() * 100.0
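The pixel-to-percent conversion is straightforward; a worked example assuming a 640x480 sensor:

    resArea = 640 * 480                              # 307200 px
    areaPx = 100.0                                   # kDefaultMinAreaPx
    minTargetAreaPercent = areaPx / resArea * 100.0
    print(minTargetAreaPercent)                      # ~0.0326 percent of the image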
 
     def setMaxSightRange(self, range: meters) -> None:
+        """Maximum distance at which the target is illuminated to your camera. Note
+        that the minimum target area of the image is separate from this.
+        """
         self.maxSightRange = range
 
     def enableRawStream(self, enabled: bool) -> None:
+        """Sets whether the raw video stream simulation is enabled.
+
+        Note: This may increase loop times.
+        """
         self.videoSimRawEnabled = enabled
         raise Exception("Raw stream not implemented")
 
     def enableDrawWireframe(self, enabled: bool) -> None:
+        """Sets whether a wireframe of the field is drawn to the raw video stream.
+
+        Note: This will dramatically increase loop times.
+        """
         self.videoSimWireframeEnabled = enabled
         raise Exception("Wireframe not implemented")
 
     def setWireframeResolution(self, resolution: float) -> None:
+        """Sets the resolution of the drawn wireframe if enabled. Drawn line segments
+        will be subdivided into smaller segments based on a threshold set by the
+        resolution.
+
+        :param resolution: Resolution as a fraction (0 - 1) of the video frame's
+            diagonal length in pixels
+        """
         self.videoSimWireframeResolution = resolution
 
     def enableProcessedStream(self, enabled: bool) -> None:
+        """Sets whether the processed video stream simulation is enabled."""
         self.videoSimProcEnabled = enabled
         raise Exception("Processed stream not implemented")
 
@@ -187,25 +260,32 @@
 
         targets.sort(key=distance, reverse=True)
 
+        # all targets visible before noise
         visibleTgts: list[typing.Tuple[VisionTargetSim, np.ndarray]] = []
+        # all targets actually detected by camera (after noise)
         detectableTgts: list[PhotonTrackedTarget] = []
 
+        # basis change from world coordinates to camera coordinates
         camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
 
         for tgt in targets:
+            # pose isn't visible, skip to next
             if not self.canSeeTargetPose(cameraPose, tgt):
                 continue
 
+            # find target's 3d corner points
             fieldCorners = tgt.getFieldVertices()
             isSpherical = tgt.getModel().getIsSpherical()
-            if isSpherical:
+            if isSpherical:  # target is spherical
                 model = tgt.getModel()
+                # orient the model to the camera (like a sprite/decal) so it
+                # appears similar regardless of view
                 fieldCorners = model.getFieldVertices(
                     TargetModel.getOrientedPose(
                         tgt.getPose().translation(), cameraPose.translation()
                     )
                 )
 
+            # project 3d target points into 2d image points
             imagePoints = OpenCVHelp.projectPoints(
                 self.prop.getIntrinsics(),
                 self.prop.getDistCoeffs(),
@@ -213,9 +293,11 @@
                 fieldCorners,
             )
 
+            # spherical targets need a rotated rectangle of their midpoints for
+            # visualization
             if isSpherical:
                 center = OpenCVHelp.avgPoint(imagePoints)
                 l: int = 0
+                # reference point (left side midpoint)
                 for i in range(4):
                     if imagePoints[i, 0, 0] < imagePoints[l, 0, 0]:
                         l = i
@@ -239,6 +321,7 @@
                 for i in range(4):
                     if i != t and i != l and i != b:
                         r = i
+                # create RotatedRect from midpoints
                 rect = cv.RotatedRect(
                     (center[0, 0], center[0, 1]),
                     (
@@ -247,16 +330,23 @@
                     ),
                     -angles[r],
                 )
+                # set target corners to rect corners
                 imagePoints = np.array([[[p[0], p[1]]] for p in rect.points()])
 
+            # save visible targets for raw video stream simulation
             visibleTgts.append((tgt, imagePoints))
+            # estimate pixel noise
             noisyTargetCorners = self.prop.estPixelNoise(imagePoints)
+            # find the minimum area rectangle of target corners
             minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners)
             minAreaRectPts = minAreaRect.points()
+            # find the (naive) 2d yaw/pitch
             centerPt = minAreaRect.center
             centerRot = self.prop.getPixelRot(centerPt)
+            # find contour area
             areaPercent = self.prop.getContourAreaPercent(noisyTargetCorners)
 
+            # projected target can't be detected, skip to next
             if (
                 not self.canSeeCorner(noisyTargetCorners)
                 or not areaPercent >= self.minTargetAreaPercent
@@ -265,6 +355,7 @@
 
             pnpSim: PnpResult | None = None
             if tgt.fiducialId >= 0 and len(tgt.getFieldVertices()) == 4:
+                # single AprilTag solvePNP
                 pnpSim = OpenCVHelp.solvePNP_Square(
                     self.prop.getIntrinsics(),
                     self.prop.getDistCoeffs(),
@@ -295,6 +386,7 @@
 
         # Video streams disabled for now
         if self.videoSimRawEnabled:
+            # TODO: video streams are disabled for now; port and uncomment when implemented
            # VideoSimUtil::UpdateVideoProp(videoSimRaw, prop);
            # cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()};
            # cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1);
@@ -312,6 +404,7 @@
 
         if len(visibleLayoutTags) > 1:
             usedIds = [tag.ID for tag in visibleLayoutTags]
+            # sort() sorts in ascending order by default
             usedIds.sort()
             pnpResult = VisionEstimation.estimateCamPosePNP(
                 self.prop.getIntrinsics(),
@@ -323,10 +416,16 @@
         if pnpResult is not None:
             multiTagResults = MultiTargetPNPResult(pnpResult, usedIds)
 
+        # put this simulated data to NT
         self.heartbeatCounter += 1
+        now_micros = wpilib.Timer.getFPGATimestamp() * 1e6
         return PhotonPipelineResult(
             metadata=PhotonPipelineMetadata(
-                self.heartbeatCounter, int(latency * 1e6), 1000000
+                self.heartbeatCounter,
+                int(now_micros - latency * 1e6),
+                int(now_micros),
+                # Pretend like we heard a pong recently
+                int(np.random.uniform(950, 1050)),
             ),
             targets=detectableTgts,
             multitagResult=multiTagResults,
@@ -335,6 +434,13 @@
     def submitProcessedFrame(
         self, result: PhotonPipelineResult, receiveTimestamp: float | None
     ):
+        """Simulate one processed frame of vision data, putting one result to NT.
+        The image capture timestamp overrides
+        :meth:`.PhotonPipelineResult.getTimestampSeconds` for more precise latency
+        simulation.
+
+        :param result: The pipeline result to submit
+        :param receiveTimestamp: The (sim) timestamp when this result was read by NT
+            in microseconds. If not passed, the image capture time is assumed to be
+            (current time - latency).
+        """
         if receiveTimestamp is None:
             receiveTimestamp = wpilib.Timer.getFPGATimestamp() * 1e6
         receiveTimestamp = int(receiveTimestamp)
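Putting the pieces together, a hedged sketch of one simulated tick; the process() name, argument order, and units are assumed from the corresponding Java API, and camSim is reused from the constructor sketch above:

    # cameraPose (Pose3d) and targets (list[VisionTargetSim]) supplied elsewhere
    timestampMicros = camSim.consumeNextEntryTime()
    if timestampMicros is not None:
        latency = camSim.prop.estLatency()   # seconds (unit assumed)
        result = camSim.process(latency, cameraPose, targets)
        camSim.submitProcessedFrame(result, timestampMicros)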
@@ -9,9 +9,26 @@ from wpimath.units import hertz, seconds
 
 from ..estimation import RotTrlTransform3d
 
+logger = logging.getLogger(__name__)
+
 
 class SimCameraProperties:
+    """Calibration and performance values for this camera.
+
+    The resolution will affect the accuracy of projected (3d to 2d) target corners
+    and similarly the severity of image noise on estimation (2d to 3d).
+
+    The camera intrinsics and distortion coefficients describe the results of
+    calibration, and how to map between 3d field points and 2d image points.
+
+    The performance values (framerate/exposure time, latency) determine how often
+    results should be updated and with how much latency in simulation. High exposure
+    time causes motion blur which can inhibit target detection while moving. Note
+    that latency estimation does not account for network latency and the latency
+    reported will always be perfect.
+    """
+
     def __init__(self):
+        """Default constructor which is the same as :meth:`.PERFECT_90DEG`"""
         self.resWidth: int = -1
         self.resHeight: int = -1
         self.camIntrinsics: np.ndarray = np.zeros((3, 3))  # [3,3]
@@ -31,21 +48,25 @@ class SimCameraProperties:
     ) -> None:
         if fovDiag.degrees() < 1.0 or fovDiag.degrees() > 179.0:
             fovDiag = Rotation2d.fromDegrees(max(min(fovDiag.degrees(), 179.0), 1.0))
-            logging.error("Requested invalid FOV! Clamping between (1, 179) degrees...")
+            logger.error("Requested invalid FOV! Clamping between (1, 179) degrees...")
 
         resDiag = math.sqrt(width * width + height * height)
         diagRatio = math.tan(fovDiag.radians() / 2.0)
         fovWidth = Rotation2d(math.atan(diagRatio * (width / resDiag)) * 2)
         fovHeight = Rotation2d(math.atan(diagRatio * (height / resDiag)) * 2)
 
+        # assume no distortion
         newDistCoeffs = np.zeros((8, 1))
 
+        # assume centered principal point (pixels)
         cx = width / 2.0 - 0.5
         cy = height / 2.0 - 0.5
 
+        # use given fov to determine focal lengths (pixels)
         fx = cx / math.tan(fovWidth.radians() / 2.0)
         fy = cy / math.tan(fovHeight.radians() / 2.0)
 
+        # create camera intrinsics matrix
         newCamIntrinsics = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
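A quick sanity check of the focal-length math above, assuming a 960x720 sensor and a 90 degree diagonal FOV:

    import math

    width, height, fovDiagDeg = 960, 720, 90.0
    resDiag = math.hypot(width, height)                       # 1200 px
    diagRatio = math.tan(math.radians(fovDiagDeg) / 2.0)      # 1.0
    fovWidth = math.atan(diagRatio * (width / resDiag)) * 2   # ~1.35 rad (~77.3 deg)
    cx = width / 2.0 - 0.5                                    # 479.5 px
    fx = cx / math.tan(fovWidth / 2.0)                        # ~599.4 px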
 
         self.setCalibrationFromIntrinsics(
@@ -65,6 +86,7 @@ class SimCameraProperties:
         self.camIntrinsics = newCamIntrinsics
         self.distCoeffs = newDistCoeffs
 
+        # left, right, up, and down view planes
         p = [
             Translation3d(
                 1.0,
@@ -110,16 +132,33 @@ class SimCameraProperties:
         self.errorStdDevPx = newErrorStdDevPx
 
     def setFPS(self, fps: hertz):
+        """
+        :param fps: The average frames per second the camera should process at.
+            **Exposure time limits FPS if set!**
+        """
+
         self.frameSpeed = max(1.0 / fps, self.exposureTime)
 
     def setExposureTime(self, newExposureTime: seconds):
+        """
+        :param newExposureTime: The amount of time the "shutter" is open for one
+            frame. Affects motion blur. **Frame speed (from FPS) is limited to this!**
+        """
+
         self.exposureTime = newExposureTime
         self.frameSpeed = max(self.frameSpeed, self.exposureTime)
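These two setters interact: frameSpeed is the longer of 1/FPS and the exposure time. A worked sketch of the clamping with hypothetical values:

    props = SimCameraProperties()
    props.setFPS(120)             # requests 1/120 s, about 8.3 ms per frame
    props.setExposureTime(0.020)  # 20 ms shutter
    # frameSpeed = max(8.3 ms, 20 ms) = 20 ms, so the effective FPS is only 50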
 
     def setAvgLatency(self, newAvgLatency: seconds):
+        """
+        :param newAvgLatency: The average latency (from image capture to data
+            published) in seconds a frame should have
+        """
         self.avgLatency = newAvgLatency
 
     def setLatencyStdDev(self, newLatencyStdDev: seconds):
+        """
+        :param newLatencyStdDev: The standard deviation of the latency in seconds
+        """
         self.latencyStdDev = newLatencyStdDev
 
     def getResWidth(self) -> int:
@@ -156,21 +195,43 @@ class SimCameraProperties:
         return self.latencyStdDev
 
     def getContourAreaPercent(self, points: np.ndarray) -> float:
+        """The percentage (0 - 100) of this camera's resolution the contour takes up
+        in pixels of the image.
+
+        :param points: Points of the contour
+        """
+
         return cv.contourArea(cv.convexHull(points)) / self.getResArea() * 100.0
 
     def getPixelYaw(self, pixelX: float) -> Rotation2d:
+        """The yaw from the principal point of this camera to the pixel x value.
+        Positive values left."""
         fx = self.camIntrinsics[0, 0]
+        # account for principal point not being centered
         cx = self.camIntrinsics[0, 2]
         xOffset = cx - pixelX
         return Rotation2d(fx, xOffset)
 
     def getPixelPitch(self, pixelY: float) -> Rotation2d:
+        """The pitch from the principal point of this camera to the pixel y value.
+        Pitch is positive down.
+
+        Note that this angle is naively computed and may be incorrect. See
+        :meth:`.getCorrectedPixelRot`.
+        """
+
         fy = self.camIntrinsics[1, 1]
+        # account for principal point not being centered
         cy = self.camIntrinsics[1, 2]
         yOffset = cy - pixelY
         return Rotation2d(fy, -yOffset)
 
     def getPixelRot(self, point: cv.typing.Point2f) -> Rotation3d:
+        """Finds the yaw and pitch to the given image point. Yaw is positive left,
+        and pitch is positive down.
+
+        Note that pitch is naively computed and may be incorrect. See
+        :meth:`.getCorrectedPixelRot`.
+        """
+
         return Rotation3d(
             0.0,
             self.getPixelPitch(point[1]).radians(),
@@ -178,6 +239,27 @@
         )
 
     def getCorrectedPixelRot(self, point: cv.typing.Point2f) -> Rotation3d:
+        """Gives the yaw and pitch of the line intersecting the camera lens and the
+        given pixel coordinates on the sensor. Yaw is positive left, and pitch
+        positive down.
+
+        The pitch traditionally calculated from pixel offsets does not correctly
+        account for non-zero values of yaw because of perspective distortion (not to
+        be confused with lens distortion). For example, the pitch angle is naively
+        calculated as ``pitch = arctan(pixel y offset / focal length y)``.
+
+        However, using the focal length as a side of the associated right triangle is
+        not correct when the pixel x value is not 0, because the distance from this
+        pixel (projected on the x-axis) to the camera lens increases. Projecting a
+        line back out of the camera with these naive angles will not intersect the
+        3d point that was originally projected into this 2d pixel. Instead, this
+        length should be ``focal length y / cos(arctan(pixel x offset / focal length x))``.
+
+        :returns: Rotation3d with yaw and pitch of the line projected out of the
+            camera from the given pixel (roll is zero).
+        """
+
         fx = self.camIntrinsics[0, 0]
         cx = self.camIntrinsics[0, 2]
         xOffset = cx - point[0]
@@ -191,11 +273,13 @@
         return Rotation3d(0.0, pitch.radians(), yaw.radians())
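A worked comparison of the naive and corrected pitch from the formulas above, using hypothetical intrinsics:

    import math

    fx = fy = 600.0            # focal lengths in pixels
    xOff, yOff = 300.0, 200.0  # pixel offsets from the principal point

    naive_pitch = math.degrees(math.atan(yOff / fy))                # ~18.4 deg
    yaw = math.atan(xOff / fx)
    corrected_fy = fy / math.cos(yaw)                               # ~670.8 px
    corrected_pitch = math.degrees(math.atan(yOff / corrected_fy))  # ~16.6 deg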
 
     def getHorizFOV(self) -> Rotation2d:
+        # sum of the FOV to the left and right of the principal point
         left = self.getPixelYaw(0)
         right = self.getPixelYaw(self.resWidth)
         return left - right
 
     def getVertFOV(self) -> Rotation2d:
+        # sum of the FOV above and below the principal point
         above = self.getPixelPitch(0)
         below = self.getPixelPitch(self.resHeight)
         return below - above
@@ -208,9 +292,34 @@
     def getVisibleLine(
         self, camRt: RotTrlTransform3d, a: Translation3d, b: Translation3d
     ) -> typing.Tuple[float | None, float | None]:
+        """Determines where the line segment defined by the two given translations
+        intersects the camera's frustum/field-of-vision, if at all.
+
+        The line is parametrized so any of its points ``p = t * (b - a) + a``. This
+        method returns these values of t, minimum first, defining the region of the
+        line segment which is visible in the frustum. If both ends of the line
+        segment are visible, this simply returns (0, 1). If, for example, point b is
+        visible while a is not, and half of the line segment is inside the camera
+        frustum, (0.5, 1) would be returned.
+
+        :param camRt: The change in basis from world coordinates to camera
+            coordinates. See :meth:`.RotTrlTransform3d.makeRelativeTo`.
+        :param a: The initial translation of the line
+        :param b: The final translation of the line
+
+        :returns: A tuple of two optional floats, which may be None:
+
+            - (float, float) : Two parametrized values (t), minimum first,
+              representing which segment of the line is visible in the camera
+              frustum.
+            - (float, None) : One value (t) representing a single intersection
+              point. For example, the line only intersects the intersection of two
+              adjacent viewplanes.
+            - (None, None) : No values. The line segment is not visible in the
+              camera frustum.
+        """
+
+        # translations relative to the camera
         relA = camRt.applyTranslation(a)
         relB = camRt.applyTranslation(b)
 
+        # check if both ends are behind camera
         if relA.X() <= 0.0 and relB.X() <= 0.0:
             return (None, None)
 
@@ -221,6 +330,7 @@
         aVisible = True
         bVisible = True
 
+        # check if the ends of the line segment are visible
         for normal in self.viewplanes:
             aVisibility = av.dot(normal)
             if aVisibility < 0:
@@ -229,38 +339,54 @@
             bVisibility = bv.dot(normal)
             if bVisibility < 0:
                 bVisible = False
+            # both ends are outside at least one of the same viewplane
             if aVisibility <= 0 and bVisibility <= 0:
                 return (None, None)
 
+        # both ends are inside frustum
         if aVisible and bVisible:
             return (0.0, 1.0)
 
+        # parametrized (t=0 at a, t=1 at b) intersections with viewplanes
         intersections = [float("nan"), float("nan"), float("nan"), float("nan")]
 
         # Optionally 3x1 vector
         ipts: typing.List[np.ndarray | None] = [None, None, None, None]
 
+        # find intersections
         for i, normal in enumerate(self.viewplanes):
+            # we want to know the value of t when the line intercepts this plane
+            # parametrized: v = t * ab + a, where v lies on the plane
+            # we can find the projection of a onto the plane normal
+            # a_projn = normal.times(av.dot(normal) / normal.dot(normal));
             a_projn = (av.dot(normal) / normal.dot(normal)) * normal
 
+            # this projection lets us determine the scalar multiple t of ab where
+            # (t * ab + a) is a vector which lies on the plane
             if abs(abv.dot(normal)) < 1.0e-5:
                 continue
             intersections[i] = a_projn.dot(a_projn) / -(abv.dot(a_projn))
 
+            # vector a to the viewplane
             apv = intersections[i] * abv
+            # av + apv = intersection point
             intersectpt = av + apv
             ipts[i] = intersectpt
 
+            # discard intersections outside the camera frustum
             for j in range(1, len(self.viewplanes)):
                 if j == 0:
                     continue
                 oi = (i + j) % len(self.viewplanes)
                 onormal = self.viewplanes[oi]
+                # if the dot of the intersection point with any plane normal is
+                # negative, it is outside
                 if intersectpt.dot(onormal) < 0:
                     intersections[i] = float("nan")
                     ipts[i] = None
                     break
 
+            # discard duplicate intersections
             if ipts[i] is None:
                 continue
 
@@ -275,6 +401,7 @@
                     ipts[i] = None
                     break
 
+        # determine visible segment (minimum and maximum t)
         inter1 = float("nan")
         inter2 = float("nan")
         for inter in intersections:
@@ -284,6 +411,7 @@
             else:
                 inter2 = inter
 
+        # two viewplane intersections
         if not math.isnan(inter2):
             max_ = max(inter1, inter2)
             min_ = min(inter1, inter2)
@@ -292,16 +420,19 @@
             if bVisible:
                 max_ = 1
             return (min_, max_)
+        # one viewplane intersection
         elif not math.isnan(inter1):
             if aVisible:
                 return (0, inter1)
             if bVisible:
                 return (inter1, 1)
             return (inter1, None)
+        # no intersections
         else:
             return (None, None)
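To illustrate the parametrization, a small sketch that recovers the visible endpoints from a hypothetical (t1, t2) result:

    def pointAt(a, b, t):
        # docstring parametrization: p = t * (b - a) + a, applied per coordinate
        return tuple(ai + t * (bi - ai) for ai, bi in zip(a, b))

    a, b = (2.0, -1.0, 0.5), (2.0, 3.0, 0.5)  # hypothetical endpoints, meters
    t1, t2 = 0.5, 1.0                         # e.g. only b's half is in the frustum
    visibleStart = pointAt(a, b, t1)          # (2.0, 1.0, 0.5)
    visibleEnd = pointAt(a, b, t2)            # (2.0, 3.0, 0.5)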
 
     def estPixelNoise(self, points: np.ndarray) -> np.ndarray:
+        """Returns these points after applying this camera's estimated noise."""
         assert points.shape[1] == 1, points.shape
         assert points.shape[2] == 2, points.shape
         if self.avgErrorPx == 0 and self.errorStdDevPx == 0:
@@ -309,6 +440,7 @@
             return points
         noisyPts: list[list] = []
         for p in points:
+            # error pixels in random direction
             error = np.random.normal(self.avgErrorPx, self.errorStdDevPx, 1)[0]
             errorAngle = np.random.uniform(-math.pi, math.pi)
             noisyPts.append(
@@ -324,16 +456,25 @@
         return retval
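For intuition, the loop above offsets each point by a normally distributed error in a random direction; the same idea as a standalone sketch with hypothetical noise parameters:

    import math
    import numpy as np

    avgErrorPx, errorStdDevPx = 0.25, 0.08   # hypothetical calibration error
    pt = np.array([320.0, 240.0])
    error = np.random.normal(avgErrorPx, errorStdDevPx)
    angle = np.random.uniform(-math.pi, math.pi)
    noisy = pt + error * np.array([math.cos(angle), math.sin(angle)])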
 
     def estLatency(self) -> seconds:
+        """
+        :returns: Noisy estimation of a frame's processing latency
+        """
+
         return max(
             float(np.random.normal(self.avgLatency, self.latencyStdDev, 1)[0]),
             0.0,
         )
 
     def estSecUntilNextFrame(self) -> seconds:
+        """
+        :returns: Estimate of how long until the next frame should be processed, in
+            seconds
+        """
+        # exceptional processing latency blocks the next frame
         return self.frameSpeed + max(0.0, self.estLatency() - self.frameSpeed)
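A numeric sketch of the blocking behavior, assuming a 30 FPS frameSpeed:

    frameSpeed = 1 / 30                  # ~0.0333 s per frame
    for latency in (0.020, 0.050):
        nextFrame = frameSpeed + max(0.0, latency - frameSpeed)
        print(nextFrame)                 # 0.0333 s, then 0.050 s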
 
     @classmethod
     def PERFECT_90DEG(cls) -> typing.Self:
+        """960x720 resolution, 90 degree FOV, "perfect" lagless camera"""
         return cls()
 
     @classmethod