photonlibpy 2025.0.0b1__py3-none-any.whl → 2025.0.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. photonlibpy/__init__.py +2 -2
  2. photonlibpy/estimation/__init__.py +5 -0
  3. photonlibpy/estimation/cameraTargetRelation.py +25 -0
  4. photonlibpy/estimation/openCVHelp.py +200 -0
  5. photonlibpy/estimation/rotTrlTransform3d.py +32 -0
  6. photonlibpy/estimation/targetModel.py +137 -0
  7. photonlibpy/estimation/visionEstimation.py +91 -0
  8. photonlibpy/generated/MultiTargetPNPResultSerde.py +1 -1
  9. photonlibpy/generated/PhotonPipelineMetadataSerde.py +1 -1
  10. photonlibpy/generated/PhotonPipelineResultSerde.py +1 -1
  11. photonlibpy/generated/PhotonTrackedTargetSerde.py +1 -1
  12. photonlibpy/generated/PnpResultSerde.py +1 -1
  13. photonlibpy/generated/TargetCornerSerde.py +1 -1
  14. photonlibpy/generated/__init__.py +0 -1
  15. photonlibpy/networktables/NTTopicSet.py +64 -0
  16. photonlibpy/networktables/__init__.py +1 -0
  17. photonlibpy/packet.py +17 -9
  18. photonlibpy/photonCamera.py +9 -6
  19. photonlibpy/photonPoseEstimator.py +3 -3
  20. photonlibpy/simulation/__init__.py +5 -0
  21. photonlibpy/simulation/photonCameraSim.py +408 -0
  22. photonlibpy/simulation/simCameraProperties.py +661 -0
  23. photonlibpy/simulation/videoSimUtil.py +2 -0
  24. photonlibpy/simulation/visionSystemSim.py +237 -0
  25. photonlibpy/simulation/visionTargetSim.py +50 -0
  26. photonlibpy/targeting/TargetCorner.py +5 -1
  27. photonlibpy/targeting/__init__.py +1 -1
  28. photonlibpy/targeting/multiTargetPNPResult.py +8 -2
  29. photonlibpy/targeting/photonPipelineResult.py +7 -4
  30. photonlibpy/targeting/photonTrackedTarget.py +7 -1
  31. photonlibpy/version.py +2 -2
  32. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b2.dist-info}/METADATA +6 -2
  33. photonlibpy-2025.0.0b2.dist-info/RECORD +36 -0
  34. photonlibpy-2025.0.0b1.dist-info/RECORD +0 -22
  35. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b2.dist-info}/WHEEL +0 -0
  36. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b2.dist-info}/top_level.txt +0 -0
photonlibpy/packet.py CHANGED
@@ -16,9 +16,17 @@
16
16
  ###############################################################################
17
17
 
18
18
  import struct
19
- from typing import Any, Optional, Type
20
- from wpimath.geometry import Transform3d, Translation3d, Rotation3d, Quaternion
19
+ from typing import Generic, Optional, Protocol, TypeVar
20
+
21
21
  import wpilib
22
+ from wpimath.geometry import Quaternion, Rotation3d, Transform3d, Translation3d
23
+
24
+ T = TypeVar("T")
25
+
26
+
27
+ class Serde(Generic[T], Protocol):
28
+ def pack(self, value: T) -> "Packet": ...
29
+ def unpack(self, packet: "Packet") -> T: ...
22
30
 
23
31
 
24
32
  class Packet:
@@ -33,9 +41,9 @@ class Packet:
33
41
  self.readPos = 0
34
42
  self.outOfBytes = False
35
43
 
36
- def clear(self):
44
+ def clear(self) -> None:
37
45
  """Clears the packet and resets the read and write positions."""
38
- self.packetData = [0] * self.size
46
+ self.packetData = bytes(self.size)
39
47
  self.readPos = 0
40
48
  self.outOfBytes = False
41
49
 
@@ -157,7 +165,7 @@ class Packet:
157
165
  ret.append(self.decodeDouble())
158
166
  return ret
159
167
 
160
- def decodeShortList(self) -> list[float]:
168
+ def decodeShortList(self) -> list[int]:
161
169
  """
162
170
  * Returns a decoded array of shorts from the packet.
163
171
  """
@@ -186,14 +194,14 @@ class Packet:
186
194
 
187
195
  return Transform3d(translation, rotation)
188
196
 
189
- def decodeList(self, serde: Type):
197
+ def decodeList(self, serde: Serde[T]) -> list[T]:
190
198
  retList = []
191
199
  arr_len = self.decode8()
192
200
  for _ in range(arr_len):
193
201
  retList.append(serde.unpack(self))
194
202
  return retList
195
203
 
196
- def decodeOptional(self, serde: Type) -> Optional[Any]:
204
+ def decodeOptional(self, serde: Serde[T]) -> Optional[T]:
197
205
  if self.decodeBoolean():
198
206
  return serde.unpack(self)
199
207
  else:
@@ -280,7 +288,7 @@ class Packet:
280
288
  self.encodeDouble(quaternion.Y())
281
289
  self.encodeDouble(quaternion.Z())
282
290
 
283
- def encodeList(self, values: list[Any], serde: Type):
291
+ def encodeList(self, values: list[T], serde: Serde[T]):
284
292
  """
285
293
  Encodes a list of items using a specific serializer and appends it to the packet.
286
294
  """
@@ -290,7 +298,7 @@ class Packet:
290
298
  self.packetData = self.packetData + packed.getData()
291
299
  self.size = len(self.packetData)
292
300
 
293
- def encodeOptional(self, value: Optional[Any], serde: Type):
301
+ def encodeOptional(self, value: Optional[T], serde: Serde[T]):
294
302
  """
295
303
  Encodes an optional value using a specific serializer.
296
304
  """
@@ -17,15 +17,17 @@
17
17
 
18
18
  from enum import Enum
19
19
  from typing import List
20
+
20
21
  import ntcore
21
- from wpilib import RobotController, Timer
22
- import wpilib
23
- from .packet import Packet
24
- from .targeting.photonPipelineResult import PhotonPipelineResult
25
- from .version import PHOTONVISION_VERSION, PHOTONLIB_VERSION # type: ignore[import-untyped]
26
22
 
27
23
  # magical import to make serde stuff work
28
24
  import photonlibpy.generated # noqa
25
+ import wpilib
26
+ from wpilib import RobotController, Timer
27
+
28
+ from .packet import Packet
29
+ from .targeting.photonPipelineResult import PhotonPipelineResult
30
+ from .version import PHOTONLIB_VERSION # type: ignore[import-untyped]
29
31
 
30
32
 
31
33
  class VisionLEDMode(Enum):
@@ -231,12 +233,13 @@ class PhotonCamera:
231
233
 
232
234
  remoteUUID = self._rawBytesEntry.getTopic().getProperty("message_uuid")
233
235
 
234
- if remoteUUID is None or len(remoteUUID) == 0:
236
+ if not remoteUUID:
235
237
  wpilib.reportWarning(
236
238
  f"PhotonVision coprocessor at path {self._path} has not reported a message interface UUID - is your coprocessor's camera started?",
237
239
  True,
238
240
  )
239
241
 
242
+ assert isinstance(remoteUUID, str)
240
243
  # ntcore hands us a JSON string with leading/trailing quotes - remove those
241
244
  remoteUUID = remoteUUID.replace('"', "")
242
245
 
@@ -20,11 +20,11 @@ from typing import Optional
20
20
 
21
21
  import wpilib
22
22
  from robotpy_apriltag import AprilTagFieldLayout
23
- from wpimath.geometry import Transform3d, Pose3d, Pose2d
23
+ from wpimath.geometry import Pose2d, Pose3d, Transform3d
24
24
 
25
- from .targeting.photonPipelineResult import PhotonPipelineResult
26
- from .photonCamera import PhotonCamera
27
25
  from .estimatedRobotPose import EstimatedRobotPose
26
+ from .photonCamera import PhotonCamera
27
+ from .targeting.photonPipelineResult import PhotonPipelineResult
28
28
 
29
29
 
30
30
  class PoseStrategy(enum.Enum):
@@ -0,0 +1,5 @@
1
+ from .photonCameraSim import PhotonCameraSim
2
+ from .simCameraProperties import SimCameraProperties
3
+ from .videoSimUtil import VideoSimUtil
4
+ from .visionSystemSim import VisionSystemSim
5
+ from .visionTargetSim import VisionTargetSim
@@ -0,0 +1,408 @@
1
+ import math
2
+ import typing
3
+
4
+ import cscore as cs
5
+ import cv2 as cv
6
+ import numpy as np
7
+ import robotpy_apriltag
8
+ import wpilib
9
+ from wpimath.geometry import Pose3d, Transform3d
10
+ from wpimath.units import meters, seconds
11
+
12
+ from ..estimation import OpenCVHelp, RotTrlTransform3d, TargetModel, VisionEstimation
13
+ from ..estimation.cameraTargetRelation import CameraTargetRelation
14
+ from ..networktables.NTTopicSet import NTTopicSet
15
+ from ..photonCamera import PhotonCamera
16
+ from ..targeting import (
17
+ MultiTargetPNPResult,
18
+ PhotonPipelineMetadata,
19
+ PhotonPipelineResult,
20
+ PhotonTrackedTarget,
21
+ PnpResult,
22
+ TargetCorner,
23
+ )
24
+ from .simCameraProperties import SimCameraProperties
25
+ from .visionTargetSim import VisionTargetSim
26
+
27
+
28
+ class PhotonCameraSim:
29
+ kDefaultMinAreaPx: float = 100
30
+
31
+ def __init__(
32
+ self,
33
+ camera: PhotonCamera,
34
+ props: SimCameraProperties | None = None,
35
+ minTargetAreaPercent: float | None = None,
36
+ maxSightRange: meters | None = None,
37
+ ):
38
+
39
+ self.minTargetAreaPercent: float = 0.0
40
+ self.maxSightRange: float = 1.0e99
41
+ self.videoSimRawEnabled: bool = False
42
+ self.videoSimWireframeEnabled: bool = False
43
+ self.videoSimWireframeResolution: float = 0.1
44
+ self.videoSimProcEnabled: bool = True
45
+ self.ts = NTTopicSet()
46
+ self.heartbeatCounter: int = 0
47
+ self.nextNtEntryTime = int(wpilib.Timer.getFPGATimestamp() * 1e6)
48
+ self.tagLayout = robotpy_apriltag.loadAprilTagLayoutField(
49
+ robotpy_apriltag.AprilTagField.k2024Crescendo
50
+ )
51
+
52
+ if (
53
+ camera is not None
54
+ and props is None
55
+ and minTargetAreaPercent is None
56
+ and maxSightRange is None
57
+ ):
58
+ props = SimCameraProperties.PERFECT_90DEG()
59
+ elif (
60
+ camera is not None
61
+ and props is not None
62
+ and minTargetAreaPercent is not None
63
+ and maxSightRange is not None
64
+ ):
65
+ pass
66
+ elif (
67
+ camera is not None
68
+ and props is not None
69
+ and minTargetAreaPercent is None
70
+ and maxSightRange is None
71
+ ):
72
+ pass
73
+ else:
74
+ raise Exception("Invalid Constructor Called")
75
+
76
+ self.cam = camera
77
+ self.prop = props
78
+ self.setMinTargetAreaPixels(PhotonCameraSim.kDefaultMinAreaPx)
79
+
80
+ # TODO Check fps is right
81
+ self.videoSimRaw = cs.CvSource(
82
+ self.cam.getName() + "-raw",
83
+ cs.VideoMode.PixelFormat.kGray,
84
+ self.prop.getResWidth(),
85
+ self.prop.getResHeight(),
86
+ 1,
87
+ )
88
+ self.videoSimFrameRaw = np.zeros(
89
+ (self.prop.getResWidth(), self.prop.getResHeight())
90
+ )
91
+
92
+ # TODO Check fps is right
93
+ self.videoSimProcessed = cs.CvSource(
94
+ self.cam.getName() + "-processed",
95
+ cs.VideoMode.PixelFormat.kGray,
96
+ self.prop.getResWidth(),
97
+ self.prop.getResHeight(),
98
+ 1,
99
+ )
100
+ self.videoSimFrameProcessed = np.zeros(
101
+ (self.prop.getResWidth(), self.prop.getResHeight())
102
+ )
103
+
104
+ self.ts.subTable = self.cam._cameraTable
105
+ self.ts.updateEntries()
106
+
107
+ # Handle this last explicitly for this function signature because the other constructor is called in the initialiser list
108
+ if (
109
+ camera is not None
110
+ and props is not None
111
+ and minTargetAreaPercent is not None
112
+ and maxSightRange is not None
113
+ ):
114
+ self.minTargetAreaPercent = minTargetAreaPercent
115
+ self.maxSightRange = maxSightRange
116
+
117
+ def getCamera(self) -> PhotonCamera:
118
+ return self.cam
119
+
120
+ def getMinTargetAreaPercent(self) -> float:
121
+ return self.minTargetAreaPercent
122
+
123
+ def getMinTargetAreaPixels(self) -> float:
124
+ return self.minTargetAreaPercent / 100.0 * self.prop.getResArea()
125
+
126
+ def getMaxSightRange(self) -> meters:
127
+ return self.maxSightRange
128
+
129
+ def getVideoSimRaw(self) -> cs.CvSource:
130
+ return self.videoSimRaw
131
+
132
+ def getVideoSimFrameRaw(self) -> np.ndarray:
133
+ return self.videoSimFrameRaw
134
+
135
+ def canSeeTargetPose(self, camPose: Pose3d, target: VisionTargetSim) -> bool:
136
+ rel = CameraTargetRelation(camPose, target.getPose())
137
+ return (
138
+ (
139
+ abs(rel.camToTargYaw.degrees())
140
+ < self.prop.getHorizFOV().degrees() / 2.0
141
+ and abs(rel.camToTargPitch.degrees())
142
+ < self.prop.getVertFOV().degrees() / 2.0
143
+ )
144
+ and (
145
+ not target.getModel().getIsPlanar()
146
+ or abs(rel.targtoCamAngle.degrees()) < 90
147
+ )
148
+ and rel.camToTarg.translation().norm() <= self.maxSightRange
149
+ )
150
+
151
+ def canSeeCorner(self, points: np.ndarray) -> bool:
152
+ assert points.shape[1] == 1
153
+ assert points.shape[2] == 2
154
+ for pt in points:
155
+ x = pt[0, 0]
156
+ y = pt[0, 1]
157
+ if (
158
+ x < 0
159
+ or x > self.prop.getResWidth()
160
+ or y < 0
161
+ or y > self.prop.getResHeight()
162
+ ):
163
+ return False
164
+
165
+ return True
166
+
167
+ def consumeNextEntryTime(self) -> float | None:
168
+ now = int(wpilib.Timer.getFPGATimestamp() * 1e6)
169
+ timestamp = 0
170
+ iter = 0
171
+ while now >= self.nextNtEntryTime:
172
+ timestamp = int(self.nextNtEntryTime)
173
+ frameTime = int(self.prop.estSecUntilNextFrame() * 1e6)
174
+ self.nextNtEntryTime += frameTime
175
+
176
+ iter += 1
177
+ if iter > 50:
178
+ timestamp = now
179
+ self.nextNtEntryTime = now + frameTime
180
+ break
181
+
182
+ if timestamp != 0:
183
+ return timestamp
184
+
185
+ return None
186
+
187
+ def setMinTargetAreaPercent(self, areaPercent: float) -> None:
188
+ self.minTargetAreaPercent = areaPercent
189
+
190
+ def setMinTargetAreaPixels(self, areaPx: float) -> None:
191
+ self.minTargetAreaPercent = areaPx / self.prop.getResArea() * 100.0
192
+
193
+ def setMaxSightRange(self, range: meters) -> None:
194
+ self.maxSightRange = range
195
+
196
+ def enableRawStream(self, enabled: bool) -> None:
197
+ raise Exception("Raw stream not implemented")
198
+ # self.videoSimRawEnabled = enabled
199
+
200
+ def enableDrawWireframe(self, enabled: bool) -> None:
201
+ raise Exception("Wireframe not implemented")
202
+ # self.videoSimWireframeEnabled = enabled
203
+
204
+ def setWireframeResolution(self, resolution: float) -> None:
205
+ self.videoSimWireframeResolution = resolution
206
+
207
+ def enableProcessedStream(self, enabled: bool) -> None:
208
+ raise Exception("Processed stream not implemented")
209
+ # self.videoSimProcEnabled = enabled
210
+
211
+ def process(
212
+ self, latency: seconds, cameraPose: Pose3d, targets: list[VisionTargetSim]
213
+ ) -> PhotonPipelineResult:
214
+ # Sort targets by distance to camera - furthest to closest
215
+ def distance(target: VisionTargetSim):
216
+ return target.getPose().translation().distance(cameraPose.translation())
217
+
218
+ targets.sort(key=distance, reverse=True)
219
+
220
+ visibleTgts: list[
221
+ typing.Tuple[VisionTargetSim, list[typing.Tuple[float, float]]]
222
+ ] = []
223
+ detectableTgts: list[PhotonTrackedTarget] = []
224
+
225
+ camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
226
+
227
+ for tgt in targets:
228
+ if not self.canSeeTargetPose(cameraPose, tgt):
229
+ continue
230
+
231
+ fieldCorners = tgt.getFieldVertices()
232
+ isSpherical = tgt.getModel().getIsSpherical()
233
+ if isSpherical:
234
+ model = tgt.getModel()
235
+ fieldCorners = model.getFieldVertices(
236
+ TargetModel.getOrientedPose(
237
+ tgt.getPose().translation(), cameraPose.translation()
238
+ )
239
+ )
240
+
241
+ imagePoints = OpenCVHelp.projectPoints(
242
+ self.prop.getIntrinsics(),
243
+ self.prop.getDistCoeffs(),
244
+ camRt,
245
+ fieldCorners,
246
+ )
247
+
248
+ if isSpherical:
249
+ center = OpenCVHelp.avgPoint(imagePoints)
250
+ l: int = 0
251
+ for i in range(4):
252
+ if imagePoints[i, 0, 0] < imagePoints[l, 0, 0].x:
253
+ l = i
254
+
255
+ lc = imagePoints[l]
256
+ angles = [
257
+ 0.0,
258
+ ] * 4
259
+ t = (l + 1) % 4
260
+ b = (l + 1) % 4
261
+ for i in range(4):
262
+ if i == l:
263
+ continue
264
+ ic = imagePoints[i]
265
+ angles[i] = math.atan2(lc[0, 1] - ic[0, 1], ic[0, 0] - lc[0, 0])
266
+ if angles[i] >= angles[t]:
267
+ t = i
268
+ if angles[i] <= angles[b]:
269
+ b = i
270
+ for i in range(4):
271
+ if i != t and i != l and i != b:
272
+ r = i
273
+ rect = cv.RotatedRect(
274
+ center,
275
+ (
276
+ imagePoints[r, 0, 0] - lc[0, 0],
277
+ imagePoints[b, 0, 1] - imagePoints[t, 0, 1],
278
+ ),
279
+ -angles[r],
280
+ )
281
+ imagePoints = rect.points()
282
+
283
+ visibleTgts.append((tgt, imagePoints))
284
+ noisyTargetCorners = self.prop.estPixelNoise(imagePoints)
285
+ minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners)
286
+ minAreaRectPts = minAreaRect.points()
287
+ centerPt = minAreaRect.center
288
+ centerRot = self.prop.getPixelRot(centerPt)
289
+ areaPercent = self.prop.getContourAreaPercent(noisyTargetCorners)
290
+
291
+ if (
292
+ not self.canSeeCorner(noisyTargetCorners)
293
+ or not areaPercent >= self.minTargetAreaPercent
294
+ ):
295
+ continue
296
+
297
+ pnpSim: PnpResult | None = None
298
+ if tgt.fiducialId >= 0 and len(tgt.getFieldVertices()) == 4:
299
+ pnpSim = OpenCVHelp.solvePNP_Square(
300
+ self.prop.getIntrinsics(),
301
+ self.prop.getDistCoeffs(),
302
+ tgt.getModel().getVertices(),
303
+ noisyTargetCorners,
304
+ )
305
+
306
+ smallVec: list[TargetCorner] = []
307
+ for corner in minAreaRectPts:
308
+ smallVec.append(TargetCorner(corner[0], corner[1]))
309
+
310
+ cornersFloat = OpenCVHelp.pointsToTargetCorners(noisyTargetCorners)
311
+
312
+ detectableTgts.append(
313
+ PhotonTrackedTarget(
314
+ yaw=math.degrees(-centerRot.Z()),
315
+ pitch=math.degrees(-centerRot.Y()),
316
+ area=areaPercent,
317
+ skew=math.degrees(centerRot.X()),
318
+ fiducialId=tgt.fiducialId,
319
+ detectedCorners=cornersFloat,
320
+ minAreaRectCorners=smallVec,
321
+ bestCameraToTarget=pnpSim.best if pnpSim else Transform3d(),
322
+ altCameraToTarget=pnpSim.alt if pnpSim else Transform3d(),
323
+ poseAmbiguity=pnpSim.ambiguity if pnpSim else -1,
324
+ )
325
+ )
326
+
327
+ # Video streams disabled for now
328
+ if self.enableRawStream:
329
+ # VideoSimUtil::UpdateVideoProp(videoSimRaw, prop);
330
+ # cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()};
331
+ # cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1);
332
+ # blankFrame.assignTo(videoSimFrameRaw);
333
+ pass
334
+ if self.enableProcessedStream:
335
+ # VideoSimUtil::UpdateVideoProp(videoSimProcessed, prop);
336
+ pass
337
+
338
+ multiTagResults: MultiTargetPNPResult | None = None
339
+
340
+ visibleLayoutTags = VisionEstimation.getVisibleLayoutTags(
341
+ detectableTgts, self.tagLayout
342
+ )
343
+
344
+ if len(visibleLayoutTags) > 1:
345
+ usedIds = [tag.ID for tag in visibleLayoutTags]
346
+ usedIds.sort()
347
+ pnpResult = VisionEstimation.estimateCamPosePNP(
348
+ self.prop.getIntrinsics(),
349
+ self.prop.getDistCoeffs(),
350
+ detectableTgts,
351
+ self.tagLayout,
352
+ TargetModel.AprilTag36h11(),
353
+ )
354
+ if pnpResult is not None:
355
+ multiTagResults = MultiTargetPNPResult(pnpResult, usedIds)
356
+
357
+ self.heartbeatCounter += 1
358
+ return PhotonPipelineResult(
359
+ metadata=PhotonPipelineMetadata(
360
+ self.heartbeatCounter, int(latency * 1e6), 1000000
361
+ ),
362
+ targets=detectableTgts,
363
+ multitagResult=multiTagResults,
364
+ )
365
+
366
+ def submitProcessedFrame(
367
+ self, result: PhotonPipelineResult, receiveTimestamp: float | None
368
+ ):
369
+ if receiveTimestamp is None:
370
+ receiveTimestamp = wpilib.Timer.getFPGATimestamp() * 1e6
371
+ receiveTimestamp = int(receiveTimestamp)
372
+
373
+ self.ts.latencyMillisEntry.set(result.getLatencyMillis(), receiveTimestamp)
374
+
375
+ newPacket = PhotonPipelineResult.photonStruct.pack(result)
376
+ self.ts.rawBytesEntry.set(newPacket.getData(), receiveTimestamp)
377
+
378
+ hasTargets = result.hasTargets()
379
+ self.ts.hasTargetEntry.set(hasTargets, receiveTimestamp)
380
+ if not hasTargets:
381
+ self.ts.targetPitchEntry.set(0.0, receiveTimestamp)
382
+ self.ts.targetYawEntry.set(0.0, receiveTimestamp)
383
+ self.ts.targetAreaEntry.set(0.0, receiveTimestamp)
384
+ self.ts.targetPoseEntry.set(Transform3d(), receiveTimestamp)
385
+ self.ts.targetSkewEntry.set(0.0, receiveTimestamp)
386
+ else:
387
+ bestTarget = result.getBestTarget()
388
+
389
+ self.ts.targetPitchEntry.set(bestTarget.getPitch(), receiveTimestamp)
390
+ self.ts.targetYawEntry.set(bestTarget.getYaw(), receiveTimestamp)
391
+ self.ts.targetAreaEntry.set(bestTarget.getArea(), receiveTimestamp)
392
+ self.ts.targetSkewEntry.set(bestTarget.getSkew(), receiveTimestamp)
393
+
394
+ self.ts.targetPoseEntry.set(
395
+ bestTarget.getBestCameraToTarget(), receiveTimestamp
396
+ )
397
+
398
+ intrinsics = self.prop.getIntrinsics()
399
+ intrinsicsView = intrinsics.flatten().tolist()
400
+ self.ts.cameraIntrinsicsPublisher.set(intrinsicsView, receiveTimestamp)
401
+
402
+ distortion = self.prop.getDistCoeffs()
403
+ distortionView = distortion.flatten().tolist()
404
+ self.ts.cameraDistortionPublisher.set(distortionView, receiveTimestamp)
405
+
406
+ self.ts.heartbeatPublisher.set(self.heartbeatCounter, receiveTimestamp)
407
+
408
+ self.ts.subTable.getInstance().flush()