photonlibpy 2025.0.0b1__py3-none-any.whl → 2025.0.0b3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. photonlibpy/__init__.py +2 -2
  2. photonlibpy/estimation/__init__.py +5 -0
  3. photonlibpy/estimation/cameraTargetRelation.py +25 -0
  4. photonlibpy/estimation/openCVHelp.py +205 -0
  5. photonlibpy/estimation/rotTrlTransform3d.py +55 -0
  6. photonlibpy/estimation/targetModel.py +120 -0
  7. photonlibpy/estimation/visionEstimation.py +91 -0
  8. photonlibpy/generated/MultiTargetPNPResultSerde.py +7 -1
  9. photonlibpy/generated/PhotonPipelineMetadataSerde.py +6 -1
  10. photonlibpy/generated/PhotonPipelineResultSerde.py +9 -1
  11. photonlibpy/generated/PhotonTrackedTargetSerde.py +7 -1
  12. photonlibpy/generated/PnpResultSerde.py +6 -1
  13. photonlibpy/generated/TargetCornerSerde.py +6 -1
  14. photonlibpy/generated/__init__.py +0 -1
  15. photonlibpy/networktables/NTTopicSet.py +66 -0
  16. photonlibpy/networktables/__init__.py +1 -0
  17. photonlibpy/packet.py +17 -9
  18. photonlibpy/photonCamera.py +10 -7
  19. photonlibpy/photonPoseEstimator.py +3 -3
  20. photonlibpy/simulation/__init__.py +5 -0
  21. photonlibpy/simulation/photonCameraSim.py +378 -0
  22. photonlibpy/simulation/simCameraProperties.py +643 -0
  23. photonlibpy/simulation/videoSimUtil.py +2 -0
  24. photonlibpy/simulation/visionSystemSim.py +242 -0
  25. photonlibpy/simulation/visionTargetSim.py +50 -0
  26. photonlibpy/targeting/TargetCorner.py +5 -1
  27. photonlibpy/targeting/__init__.py +1 -1
  28. photonlibpy/targeting/multiTargetPNPResult.py +8 -13
  29. photonlibpy/targeting/photonPipelineResult.py +8 -4
  30. photonlibpy/targeting/photonTrackedTarget.py +7 -1
  31. photonlibpy/version.py +2 -2
  32. photonlibpy-2025.0.0b3.dist-info/METADATA +17 -0
  33. photonlibpy-2025.0.0b3.dist-info/RECORD +36 -0
  34. photonlibpy-2025.0.0b1.dist-info/METADATA +0 -13
  35. photonlibpy-2025.0.0b1.dist-info/RECORD +0 -22
  36. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b3.dist-info}/WHEEL +0 -0
  37. {photonlibpy-2025.0.0b1.dist-info → photonlibpy-2025.0.0b3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1 @@
1
+ from .NTTopicSet import NTTopicSet
photonlibpy/packet.py CHANGED
@@ -16,9 +16,17 @@
16
16
  ###############################################################################
17
17
 
18
18
  import struct
19
- from typing import Any, Optional, Type
20
- from wpimath.geometry import Transform3d, Translation3d, Rotation3d, Quaternion
19
+ from typing import Generic, Optional, Protocol, TypeVar
20
+
21
21
  import wpilib
22
+ from wpimath.geometry import Quaternion, Rotation3d, Transform3d, Translation3d
23
+
24
+ T = TypeVar("T")
25
+
26
+
27
class Serde(Generic[T], Protocol):
    """Structural interface for a serializer/deserializer of values of type T.

    Any object providing ``pack`` and ``unpack`` with these signatures (such as
    the generated ``photonStruct`` serde classes) satisfies this protocol; no
    inheritance is required.
    """

    def pack(self, value: T) -> "Packet": ...

    def unpack(self, packet: "Packet") -> T: ...
22
30
 
23
31
 
24
32
  class Packet:
@@ -33,9 +41,9 @@ class Packet:
33
41
  self.readPos = 0
34
42
  self.outOfBytes = False
35
43
 
36
- def clear(self):
44
+ def clear(self) -> None:
37
45
  """Clears the packet and resets the read and write positions."""
38
- self.packetData = [0] * self.size
46
+ self.packetData = bytes(self.size)
39
47
  self.readPos = 0
40
48
  self.outOfBytes = False
41
49
 
@@ -157,7 +165,7 @@ class Packet:
157
165
  ret.append(self.decodeDouble())
158
166
  return ret
159
167
 
160
- def decodeShortList(self) -> list[float]:
168
+ def decodeShortList(self) -> list[int]:
161
169
  """
162
170
  * Returns a decoded array of shorts from the packet.
163
171
  """
@@ -186,14 +194,14 @@ class Packet:
186
194
 
187
195
  return Transform3d(translation, rotation)
188
196
 
189
- def decodeList(self, serde: Type):
197
+ def decodeList(self, serde: Serde[T]) -> list[T]:
190
198
  retList = []
191
199
  arr_len = self.decode8()
192
200
  for _ in range(arr_len):
193
201
  retList.append(serde.unpack(self))
194
202
  return retList
195
203
 
196
- def decodeOptional(self, serde: Type) -> Optional[Any]:
204
+ def decodeOptional(self, serde: Serde[T]) -> Optional[T]:
197
205
  if self.decodeBoolean():
198
206
  return serde.unpack(self)
199
207
  else:
@@ -280,7 +288,7 @@ class Packet:
280
288
  self.encodeDouble(quaternion.Y())
281
289
  self.encodeDouble(quaternion.Z())
282
290
 
283
- def encodeList(self, values: list[Any], serde: Type):
291
+ def encodeList(self, values: list[T], serde: Serde[T]):
284
292
  """
285
293
  Encodes a list of items using a specific serializer and appends it to the packet.
286
294
  """
@@ -290,7 +298,7 @@ class Packet:
290
298
  self.packetData = self.packetData + packed.getData()
291
299
  self.size = len(self.packetData)
292
300
 
293
- def encodeOptional(self, value: Optional[Any], serde: Type):
301
+ def encodeOptional(self, value: Optional[T], serde: Serde[T]):
294
302
  """
295
303
  Encodes an optional value using a specific serializer.
296
304
  """
@@ -17,15 +17,17 @@
17
17
 
18
18
  from enum import Enum
19
19
  from typing import List
20
+
20
21
  import ntcore
21
- from wpilib import RobotController, Timer
22
- import wpilib
23
- from .packet import Packet
24
- from .targeting.photonPipelineResult import PhotonPipelineResult
25
- from .version import PHOTONVISION_VERSION, PHOTONLIB_VERSION # type: ignore[import-untyped]
26
22
 
27
23
  # magical import to make serde stuff work
28
24
  import photonlibpy.generated # noqa
25
+ import wpilib
26
+ from wpilib import RobotController, Timer
27
+
28
+ from .packet import Packet
29
+ from .targeting.photonPipelineResult import PhotonPipelineResult
30
+ from .version import PHOTONLIB_VERSION # type: ignore[import-untyped]
29
31
 
30
32
 
31
33
  class VisionLEDMode(Enum):
@@ -229,14 +231,15 @@ class PhotonCamera:
229
231
  versionString = self.versionEntry.get(defaultValue="")
230
232
  localUUID = PhotonPipelineResult.photonStruct.MESSAGE_VERSION
231
233
 
232
- remoteUUID = self._rawBytesEntry.getTopic().getProperty("message_uuid")
234
+ remoteUUID = str(self._rawBytesEntry.getTopic().getProperty("message_uuid"))
233
235
 
234
- if remoteUUID is None or len(remoteUUID) == 0:
236
+ if not remoteUUID:
235
237
  wpilib.reportWarning(
236
238
  f"PhotonVision coprocessor at path {self._path} has not reported a message interface UUID - is your coprocessor's camera started?",
237
239
  True,
238
240
  )
239
241
 
242
+ assert isinstance(remoteUUID, str)
240
243
  # ntcore hands us a JSON string with leading/trailing quotes - remove those
241
244
  remoteUUID = remoteUUID.replace('"', "")
242
245
 
@@ -20,11 +20,11 @@ from typing import Optional
20
20
 
21
21
  import wpilib
22
22
  from robotpy_apriltag import AprilTagFieldLayout
23
- from wpimath.geometry import Transform3d, Pose3d, Pose2d
23
+ from wpimath.geometry import Pose2d, Pose3d, Transform3d
24
24
 
25
- from .targeting.photonPipelineResult import PhotonPipelineResult
26
- from .photonCamera import PhotonCamera
27
25
  from .estimatedRobotPose import EstimatedRobotPose
26
+ from .photonCamera import PhotonCamera
27
+ from .targeting.photonPipelineResult import PhotonPipelineResult
28
28
 
29
29
 
30
30
  class PoseStrategy(enum.Enum):
@@ -0,0 +1,5 @@
1
+ from .photonCameraSim import PhotonCameraSim
2
+ from .simCameraProperties import SimCameraProperties
3
+ from .videoSimUtil import VideoSimUtil
4
+ from .visionSystemSim import VisionSystemSim
5
+ from .visionTargetSim import VisionTargetSim
@@ -0,0 +1,378 @@
1
+ import math
2
+ import typing
3
+
4
+ import cscore as cs
5
+ import cv2 as cv
6
+ import numpy as np
7
+ import wpilib
8
+ from robotpy_apriltag import AprilTagField, AprilTagFieldLayout
9
+ from wpimath.geometry import Pose3d, Transform3d
10
+ from wpimath.units import meters, seconds
11
+
12
+ from ..estimation import OpenCVHelp, RotTrlTransform3d, TargetModel, VisionEstimation
13
+ from ..estimation.cameraTargetRelation import CameraTargetRelation
14
+ from ..networktables.NTTopicSet import NTTopicSet
15
+ from ..photonCamera import PhotonCamera
16
+ from ..targeting import (
17
+ MultiTargetPNPResult,
18
+ PhotonPipelineMetadata,
19
+ PhotonPipelineResult,
20
+ PhotonTrackedTarget,
21
+ PnpResult,
22
+ TargetCorner,
23
+ )
24
+ from .simCameraProperties import SimCameraProperties
25
+ from .visionTargetSim import VisionTargetSim
26
+
27
+
28
class PhotonCameraSim:
    """Simulates a PhotonVision camera running on a coprocessor.

    Projects :class:`VisionTargetSim` poses through a
    :class:`SimCameraProperties` camera model and publishes the resulting
    :class:`PhotonPipelineResult` over NetworkTables like a real camera would.
    """

    # Default minimum target area, in pixels, below which a target is dropped.
    kDefaultMinAreaPx: float = 100

    def __init__(
        self,
        camera: PhotonCamera,
        # NOTE(review): this default is evaluated once at definition time, so
        # every PhotonCameraSim constructed without `props` shares the same
        # instance — confirm PERFECT_90DEG() returns a fresh/shareable object.
        props: SimCameraProperties = SimCameraProperties.PERFECT_90DEG(),
        minTargetAreaPercent: float | None = None,
        maxSightRange: meters | None = None,
    ):
        """Create a simulated camera.

        :param camera: the PhotonCamera this simulation feeds.
        :param props: camera model (resolution, FOV, noise, frame rate).
        :param minTargetAreaPercent: minimum target area as a percent of the
            image; only applied together with ``maxSightRange`` (both or neither).
        :param maxSightRange: furthest distance at which targets are visible.
        """

        self.minTargetAreaPercent: float = 0.0
        self.maxSightRange: float = 1.0e99
        self.videoSimRawEnabled: bool = False
        self.videoSimWireframeEnabled: bool = False
        self.videoSimWireframeResolution: float = 0.1
        self.videoSimProcEnabled: bool = (
            False  # TODO switch this back to default True when the functionality is enabled
        )
        self.heartbeatCounter: int = 0
        # Time of the next simulated frame publish, in integer microseconds
        # of FPGA time (matches NT timestamp units).
        self.nextNtEntryTime = int(wpilib.Timer.getFPGATimestamp() * 1e6)
        self.tagLayout = AprilTagFieldLayout.loadField(AprilTagField.k2024Crescendo)

        self.cam = camera
        self.prop = props
        # Seed the min-area threshold from the pixel default; may be
        # overridden by the explicit percent argument below.
        self.setMinTargetAreaPixels(PhotonCameraSim.kDefaultMinAreaPx)

        # TODO Check fps is right
        self.videoSimRaw = cs.CvSource(
            self.cam.getName() + "-raw",
            cs.VideoMode.PixelFormat.kGray,
            self.prop.getResWidth(),
            self.prop.getResHeight(),
            1,
        )
        # NOTE(review): shape is (width, height); OpenCV/cscore frame buffers
        # are conventionally (height, width) — confirm before raw streaming
        # is enabled.
        self.videoSimFrameRaw = np.zeros(
            (self.prop.getResWidth(), self.prop.getResHeight())
        )

        # TODO Check fps is right
        self.videoSimProcessed = cs.CvSource(
            self.cam.getName() + "-processed",
            cs.VideoMode.PixelFormat.kGray,
            self.prop.getResWidth(),
            self.prop.getResHeight(),
            1,
        )
        self.videoSimFrameProcessed = np.zeros(
            (self.prop.getResWidth(), self.prop.getResHeight())
        )

        # Create and publish this camera's NetworkTables topics.
        self.ts = NTTopicSet("photonvision", self.cam.getName())
        self.ts.updateEntries()

        # Handle this last explicitly for this function signature because the other constructor is called in the initialiser list
        if minTargetAreaPercent is not None and maxSightRange is not None:
            self.minTargetAreaPercent = minTargetAreaPercent
            self.maxSightRange = maxSightRange
86
+
87
    def getCamera(self) -> PhotonCamera:
        """Return the PhotonCamera this simulation drives."""
        return self.cam

    def getMinTargetAreaPercent(self) -> float:
        """Return the minimum detectable target area, as a percent of image area."""
        return self.minTargetAreaPercent

    def getMinTargetAreaPixels(self) -> float:
        """Return the minimum detectable target area, converted to pixels."""
        return self.minTargetAreaPercent / 100.0 * self.prop.getResArea()

    def getMaxSightRange(self) -> meters:
        """Return the furthest distance at which targets remain visible."""
        return self.maxSightRange

    def getVideoSimRaw(self) -> cs.CvSource:
        """Return the CvSource backing the raw video stream."""
        return self.videoSimRaw

    def getVideoSimFrameRaw(self) -> np.ndarray:
        """Return the raw video frame buffer."""
        return self.videoSimFrameRaw
104
+
105
    def canSeeTargetPose(self, camPose: Pose3d, target: VisionTargetSim) -> bool:
        """Determine whether a target could be visible from a camera pose.

        A target is considered visible when its center lies inside both the
        horizontal and vertical FOV, a planar target is facing the camera
        (within 90 degrees), and the target is within ``maxSightRange``.

        :param camPose: field-relative camera pose.
        :param target: the simulated target to test.
        """
        rel = CameraTargetRelation(camPose, target.getPose())
        return (
            (
                # Target center within half the FOV on both axes.
                abs(rel.camToTargYaw.degrees())
                < self.prop.getHorizFOV().degrees() / 2.0
                and abs(rel.camToTargPitch.degrees())
                < self.prop.getVertFOV().degrees() / 2.0
            )
            and (
                # Planar targets are invisible from behind.
                not target.getModel().getIsPlanar()
                or abs(rel.targtoCamAngle.degrees()) < 90
            )
            and rel.camToTarg.translation().norm() <= self.maxSightRange
        )
120
+
121
+ def canSeeCorner(self, points: np.ndarray) -> bool:
122
+ assert points.shape[1] == 1
123
+ assert points.shape[2] == 2
124
+ for pt in points:
125
+ x = pt[0, 0]
126
+ y = pt[0, 1]
127
+ if (
128
+ x < 0
129
+ or x > self.prop.getResWidth()
130
+ or y < 0
131
+ or y > self.prop.getResHeight()
132
+ ):
133
+ return False
134
+
135
+ return True
136
+
137
+ def consumeNextEntryTime(self) -> float | None:
138
+ now = int(wpilib.Timer.getFPGATimestamp() * 1e6)
139
+ timestamp = 0
140
+ iter = 0
141
+ while now >= self.nextNtEntryTime:
142
+ timestamp = int(self.nextNtEntryTime)
143
+ frameTime = int(self.prop.estSecUntilNextFrame() * 1e6)
144
+ self.nextNtEntryTime += frameTime
145
+
146
+ iter += 1
147
+ if iter > 50:
148
+ timestamp = now
149
+ self.nextNtEntryTime = now + frameTime
150
+ break
151
+
152
+ if timestamp != 0:
153
+ return timestamp
154
+
155
+ return None
156
+
157
    def setMinTargetAreaPercent(self, areaPercent: float) -> None:
        """Set the minimum target area, as a percent of total image area."""
        self.minTargetAreaPercent = areaPercent

    def setMinTargetAreaPixels(self, areaPx: float) -> None:
        """Set the minimum target area in pixels; stored internally as a percent."""
        self.minTargetAreaPercent = areaPx / self.prop.getResArea() * 100.0

    def setMaxSightRange(self, range: meters) -> None:
        """Set the furthest distance at which targets remain visible."""
        # NOTE: parameter name `range` shadows the builtin; kept for API compatibility.
        self.maxSightRange = range
165
+
166
+ def enableRawStream(self, enabled: bool) -> None:
167
+ self.videoSimRawEnabled = enabled
168
+ raise Exception("Raw stream not implemented")
169
+
170
+ def enableDrawWireframe(self, enabled: bool) -> None:
171
+ self.videoSimWireframeEnabled = enabled
172
+ raise Exception("Wireframe not implemented")
173
+
174
+ def setWireframeResolution(self, resolution: float) -> None:
175
+ self.videoSimWireframeResolution = resolution
176
+
177
+ def enableProcessedStream(self, enabled: bool) -> None:
178
+ self.videoSimProcEnabled = enabled
179
+ raise Exception("Processed stream not implemented")
180
+
181
+ def process(
182
+ self, latency: seconds, cameraPose: Pose3d, targets: list[VisionTargetSim]
183
+ ) -> PhotonPipelineResult:
184
+ # Sort targets by distance to camera - furthest to closest
185
+ def distance(target: VisionTargetSim):
186
+ return target.getPose().translation().distance(cameraPose.translation())
187
+
188
+ targets.sort(key=distance, reverse=True)
189
+
190
+ visibleTgts: list[typing.Tuple[VisionTargetSim, np.ndarray]] = []
191
+ detectableTgts: list[PhotonTrackedTarget] = []
192
+
193
+ camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
194
+
195
+ for tgt in targets:
196
+ if not self.canSeeTargetPose(cameraPose, tgt):
197
+ continue
198
+
199
+ fieldCorners = tgt.getFieldVertices()
200
+ isSpherical = tgt.getModel().getIsSpherical()
201
+ if isSpherical:
202
+ model = tgt.getModel()
203
+ fieldCorners = model.getFieldVertices(
204
+ TargetModel.getOrientedPose(
205
+ tgt.getPose().translation(), cameraPose.translation()
206
+ )
207
+ )
208
+
209
+ imagePoints = OpenCVHelp.projectPoints(
210
+ self.prop.getIntrinsics(),
211
+ self.prop.getDistCoeffs(),
212
+ camRt,
213
+ fieldCorners,
214
+ )
215
+
216
+ if isSpherical:
217
+ center = OpenCVHelp.avgPoint(imagePoints)
218
+ l: int = 0
219
+ for i in range(4):
220
+ if imagePoints[i, 0, 0] < imagePoints[l, 0, 0].x:
221
+ l = i
222
+
223
+ lc = imagePoints[l]
224
+ angles = [
225
+ 0.0,
226
+ ] * 4
227
+ t = (l + 1) % 4
228
+ b = (l + 1) % 4
229
+ r = 0
230
+ for i in range(4):
231
+ if i == l:
232
+ continue
233
+ ic = imagePoints[i]
234
+ angles[i] = math.atan2(lc[0, 1] - ic[0, 1], ic[0, 0] - lc[0, 0])
235
+ if angles[i] >= angles[t]:
236
+ t = i
237
+ if angles[i] <= angles[b]:
238
+ b = i
239
+ for i in range(4):
240
+ if i != t and i != l and i != b:
241
+ r = i
242
+ rect = cv.RotatedRect(
243
+ (center[0, 0], center[0, 1]),
244
+ (
245
+ imagePoints[r, 0, 0] - lc[0, 0],
246
+ imagePoints[b, 0, 1] - imagePoints[t, 0, 1],
247
+ ),
248
+ -angles[r],
249
+ )
250
+ imagePoints = np.array([[p[0], p[1], p[2]] for p in rect.points()])
251
+
252
+ visibleTgts.append((tgt, imagePoints))
253
+ noisyTargetCorners = self.prop.estPixelNoise(imagePoints)
254
+ minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners)
255
+ minAreaRectPts = minAreaRect.points()
256
+ centerPt = minAreaRect.center
257
+ centerRot = self.prop.getPixelRot(centerPt)
258
+ areaPercent = self.prop.getContourAreaPercent(noisyTargetCorners)
259
+
260
+ if (
261
+ not self.canSeeCorner(noisyTargetCorners)
262
+ or not areaPercent >= self.minTargetAreaPercent
263
+ ):
264
+ continue
265
+
266
+ pnpSim: PnpResult | None = None
267
+ if tgt.fiducialId >= 0 and len(tgt.getFieldVertices()) == 4:
268
+ pnpSim = OpenCVHelp.solvePNP_Square(
269
+ self.prop.getIntrinsics(),
270
+ self.prop.getDistCoeffs(),
271
+ tgt.getModel().getVertices(),
272
+ noisyTargetCorners,
273
+ )
274
+
275
+ smallVec: list[TargetCorner] = []
276
+ for corner in minAreaRectPts:
277
+ smallVec.append(TargetCorner(corner[0], corner[1]))
278
+
279
+ cornersFloat = OpenCVHelp.pointsToTargetCorners(noisyTargetCorners)
280
+
281
+ detectableTgts.append(
282
+ PhotonTrackedTarget(
283
+ yaw=math.degrees(-centerRot.Z()),
284
+ pitch=math.degrees(-centerRot.Y()),
285
+ area=areaPercent,
286
+ skew=math.degrees(centerRot.X()),
287
+ fiducialId=tgt.fiducialId,
288
+ detectedCorners=cornersFloat,
289
+ minAreaRectCorners=smallVec,
290
+ bestCameraToTarget=pnpSim.best if pnpSim else Transform3d(),
291
+ altCameraToTarget=pnpSim.alt if pnpSim else Transform3d(),
292
+ poseAmbiguity=pnpSim.ambiguity if pnpSim else -1,
293
+ )
294
+ )
295
+
296
+ # Video streams disabled for now
297
+ if self.videoSimRawEnabled:
298
+ # VideoSimUtil::UpdateVideoProp(videoSimRaw, prop);
299
+ # cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()};
300
+ # cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1);
301
+ # blankFrame.assignTo(videoSimFrameRaw);
302
+ pass
303
+ if self.videoSimProcEnabled:
304
+ # VideoSimUtil::UpdateVideoProp(videoSimProcessed, prop);
305
+ pass
306
+
307
+ multiTagResults: MultiTargetPNPResult | None = None
308
+
309
+ visibleLayoutTags = VisionEstimation.getVisibleLayoutTags(
310
+ detectableTgts, self.tagLayout
311
+ )
312
+
313
+ if len(visibleLayoutTags) > 1:
314
+ usedIds = [tag.ID for tag in visibleLayoutTags]
315
+ usedIds.sort()
316
+ pnpResult = VisionEstimation.estimateCamPosePNP(
317
+ self.prop.getIntrinsics(),
318
+ self.prop.getDistCoeffs(),
319
+ detectableTgts,
320
+ self.tagLayout,
321
+ TargetModel.AprilTag36h11(),
322
+ )
323
+ if pnpResult is not None:
324
+ multiTagResults = MultiTargetPNPResult(pnpResult, usedIds)
325
+
326
+ self.heartbeatCounter += 1
327
+ return PhotonPipelineResult(
328
+ metadata=PhotonPipelineMetadata(
329
+ self.heartbeatCounter, int(latency * 1e6), 1000000
330
+ ),
331
+ targets=detectableTgts,
332
+ multitagResult=multiTagResults,
333
+ )
334
+
335
    def submitProcessedFrame(
        self, result: PhotonPipelineResult, receiveTimestamp: float | None
    ):
        """Publish a pipeline result to this camera's NetworkTables topics.

        :param result: the simulated result to publish.
        :param receiveTimestamp: NT receive time in microseconds of FPGA time;
            when None, the current FPGA time is used.
        """
        if receiveTimestamp is None:
            receiveTimestamp = wpilib.Timer.getFPGATimestamp() * 1e6
        # NT timestamps are integer microseconds.
        receiveTimestamp = int(receiveTimestamp)

        self.ts.latencyMillisEntry.set(result.getLatencyMillis(), receiveTimestamp)

        # The packed raw-bytes topic is the transport a real PhotonCamera reads.
        newPacket = PhotonPipelineResult.photonStruct.pack(result)
        self.ts.rawBytesEntry.set(newPacket.getData(), receiveTimestamp)

        hasTargets = result.hasTargets()
        self.ts.hasTargetEntry.set(hasTargets, receiveTimestamp)
        if not hasTargets:
            # Zero out the convenience "best target" topics.
            self.ts.targetPitchEntry.set(0.0, receiveTimestamp)
            self.ts.targetYawEntry.set(0.0, receiveTimestamp)
            self.ts.targetAreaEntry.set(0.0, receiveTimestamp)
            self.ts.targetPoseEntry.set(Transform3d(), receiveTimestamp)
            self.ts.targetSkewEntry.set(0.0, receiveTimestamp)
        else:
            bestTarget = result.getBestTarget()
            assert bestTarget

            self.ts.targetPitchEntry.set(bestTarget.getPitch(), receiveTimestamp)
            self.ts.targetYawEntry.set(bestTarget.getYaw(), receiveTimestamp)
            self.ts.targetAreaEntry.set(bestTarget.getArea(), receiveTimestamp)
            self.ts.targetSkewEntry.set(bestTarget.getSkew(), receiveTimestamp)

            self.ts.targetPoseEntry.set(
                bestTarget.getBestCameraToTarget(), receiveTimestamp
            )

        # Publish the camera model alongside the results.
        intrinsics = self.prop.getIntrinsics()
        intrinsicsView = intrinsics.flatten().tolist()
        self.ts.cameraIntrinsicsPublisher.set(intrinsicsView, receiveTimestamp)

        distortion = self.prop.getDistCoeffs()
        distortionView = distortion.flatten().tolist()
        self.ts.cameraDistortionPublisher.set(distortionView, receiveTimestamp)

        self.ts.heartbeatPublisher.set(self.heartbeatCounter, receiveTimestamp)

        self.ts.subTable.getInstance().flush()