photonlibpy 2025.0.0a0__py3-none-any.whl → 2025.0.0b2__py3-none-any.whl

Files changed (36)
  1. photonlibpy/__init__.py +2 -2
  2. photonlibpy/estimation/__init__.py +5 -0
  3. photonlibpy/estimation/cameraTargetRelation.py +25 -0
  4. photonlibpy/estimation/openCVHelp.py +200 -0
  5. photonlibpy/estimation/rotTrlTransform3d.py +32 -0
  6. photonlibpy/estimation/targetModel.py +137 -0
  7. photonlibpy/estimation/visionEstimation.py +91 -0
  8. photonlibpy/generated/MultiTargetPNPResultSerde.py +12 -0
  9. photonlibpy/generated/PhotonPipelineMetadataSerde.py +23 -4
  10. photonlibpy/generated/PhotonPipelineResultSerde.py +19 -2
  11. photonlibpy/generated/PhotonTrackedTargetSerde.py +40 -0
  12. photonlibpy/generated/PnpResultSerde.py +19 -0
  13. photonlibpy/generated/TargetCornerSerde.py +12 -0
  14. photonlibpy/generated/__init__.py +0 -1
  15. photonlibpy/networktables/NTTopicSet.py +64 -0
  16. photonlibpy/networktables/__init__.py +1 -0
  17. photonlibpy/packet.py +123 -8
  18. photonlibpy/photonCamera.py +10 -7
  19. photonlibpy/photonPoseEstimator.py +5 -5
  20. photonlibpy/simulation/__init__.py +5 -0
  21. photonlibpy/simulation/photonCameraSim.py +408 -0
  22. photonlibpy/simulation/simCameraProperties.py +661 -0
  23. photonlibpy/simulation/videoSimUtil.py +2 -0
  24. photonlibpy/simulation/visionSystemSim.py +237 -0
  25. photonlibpy/simulation/visionTargetSim.py +50 -0
  26. photonlibpy/targeting/TargetCorner.py +5 -1
  27. photonlibpy/targeting/__init__.py +1 -1
  28. photonlibpy/targeting/multiTargetPNPResult.py +10 -4
  29. photonlibpy/targeting/photonPipelineResult.py +12 -5
  30. photonlibpy/targeting/photonTrackedTarget.py +13 -5
  31. photonlibpy/version.py +2 -2
  32. {photonlibpy-2025.0.0a0.dist-info → photonlibpy-2025.0.0b2.dist-info}/METADATA +6 -2
  33. photonlibpy-2025.0.0b2.dist-info/RECORD +36 -0
  34. {photonlibpy-2025.0.0a0.dist-info → photonlibpy-2025.0.0b2.dist-info}/WHEEL +1 -1
  35. photonlibpy-2025.0.0a0.dist-info/RECORD +0 -22
  36. {photonlibpy-2025.0.0a0.dist-info → photonlibpy-2025.0.0b2.dist-info}/top_level.txt +0 -0
photonlibpy/simulation/photonCameraSim.py
@@ -0,0 +1,408 @@
+import math
+import typing
+
+import cscore as cs
+import cv2 as cv
+import numpy as np
+import robotpy_apriltag
+import wpilib
+from wpimath.geometry import Pose3d, Transform3d
+from wpimath.units import meters, seconds
+
+from ..estimation import OpenCVHelp, RotTrlTransform3d, TargetModel, VisionEstimation
+from ..estimation.cameraTargetRelation import CameraTargetRelation
+from ..networktables.NTTopicSet import NTTopicSet
+from ..photonCamera import PhotonCamera
+from ..targeting import (
+    MultiTargetPNPResult,
+    PhotonPipelineMetadata,
+    PhotonPipelineResult,
+    PhotonTrackedTarget,
+    PnpResult,
+    TargetCorner,
+)
+from .simCameraProperties import SimCameraProperties
+from .visionTargetSim import VisionTargetSim
+
+
+class PhotonCameraSim:
+    kDefaultMinAreaPx: float = 100
+
+    def __init__(
+        self,
+        camera: PhotonCamera,
+        props: SimCameraProperties | None = None,
+        minTargetAreaPercent: float | None = None,
+        maxSightRange: meters | None = None,
+    ):
+
+        self.minTargetAreaPercent: float = 0.0
+        self.maxSightRange: float = 1.0e99
+        self.videoSimRawEnabled: bool = False
+        self.videoSimWireframeEnabled: bool = False
+        self.videoSimWireframeResolution: float = 0.1
+        self.videoSimProcEnabled: bool = True
+        self.ts = NTTopicSet()
+        self.heartbeatCounter: int = 0
+        self.nextNtEntryTime = int(wpilib.Timer.getFPGATimestamp() * 1e6)
+        self.tagLayout = robotpy_apriltag.loadAprilTagLayoutField(
+            robotpy_apriltag.AprilTagField.k2024Crescendo
+        )
+
+        if (
+            camera is not None
+            and props is None
+            and minTargetAreaPercent is None
+            and maxSightRange is None
+        ):
+            props = SimCameraProperties.PERFECT_90DEG()
+        elif (
+            camera is not None
+            and props is not None
+            and minTargetAreaPercent is not None
+            and maxSightRange is not None
+        ):
+            pass
+        elif (
+            camera is not None
+            and props is not None
+            and minTargetAreaPercent is None
+            and maxSightRange is None
+        ):
+            pass
+        else:
+            raise Exception("Invalid Constructor Called")
+
+        self.cam = camera
+        self.prop = props
+        self.setMinTargetAreaPixels(PhotonCameraSim.kDefaultMinAreaPx)
+
+        # TODO Check fps is right
+        self.videoSimRaw = cs.CvSource(
+            self.cam.getName() + "-raw",
+            cs.VideoMode.PixelFormat.kGray,
+            self.prop.getResWidth(),
+            self.prop.getResHeight(),
+            1,
+        )
+        self.videoSimFrameRaw = np.zeros(
+            (self.prop.getResWidth(), self.prop.getResHeight())
+        )
+
+        # TODO Check fps is right
+        self.videoSimProcessed = cs.CvSource(
+            self.cam.getName() + "-processed",
+            cs.VideoMode.PixelFormat.kGray,
+            self.prop.getResWidth(),
+            self.prop.getResHeight(),
+            1,
+        )
+        self.videoSimFrameProcessed = np.zeros(
+            (self.prop.getResWidth(), self.prop.getResHeight())
+        )
+
+        self.ts.subTable = self.cam._cameraTable
+        self.ts.updateEntries()
+
+        # Handle this last explicitly for this function signature because the other constructor is called in the initialiser list
+        if (
+            camera is not None
+            and props is not None
+            and minTargetAreaPercent is not None
+            and maxSightRange is not None
+        ):
+            self.minTargetAreaPercent = minTargetAreaPercent
+            self.maxSightRange = maxSightRange
+
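Reviewer note: the optional parameters above stand in for the overloaded constructors of the Java/C++ `PhotonCameraSim`; passing only a camera falls back to `SimCameraProperties.PERFECT_90DEG()`, and only the fully-specified form sets the detection limits. A minimal construction sketch, assuming a camera named `"front"` (the name, area, and range values are illustrative, not from this diff):

```python
from photonlibpy.photonCamera import PhotonCamera
from photonlibpy.simulation.photonCameraSim import PhotonCameraSim
from photonlibpy.simulation.simCameraProperties import SimCameraProperties

cam = PhotonCamera("front")  # hypothetical camera name

# Camera only: properties default to SimCameraProperties.PERFECT_90DEG()
simDefault = PhotonCameraSim(cam)

# Camera + explicit properties: detection limits keep their defaults
props = SimCameraProperties.PERFECT_90DEG()
simCustom = PhotonCameraSim(cam, props)

# Fully specified: minimum target area (percent of the image) and max sight range (meters)
simFull = PhotonCameraSim(cam, props, 0.05, 4.0)
```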
+    def getCamera(self) -> PhotonCamera:
+        return self.cam
+
+    def getMinTargetAreaPercent(self) -> float:
+        return self.minTargetAreaPercent
+
+    def getMinTargetAreaPixels(self) -> float:
+        return self.minTargetAreaPercent / 100.0 * self.prop.getResArea()
+
+    def getMaxSightRange(self) -> meters:
+        return self.maxSightRange
+
+    def getVideoSimRaw(self) -> cs.CvSource:
+        return self.videoSimRaw
+
+    def getVideoSimFrameRaw(self) -> np.ndarray:
+        return self.videoSimFrameRaw
+
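Reviewer note: the two min-area accessors are linked through the sensor's pixel count, so the default `kDefaultMinAreaPx = 100` maps to a very small percentage. A quick worked example, assuming a hypothetical 640x480 sensor (the resolution is not fixed by this diff):

```python
res_area = 640 * 480                         # 307200 px for the assumed resolution
min_percent = 100 / res_area * 100.0         # setMinTargetAreaPixels(100) stores ~0.0326 %
min_pixels = min_percent / 100.0 * res_area  # getMinTargetAreaPixels() returns 100.0 px again
```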
+    def canSeeTargetPose(self, camPose: Pose3d, target: VisionTargetSim) -> bool:
+        rel = CameraTargetRelation(camPose, target.getPose())
+        return (
+            (
+                abs(rel.camToTargYaw.degrees())
+                < self.prop.getHorizFOV().degrees() / 2.0
+                and abs(rel.camToTargPitch.degrees())
+                < self.prop.getVertFOV().degrees() / 2.0
+            )
+            and (
+                not target.getModel().getIsPlanar()
+                or abs(rel.targtoCamAngle.degrees()) < 90
+            )
+            and rel.camToTarg.translation().norm() <= self.maxSightRange
+        )
+
+    def canSeeCorner(self, points: np.ndarray) -> bool:
+        assert points.shape[1] == 1
+        assert points.shape[2] == 2
+        for pt in points:
+            x = pt[0, 0]
+            y = pt[0, 1]
+            if (
+                x < 0
+                or x > self.prop.getResWidth()
+                or y < 0
+                or y > self.prop.getResHeight()
+            ):
+                return False
+
+        return True
+
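Reviewer note: `canSeeCorner` expects points in OpenCV's `(N, 1, 2)` layout (the shape `cv2.projectPoints` produces), and a corner sitting exactly on the image border still counts as visible. A small sketch with invented pixel values and the same assumed 640x480 resolution:

```python
import numpy as np

# Two corners in OpenCV point layout: shape (N, 1, 2), columns are (x, y) in pixels.
pts = np.array([[[10.0, 20.0]], [[640.0, 480.0]]])

# Both points lie inside [0, width] x [0, height], so canSeeCorner(pts) returns True;
# pushing either coordinate outside that box would make it return False.
```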
+    def consumeNextEntryTime(self) -> float | None:
+        now = int(wpilib.Timer.getFPGATimestamp() * 1e6)
+        timestamp = 0
+        iter = 0
+        while now >= self.nextNtEntryTime:
+            timestamp = int(self.nextNtEntryTime)
+            frameTime = int(self.prop.estSecUntilNextFrame() * 1e6)
+            self.nextNtEntryTime += frameTime
+
+            iter += 1
+            if iter > 50:
+                timestamp = now
+                self.nextNtEntryTime = now + frameTime
+                break
+
+        if timestamp != 0:
+            return timestamp
+
+        return None
+
+    def setMinTargetAreaPercent(self, areaPercent: float) -> None:
+        self.minTargetAreaPercent = areaPercent
+
+    def setMinTargetAreaPixels(self, areaPx: float) -> None:
+        self.minTargetAreaPercent = areaPx / self.prop.getResArea() * 100.0
+
+    def setMaxSightRange(self, range: meters) -> None:
+        self.maxSightRange = range
+
+    def enableRawStream(self, enabled: bool) -> None:
+        raise Exception("Raw stream not implemented")
+        # self.videoSimRawEnabled = enabled
+
+    def enableDrawWireframe(self, enabled: bool) -> None:
+        raise Exception("Wireframe not implemented")
+        # self.videoSimWireframeEnabled = enabled
+
+    def setWireframeResolution(self, resolution: float) -> None:
+        self.videoSimWireframeResolution = resolution
+
+    def enableProcessedStream(self, enabled: bool) -> None:
+        raise Exception("Processed stream not implemented")
+        # self.videoSimProcEnabled = enabled
+
+    def process(
+        self, latency: seconds, cameraPose: Pose3d, targets: list[VisionTargetSim]
+    ) -> PhotonPipelineResult:
+        # Sort targets by distance to camera - furthest to closest
+        def distance(target: VisionTargetSim):
+            return target.getPose().translation().distance(cameraPose.translation())
+
+        targets.sort(key=distance, reverse=True)
+
+        visibleTgts: list[
+            typing.Tuple[VisionTargetSim, list[typing.Tuple[float, float]]]
+        ] = []
+        detectableTgts: list[PhotonTrackedTarget] = []
+
+        camRt = RotTrlTransform3d.makeRelativeTo(cameraPose)
+
+        for tgt in targets:
+            if not self.canSeeTargetPose(cameraPose, tgt):
+                continue
+
+            fieldCorners = tgt.getFieldVertices()
+            isSpherical = tgt.getModel().getIsSpherical()
+            if isSpherical:
+                model = tgt.getModel()
+                fieldCorners = model.getFieldVertices(
+                    TargetModel.getOrientedPose(
+                        tgt.getPose().translation(), cameraPose.translation()
+                    )
+                )
+
+            imagePoints = OpenCVHelp.projectPoints(
+                self.prop.getIntrinsics(),
+                self.prop.getDistCoeffs(),
+                camRt,
+                fieldCorners,
+            )
+
+            if isSpherical:
+                center = OpenCVHelp.avgPoint(imagePoints)
+                l: int = 0
+                # find the left-most projected point
+                for i in range(4):
+                    if imagePoints[i, 0, 0] < imagePoints[l, 0, 0]:
+                        l = i
+
+                lc = imagePoints[l]
+                angles = [
+                    0.0,
+                ] * 4
+                t = (l + 1) % 4
+                b = (l + 1) % 4
+                for i in range(4):
+                    if i == l:
+                        continue
+                    ic = imagePoints[i]
+                    angles[i] = math.atan2(lc[0, 1] - ic[0, 1], ic[0, 0] - lc[0, 0])
+                    if angles[i] >= angles[t]:
+                        t = i
+                    if angles[i] <= angles[b]:
+                        b = i
+                for i in range(4):
+                    if i != t and i != l and i != b:
+                        r = i
+                rect = cv.RotatedRect(
+                    center,
+                    (
+                        imagePoints[r, 0, 0] - lc[0, 0],
+                        imagePoints[b, 0, 1] - imagePoints[t, 0, 1],
+                    ),
+                    -angles[r],
+                )
+                imagePoints = rect.points()
+
+            visibleTgts.append((tgt, imagePoints))
+            noisyTargetCorners = self.prop.estPixelNoise(imagePoints)
+            minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners)
+            minAreaRectPts = minAreaRect.points()
+            centerPt = minAreaRect.center
+            centerRot = self.prop.getPixelRot(centerPt)
+            areaPercent = self.prop.getContourAreaPercent(noisyTargetCorners)
+
+            if (
+                not self.canSeeCorner(noisyTargetCorners)
+                or not areaPercent >= self.minTargetAreaPercent
+            ):
+                continue
+
+            pnpSim: PnpResult | None = None
+            if tgt.fiducialId >= 0 and len(tgt.getFieldVertices()) == 4:
+                pnpSim = OpenCVHelp.solvePNP_Square(
+                    self.prop.getIntrinsics(),
+                    self.prop.getDistCoeffs(),
+                    tgt.getModel().getVertices(),
+                    noisyTargetCorners,
+                )
+
+            smallVec: list[TargetCorner] = []
+            for corner in minAreaRectPts:
+                smallVec.append(TargetCorner(corner[0], corner[1]))
+
+            cornersFloat = OpenCVHelp.pointsToTargetCorners(noisyTargetCorners)
+
+            detectableTgts.append(
+                PhotonTrackedTarget(
+                    yaw=math.degrees(-centerRot.Z()),
+                    pitch=math.degrees(-centerRot.Y()),
+                    area=areaPercent,
+                    skew=math.degrees(centerRot.X()),
+                    fiducialId=tgt.fiducialId,
+                    detectedCorners=cornersFloat,
+                    minAreaRectCorners=smallVec,
+                    bestCameraToTarget=pnpSim.best if pnpSim else Transform3d(),
+                    altCameraToTarget=pnpSim.alt if pnpSim else Transform3d(),
+                    poseAmbiguity=pnpSim.ambiguity if pnpSim else -1,
+                )
+            )
+
+        # Video streams disabled for now
+        if self.videoSimRawEnabled:
+            # VideoSimUtil::UpdateVideoProp(videoSimRaw, prop);
+            # cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()};
+            # cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1);
+            # blankFrame.assignTo(videoSimFrameRaw);
+            pass
+        if self.videoSimProcEnabled:
+            # VideoSimUtil::UpdateVideoProp(videoSimProcessed, prop);
+            pass
+
+        multiTagResults: MultiTargetPNPResult | None = None
+
+        visibleLayoutTags = VisionEstimation.getVisibleLayoutTags(
+            detectableTgts, self.tagLayout
+        )
+
+        if len(visibleLayoutTags) > 1:
+            usedIds = [tag.ID for tag in visibleLayoutTags]
+            usedIds.sort()
+            pnpResult = VisionEstimation.estimateCamPosePNP(
+                self.prop.getIntrinsics(),
+                self.prop.getDistCoeffs(),
+                detectableTgts,
+                self.tagLayout,
+                TargetModel.AprilTag36h11(),
+            )
+            if pnpResult is not None:
+                multiTagResults = MultiTargetPNPResult(pnpResult, usedIds)
+
+        self.heartbeatCounter += 1
+        return PhotonPipelineResult(
+            metadata=PhotonPipelineMetadata(
+                self.heartbeatCounter, int(latency * 1e6), 1000000
+            ),
+            targets=detectableTgts,
+            multitagResult=multiTagResults,
+        )
+
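Reviewer note: a sketch of calling `process` directly for a single AprilTag, assuming `camSim` was built as in the earlier construction sketch and that `VisionTargetSim(pose, model, fiducialId)` mirrors the Java constructor (its actual signature lives in `visionTargetSim.py`, which is not shown in this hunk); the tag ID, camera pose, and 50 ms latency are illustrative:

```python
import robotpy_apriltag
from wpimath.geometry import Pose3d

from photonlibpy.estimation import TargetModel
from photonlibpy.simulation.visionTargetSim import VisionTargetSim

layout = robotpy_apriltag.loadAprilTagLayoutField(
    robotpy_apriltag.AprilTagField.k2024Crescendo
)
tagPose = layout.getTagPose(7)  # None if the ID is not in the layout
assert tagPose is not None

target = VisionTargetSim(tagPose, TargetModel.AprilTag36h11(), 7)
cameraPose = Pose3d()  # camera at the field origin, purely illustrative

result = camSim.process(0.050, cameraPose, [target])  # 50 ms simulated pipeline latency
if result.hasTargets():
    best = result.getBestTarget()
    print(best.fiducialId, best.getYaw(), best.getPitch())
```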
+    def submitProcessedFrame(
+        self, result: PhotonPipelineResult, receiveTimestamp: float | None
+    ):
+        if receiveTimestamp is None:
+            receiveTimestamp = wpilib.Timer.getFPGATimestamp() * 1e6
+        receiveTimestamp = int(receiveTimestamp)
+
+        self.ts.latencyMillisEntry.set(result.getLatencyMillis(), receiveTimestamp)
+
+        newPacket = PhotonPipelineResult.photonStruct.pack(result)
+        self.ts.rawBytesEntry.set(newPacket.getData(), receiveTimestamp)
+
+        hasTargets = result.hasTargets()
+        self.ts.hasTargetEntry.set(hasTargets, receiveTimestamp)
+        if not hasTargets:
+            self.ts.targetPitchEntry.set(0.0, receiveTimestamp)
+            self.ts.targetYawEntry.set(0.0, receiveTimestamp)
+            self.ts.targetAreaEntry.set(0.0, receiveTimestamp)
+            self.ts.targetPoseEntry.set(Transform3d(), receiveTimestamp)
+            self.ts.targetSkewEntry.set(0.0, receiveTimestamp)
+        else:
+            bestTarget = result.getBestTarget()
+
+            self.ts.targetPitchEntry.set(bestTarget.getPitch(), receiveTimestamp)
+            self.ts.targetYawEntry.set(bestTarget.getYaw(), receiveTimestamp)
+            self.ts.targetAreaEntry.set(bestTarget.getArea(), receiveTimestamp)
+            self.ts.targetSkewEntry.set(bestTarget.getSkew(), receiveTimestamp)
+
+            self.ts.targetPoseEntry.set(
+                bestTarget.getBestCameraToTarget(), receiveTimestamp
+            )
+
+        intrinsics = self.prop.getIntrinsics()
+        intrinsicsView = intrinsics.flatten().tolist()
+        self.ts.cameraIntrinsicsPublisher.set(intrinsicsView, receiveTimestamp)
+
+        distortion = self.prop.getDistCoeffs()
+        distortionView = distortion.flatten().tolist()
+        self.ts.cameraDistortionPublisher.set(distortionView, receiveTimestamp)
+
+        self.ts.heartbeatPublisher.set(self.heartbeatCounter, receiveTimestamp)
+
+        self.ts.subTable.getInstance().flush()
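Reviewer note: `visionSystemSim.py` (also added in this release) is what normally wires `consumeNextEntryTime`, `process`, and `submitProcessedFrame` together; the loop below shows that flow in isolation, with `camSim`, the mount transform, and the fixed 20 ms latency all invented for illustration:

```python
from wpimath.geometry import Pose3d, Transform3d

robotToCamera = Transform3d()  # camera at the robot origin, illustrative only

def simulationPeriodic(robotPose: Pose3d, targets: list[VisionTargetSim]) -> None:
    # consumeNextEntryTime() paces publishing to the simulated camera's frame rate;
    # it returns a NetworkTables timestamp in microseconds, or None when no frame is due.
    timestamp = camSim.consumeNextEntryTime()
    if timestamp is None:
        return

    cameraPose = robotPose.transformBy(robotToCamera)
    result = camSim.process(0.020, cameraPose, targets)  # fixed 20 ms latency, illustrative
    camSim.submitProcessedFrame(result, timestamp)
```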