wirepod-vector-sdk-audio 0.9.0 (wirepod_vector_sdk_audio-0.9.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. anki_vector/__init__.py +43 -0
  2. anki_vector/animation.py +272 -0
  3. anki_vector/annotate.py +590 -0
  4. anki_vector/audio.py +212 -0
  5. anki_vector/audio_stream.py +335 -0
  6. anki_vector/behavior.py +1135 -0
  7. anki_vector/camera.py +670 -0
  8. anki_vector/camera_viewer/__init__.py +121 -0
  9. anki_vector/color.py +88 -0
  10. anki_vector/configure/__main__.py +331 -0
  11. anki_vector/connection.py +838 -0
  12. anki_vector/events.py +420 -0
  13. anki_vector/exceptions.py +185 -0
  14. anki_vector/faces.py +819 -0
  15. anki_vector/lights.py +210 -0
  16. anki_vector/mdns.py +131 -0
  17. anki_vector/messaging/__init__.py +45 -0
  18. anki_vector/messaging/alexa_pb2.py +36 -0
  19. anki_vector/messaging/alexa_pb2_grpc.py +3 -0
  20. anki_vector/messaging/behavior_pb2.py +40 -0
  21. anki_vector/messaging/behavior_pb2_grpc.py +3 -0
  22. anki_vector/messaging/client.py +33 -0
  23. anki_vector/messaging/cube_pb2.py +113 -0
  24. anki_vector/messaging/cube_pb2_grpc.py +3 -0
  25. anki_vector/messaging/extensions_pb2.py +25 -0
  26. anki_vector/messaging/extensions_pb2_grpc.py +3 -0
  27. anki_vector/messaging/external_interface_pb2.py +169 -0
  28. anki_vector/messaging/external_interface_pb2_grpc.py +1267 -0
  29. anki_vector/messaging/messages_pb2.py +431 -0
  30. anki_vector/messaging/messages_pb2_grpc.py +3 -0
  31. anki_vector/messaging/nav_map_pb2.py +33 -0
  32. anki_vector/messaging/nav_map_pb2_grpc.py +3 -0
  33. anki_vector/messaging/protocol.py +33 -0
  34. anki_vector/messaging/response_status_pb2.py +27 -0
  35. anki_vector/messaging/response_status_pb2_grpc.py +3 -0
  36. anki_vector/messaging/settings_pb2.py +72 -0
  37. anki_vector/messaging/settings_pb2_grpc.py +3 -0
  38. anki_vector/messaging/shared_pb2.py +54 -0
  39. anki_vector/messaging/shared_pb2_grpc.py +3 -0
  40. anki_vector/motors.py +127 -0
  41. anki_vector/nav_map.py +409 -0
  42. anki_vector/objects.py +1782 -0
  43. anki_vector/opengl/__init__.py +103 -0
  44. anki_vector/opengl/assets/LICENSE.txt +21 -0
  45. anki_vector/opengl/assets/cube.jpg +0 -0
  46. anki_vector/opengl/assets/cube.mtl +9 -0
  47. anki_vector/opengl/assets/cube.obj +1000 -0
  48. anki_vector/opengl/assets/vector.mtl +67 -0
  49. anki_vector/opengl/assets/vector.obj +13220 -0
  50. anki_vector/opengl/opengl.py +864 -0
  51. anki_vector/opengl/opengl_vector.py +620 -0
  52. anki_vector/opengl/opengl_viewer.py +689 -0
  53. anki_vector/photos.py +145 -0
  54. anki_vector/proximity.py +176 -0
  55. anki_vector/reserve_control/__main__.py +36 -0
  56. anki_vector/robot.py +930 -0
  57. anki_vector/screen.py +201 -0
  58. anki_vector/status.py +322 -0
  59. anki_vector/touch.py +119 -0
  60. anki_vector/user_intent.py +186 -0
  61. anki_vector/util.py +1132 -0
  62. anki_vector/version.py +15 -0
  63. anki_vector/viewer.py +403 -0
  64. anki_vector/vision.py +202 -0
  65. anki_vector/world.py +899 -0
  66. wirepod_vector_sdk_audio-0.9.0.dist-info/METADATA +80 -0
  67. wirepod_vector_sdk_audio-0.9.0.dist-info/RECORD +71 -0
  68. wirepod_vector_sdk_audio-0.9.0.dist-info/WHEEL +5 -0
  69. wirepod_vector_sdk_audio-0.9.0.dist-info/licenses/LICENSE.txt +180 -0
  70. wirepod_vector_sdk_audio-0.9.0.dist-info/top_level.txt +1 -0
  71. wirepod_vector_sdk_audio-0.9.0.dist-info/zip-safe +1 -0
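The wheel installs a single top-level package, anki_vector (see top_level.txt). A minimal getting-started sketch, assuming the distribution is published under the name wirepod-vector-sdk-audio and that the robot was already set up with `python3 -m anki_vector.configure` (the configure module shipped in this wheel):

    # pip3 install wirepod-vector-sdk-audio   # assumed distribution name
    import anki_vector

    # Robot() reads the name/IP/certificate written by the configure step.
    with anki_vector.Robot() as robot:
        # capture_single_image() returns a CameraImage, defined in camera.py below
        image = robot.camera.capture_single_image()
        image.raw_image.show()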
anki_vector/camera.py ADDED
@@ -0,0 +1,670 @@
+ # Copyright (c) 2018 Anki, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License in the file LICENSE.txt or at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Support for Vector's camera.
+
+ Vector has a built-in camera which he uses to observe the world around him.
+
+ The :class:`CameraComponent` class defined in this module is made available as
+ :attr:`anki_vector.robot.Robot.camera` and can be used to enable/disable image
+ sending and observe images being sent by the robot. It emits :class:`EvtNewRawCameraImage`
+ and :class:`EvtNewCameraImage` objects whenever a new camera image is available.
+
+ The camera resolution is 1280 x 720 with a field of view of 90 deg (H) x 50 deg (V).
+ """
+
+ # __all__ should order by constants, event classes, other classes, functions.
+ __all__ = ["EvtNewRawCameraImage", "EvtNewCameraImage",
+            "CameraComponent", "CameraConfig", "CameraImage"]
+
+ import asyncio
+ from concurrent.futures import CancelledError
+ import io
+ import time
+ import sys
+
+ from . import annotate, connection, util
+ from .events import Events
+ from .exceptions import VectorCameraFeedException, VectorCameraImageCaptureException
+ from .messaging import protocol
+
+ try:
+     import numpy as np
+ except ImportError:
+     sys.exit("Cannot import numpy: Do `pip3 install numpy` to install")
+
+ try:
+     from PIL import Image
+ except ImportError:
+     sys.exit("Cannot import from PIL: Do `pip3 install --user Pillow` to install")
+
+
+ def _convert_to_pillow_image(image_data: bytes) -> Image.Image:
+     """Convert raw image bytes to a Pillow Image."""
+     size = len(image_data)
+
+     # Construct numpy array out of source data
+     array = np.empty(size, dtype=np.uint8)
+     array[0:size] = list(image_data)
+
+     # Decode compressed source data into uncompressed image data
+     image = Image.open(io.BytesIO(array))
+     return image
+
+
+ class CameraImage:
+     """A single image from the robot's camera.
+     This wraps a raw image and provides an :meth:`annotate_image` method
+     that can resize and add dynamic annotations to the image, such as
+     marking up the location of objects and faces.
+
+     .. testcode::
+
+         import anki_vector
+
+         with anki_vector.Robot() as robot:
+             image = robot.camera.capture_single_image()
+             print(f"Displaying image with id {image.image_id}, received at {image.image_recv_time}")
+             image.raw_image.show()
+
+     :param raw_image: The raw unprocessed image from the camera.
+     :param image_annotator: The image annotation object.
+     :param image_id: An image number that increments on every new image received.
+     """
+
+     def __init__(self, raw_image: Image.Image, image_annotator: annotate.ImageAnnotator, image_id: int):
+
+         self._raw_image = raw_image
+         self._image_annotator = image_annotator
+         self._image_id = image_id
+         self._image_recv_time = time.time()
+
+     @property
+     def raw_image(self) -> Image.Image:
+         """The raw unprocessed image from the camera."""
+         return self._raw_image
+
+     @property
+     def image_id(self) -> int:
+         """An image number that increments on every new image received."""
+         return self._image_id
+
+     @property
+     def image_recv_time(self) -> float:
+         """The time the image was received and processed by the SDK."""
+         return self._image_recv_time
+
+     def annotate_image(self, scale: float = None, fit_size: tuple = None, resample_mode: int = annotate.RESAMPLE_MODE_NEAREST) -> Image.Image:
+         """Adds any enabled annotations to the image.
+         Optionally resizes the image prior to annotations being applied. The
+         aspect ratio of the resulting image always matches that of the raw image.
+
+         .. testcode::
+
+             import anki_vector
+
+             with anki_vector.Robot() as robot:
+                 image = robot.camera.capture_single_image()
+                 annotated_image = image.annotate_image()
+                 annotated_image.show()
+
+         :param scale: If set then the base image will be scaled by the
+             supplied multiplier. Cannot be combined with fit_size.
+         :param fit_size: If set, then scale the image to fit inside
+             the supplied (width, height) dimensions. The original aspect
+             ratio will be preserved. Cannot be combined with scale.
+         :param resample_mode: The resampling mode to use when scaling the
+             image. Should be either :attr:`~anki_vector.annotate.RESAMPLE_MODE_NEAREST`
+             (fast) or :attr:`~anki_vector.annotate.RESAMPLE_MODE_BILINEAR` (slower,
+             but smoother).
+         """
+         if self._raw_image.size != (640, 360):
+             raise VectorCameraImageCaptureException("Annotation is only supported for default resolution images.")
+         return self._image_annotator.annotate_image(self._raw_image,
+                                                     scale=scale,
+                                                     fit_size=fit_size,
+                                                     resample_mode=resample_mode)
+
+
+ class CameraConfig:
+     """The fixed properties for Vector's camera.
+
+     A full 3x3 calibration matrix for doing 3D reasoning based on the camera
+     images would look like:
+
+     +--------------+--------------+---------------+
+     |focal_length.x|      0       |   center.x    |
+     +--------------+--------------+---------------+
+     |      0       |focal_length.y|   center.y    |
+     +--------------+--------------+---------------+
+     |      0       |      0       |       1       |
+     +--------------+--------------+---------------+
+
+     .. testcode::
+
+         import anki_vector
+
+         with anki_vector.Robot() as robot:
+             min = robot.camera.config.min_gain
+             max = robot.camera.config.max_gain
+             print(f"Robot camera allowable exposure gain range is from {min} to {max}")
+     """
+
+     def __init__(self,
+                  focal_length_x: float,
+                  focal_length_y: float,
+                  center_x: float,
+                  center_y: float,
+                  fov_x: float,
+                  fov_y: float,
+                  min_exposure_time_ms: int,
+                  max_exposure_time_ms: int,
+                  min_gain: float,
+                  max_gain: float):
+         self._focal_length = util.Vector2(focal_length_x, focal_length_y)
+         self._center = util.Vector2(center_x, center_y)
+         self._fov_x = util.degrees(fov_x)
+         self._fov_y = util.degrees(fov_y)
+         self._min_exposure_ms = min_exposure_time_ms
+         self._max_exposure_ms = max_exposure_time_ms
+         self._min_gain = min_gain
+         self._max_gain = max_gain
+
+     @classmethod
+     def create_from_message(cls, msg: protocol.CameraConfigResponse):
+         """Create a camera configuration from the CameraConfigResponse message sent by the robot."""
+         return cls(msg.focal_length_x,
+                    msg.focal_length_y,
+                    msg.center_x,
+                    msg.center_y,
+                    msg.fov_x,
+                    msg.fov_y,
+                    msg.min_camera_exposure_time_ms,
+                    msg.max_camera_exposure_time_ms,
+                    msg.min_camera_gain,
+                    msg.max_camera_gain)
+
+     @property
+     def min_gain(self) -> float:
+         """The minimum supported camera gain."""
+         return self._min_gain
+
+     @property
+     def max_gain(self) -> float:
+         """The maximum supported camera gain."""
+         return self._max_gain
+
+     @property
+     def min_exposure_time_ms(self) -> int:
+         """The minimum supported exposure time in milliseconds."""
+         return self._min_exposure_ms
+
+     @property
+     def max_exposure_time_ms(self) -> int:
+         """The maximum supported exposure time in milliseconds."""
+         return self._max_exposure_ms
+
+     @property
+     def focal_length(self):
+         """:class:`anki_vector.util.Vector2`: The focal length of the camera.
+
+         This is focal length combined with pixel skew (as the pixels aren't
+         perfectly square), so there are subtly different values for x and y.
+         It is in floating point pixel values e.g. <288.87, 288.36>.
+         """
+         return self._focal_length
+
+     @property
+     def center(self):
+         """:class:`anki_vector.util.Vector2`: The focal center of the camera.
+
+         This is the position of the optical center of projection within the
+         image. It will be close to the center of the image, but adjusted based
+         on the calibration of the lens. It is in floating point pixel values
+         e.g. <155.11, 111.40>.
+         """
+         return self._center
+
+     @property
+     def fov_x(self):
+         """:class:`anki_vector.util.Angle`: The x (horizontal) field of view."""
+         return self._fov_x
+
+     @property
+     def fov_y(self):
+         """:class:`anki_vector.util.Angle`: The y (vertical) field of view."""
+         return self._fov_y
+
+
+ class CameraComponent(util.Component):
+     """Represents Vector's camera.
+
+     The CameraComponent object receives images from Vector's camera, unpacks the data,
+     composes it and makes it available as latest_image.
+
+     The :class:`anki_vector.robot.Robot` or :class:`anki_vector.robot.AsyncRobot` instance observes the camera.
+
+     .. testcode::
+
+         import anki_vector
+
+         with anki_vector.Robot() as robot:
+             robot.camera.init_camera_feed()
+             image = robot.camera.latest_image
+             image.raw_image.show()
+
+     :param robot: A reference to the owner Robot object.
+     """
+
+     #: callable: The factory function that returns an
+     #: :class:`annotate.ImageAnnotator` class or subclass instance.
+     annotator_factory = annotate.ImageAnnotator
+
+     def __init__(self, robot):
+         super().__init__(robot)
+
+         self._image_annotator: annotate.ImageAnnotator = self.annotator_factory(self.robot.world)
+         self._latest_image: CameraImage = None
+         self._latest_image_id: int = None
+         self._camera_feed_task: asyncio.Task = None
+         self._enabled = False
+         self._config = None  # type: CameraConfig
+         self._gain = 0.0
+         self._exposure_ms = 0
+         self._auto_exposure_enabled = True
+
+     def set_config(self, message: protocol.CameraConfigRequest):
+         """Update Vector's camera configuration from the message sent by the robot."""
+         try:
+             self._config = CameraConfig.create_from_message(message)
+         except Exception:
+             self._config = CameraConfig(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+     @connection.on_connection_thread(requires_control=False)
+     async def get_camera_config(self) -> protocol.CameraConfigResponse:
+         """Get Vector's camera configuration.
+
+         Retrieves the calibrated camera settings. This is called during the Robot connection
+         initialization; SDK users should use the `config` property in most instances.
+
+         :return: The camera configuration response received from the robot.
+         """
+         try:
+             request = protocol.CameraConfigRequest()
+             return await self.conn.grpc_interface.GetCameraConfig(request)
+         except Exception:
+             pass
+
+     @property
+     def config(self) -> CameraConfig:
+         """:class:`anki_vector.camera.CameraConfig`: The read-only config/calibration for the camera."""
+         return self._config
+
+     @property
+     def is_auto_exposure_enabled(self) -> bool:
+         """bool: True if auto exposure is currently enabled.
+
+         If auto exposure is enabled the `gain` and `exposure_ms`
+         values will constantly be updated by Vector.
+         """
+         return self._auto_exposure_enabled
+
+     @property
+     def gain(self) -> float:
+         """float: The current camera gain setting."""
+         return self._gain
+
+     @property
+     def exposure_ms(self) -> int:
+         """int: The current camera exposure setting in milliseconds."""
+         return self._exposure_ms
+
+     def update_state(self, _robot, _event_type, msg):
+         self._gain = msg.gain
+         self._exposure_ms = msg.exposure_ms
+         self._auto_exposure_enabled = msg.auto_exposure_enabled
+
+     @property
+     @util.block_while_none()
+     def latest_image(self) -> CameraImage:
+         """:class:`CameraImage`: The most recently processed image received from the robot.
+
+         The resolution of latest_image is 640x360.
+
+         :getter: Returns the :class:`CameraImage` wrapping the latest image
+
+         .. testcode::
+
+             import anki_vector
+
+             with anki_vector.Robot() as robot:
+                 robot.camera.init_camera_feed()
+                 image = robot.camera.latest_image
+                 image.raw_image.show()
+         """
+         if not self._camera_feed_task:
+             raise VectorCameraFeedException()
+         return self._latest_image
+
+     @property
+     @util.block_while_none()
+     def latest_image_id(self) -> int:
+         """The id of the most recently processed image received from the robot.
+
+         Used only to track chunks of the same image.
+
+         :getter: Returns the id for the latest image
+
+         .. testcode::
+
+             import anki_vector
+
+             with anki_vector.Robot() as robot:
+                 robot.camera.init_camera_feed()
+                 image = robot.camera.latest_image
+                 image.raw_image.show()
+                 print(f"latest_image_id: {robot.camera.latest_image_id}")
+         """
+         if not self._camera_feed_task:
+             raise VectorCameraFeedException()
+         return self._latest_image_id
+
+     @property
+     def image_annotator(self) -> annotate.ImageAnnotator:
+         """The image annotator used to add annotations to the raw camera images.
+
+         .. testcode::
+
+             import time
+             import anki_vector
+
+             with anki_vector.Robot(show_viewer=True) as robot:
+                 # Annotations (enabled by default) are displayed on the camera feed
+                 time.sleep(5)
+                 # Disable all annotations
+                 robot.camera.image_annotator.annotation_enabled = False
+                 time.sleep(5)
+         """
+         return self._image_annotator
+
+     def init_camera_feed(self) -> None:
+         """Begin camera feed task.
+
+         .. testcode::
+
+             import anki_vector
+
+             with anki_vector.Robot() as robot:
+                 robot.camera.init_camera_feed()
+                 image = robot.camera.latest_image
+                 image.raw_image.show()
+         """
+         if not self._camera_feed_task or self._camera_feed_task.done():
+             self._enabled = True
+             self._camera_feed_task = self.conn.loop.create_task(self._request_and_handle_images())
+
+     def close_camera_feed(self) -> None:
+         """Cancel camera feed task."""
+         if self._camera_feed_task:
+             self._enabled = False
+             self._camera_feed_task.cancel()
+             future = self.conn.run_coroutine(self._camera_feed_task)
+             try:
+                 future.result()
+             except CancelledError:
+                 self.logger.debug('Camera feed task was cancelled. This is expected during disconnection.')
+             # wait for streaming to end, up to 10 seconds
+             iterations = 0
+             max_iterations = 100
+             while self.image_streaming_enabled():
+                 time.sleep(0.1)
+                 iterations += 1
+                 if iterations > max_iterations:
+                     # leave loop, even if streaming is still enabled
+                     # because other SDK functions will still work and
+                     # the RPC should have had enough time to finish
+                     # which means we _should_ be in a good state.
+                     self.logger.info('Camera Feed closed, but streaming on'
+                                      ' robot remained enabled. This is unexpected.')
+                     break
+             self._camera_feed_task = None
+
+     async def _image_streaming_enabled(self) -> bool:
+         """Request streaming enabled status from the robot."""
+         request = protocol.IsImageStreamingEnabledRequest()
+         response = await self.conn.grpc_interface.IsImageStreamingEnabled(request)
+         enabled = False
+         if response:
+             enabled = response.is_image_streaming_enabled
+         return enabled
+
+     def image_streaming_enabled(self) -> bool:
+         """True if image streaming is enabled on the robot.
+
+         .. testcode::
+
+             import anki_vector
+             with anki_vector.Robot() as robot:
+                 image_streaming_enabled = robot.camera.image_streaming_enabled()
+                 if image_streaming_enabled:
+                     print("Robot is streaming video")
+                 else:
+                     print("Robot is not streaming video")
+         """
+         future = self.conn.run_coroutine(self._image_streaming_enabled())
+         return future.result()
+
+     def _unpack_image(self, msg: protocol.CameraFeedResponse) -> None:
+         """Processes raw data from the robot into a more useful image structure."""
+         image = _convert_to_pillow_image(msg.data)
+
+         self._latest_image = CameraImage(image, self._image_annotator, msg.image_id)
+         self._latest_image_id = msg.image_id
+
+         self.conn.run_soon(self.robot.events.dispatch_event(EvtNewRawCameraImage(image),
+                                                             Events.new_raw_camera_image))
+         self.conn.run_soon(self.robot.events.dispatch_event(EvtNewCameraImage(self._latest_image),
+                                                             Events.new_camera_image))
+
+         if self._image_annotator.annotation_enabled:
+             image = self._image_annotator.annotate_image(image)
+         self.robot.viewer.enqueue_frame(image)
+
+     async def _request_and_handle_images(self) -> None:
+         """Queries and listens for camera feed events from the robot.
+         Received events are parsed by a helper function."""
+         try:
+             req = protocol.CameraFeedRequest()
+             async for evt in self.grpc_interface.CameraFeed(req):
+                 # If the camera feed is disabled after stream is setup, exit the stream
+                 # (the camera feed on the robot is disabled internally on stream exit)
+                 if not self._enabled:
+                     self.logger.warning('Camera feed has been disabled. Enable the feed to start/continue receiving camera feed data')
+                     return
+                 self._unpack_image(evt)
+         except CancelledError:
+             self.logger.debug('Camera feed task was cancelled. This is expected during disconnection.')
+
+     @connection.on_connection_thread()
+     async def capture_single_image(self, enable_high_resolution: bool = False) -> CameraImage:
+         """Request to capture a single image from the robot's camera.
+
+         This call requests the robot to capture an image and returns the
+         received image, formatted as a Pillow image. This differs from `latest_image`,
+         which maintains the last image received from the camera feed (if enabled).
+
+         Note that when the camera feed is enabled this call returns the `latest_image`.
+
+         .. testcode::
+
+             import anki_vector
+
+             with anki_vector.Robot() as robot:
+                 image = robot.camera.capture_single_image()
+                 image.raw_image.show()
+
+         :param enable_high_resolution: Enable/disable request for high resolution images. The default resolution
+             is 640x360, while the high resolution is 1280x720.
+         """
+         if self._enabled:
+             self.logger.warning('Camera feed is enabled. Receiving image from the feed at default resolution.')
+             return self._latest_image
+         if enable_high_resolution:
+             self.logger.warning('Capturing a high resolution (1280*720) image. Image events for this frame need to be scaled.')
+         req = protocol.CaptureSingleImageRequest(enable_high_resolution=enable_high_resolution)
+         res = await self.grpc_interface.CaptureSingleImage(req)
+         if res and res.data:
+             image = _convert_to_pillow_image(res.data)
+             return CameraImage(image, self._image_annotator, res.image_id)
+
+         self.logger.error('Failed to capture a single image')
+
+     @connection.on_connection_thread()
+     async def enable_auto_exposure(self, enable_auto_exposure=True) -> protocol.SetCameraSettingsResponse:
+         """Enable auto exposure on Vector's camera.
+
+         Enable auto exposure on Vector's camera to constantly update the exposure
+         time and gain values based on the recent images. This is the default mode
+         when any SDK program starts.
+
+         .. testcode::
+
+             import time
+             import anki_vector
+             with anki_vector.Robot() as robot:
+                 robot.camera.enable_auto_exposure(False)
+                 time.sleep(5)
+
+         :param enable_auto_exposure: whether the camera should automatically adjust exposure
+         """
+
+         set_camera_settings_request = protocol.SetCameraSettingsRequest(enable_auto_exposure=enable_auto_exposure)
+         result = await self.conn.grpc_interface.SetCameraSettings(set_camera_settings_request)
+         self._auto_exposure_enabled = enable_auto_exposure
+         return result
+
+     @connection.on_connection_thread()
+     async def set_manual_exposure(self, exposure_ms: int, gain: float) -> protocol.SetCameraSettingsResponse:
+         """Set manual exposure values for Vector's camera.
+
+         This will disable auto exposure on Vector's camera and force the specified exposure
+         time and gain values.
+
+         .. testcode::
+
+             import time
+             import anki_vector
+             with anki_vector.Robot() as robot:
+                 robot.camera.set_manual_exposure(1, 0.25)
+                 time.sleep(5)
+
+         :param exposure_ms: The desired exposure time in milliseconds.
+             Must be within the robot's exposure range from :attr:`CameraConfig.min_exposure_time_ms` to
+             :attr:`CameraConfig.max_exposure_time_ms`.
+         :param gain: The desired gain value.
+             Must be within the robot's gain range from :attr:`CameraConfig.min_gain` to
+             :attr:`CameraConfig.max_gain`.
+
+         Raises:
+             :class:`ValueError` if supplied an out-of-range exposure or gain
+
+         """
+
+         if exposure_ms < self._config.min_exposure_time_ms \
+                 or exposure_ms > self._config.max_exposure_time_ms \
+                 or gain < self._config.min_gain \
+                 or gain > self._config.max_gain:
+             raise ValueError("Exposure settings out of range")
+
+         set_camera_settings_request = protocol.SetCameraSettingsRequest(gain=gain,
+                                                                         exposure_ms=exposure_ms,
+                                                                         enable_auto_exposure=False)
+         result = await self.conn.grpc_interface.SetCameraSettings(set_camera_settings_request)
+         self._gain = gain
+         self._exposure_ms = exposure_ms
+         self._auto_exposure_enabled = False
+         return result
+
+
+ class EvtNewRawCameraImage:  # pylint: disable=too-few-public-methods
+     """Dispatched when a new raw image is received from the robot's camera.
+
+     See also :class:`~anki_vector.camera.EvtNewCameraImage` which provides access
+     to both the raw image and a scaled and annotated version.
+
+     .. testcode::
+
+         import threading
+
+         import anki_vector
+         from anki_vector import events
+
+         def on_new_raw_camera_image(robot, event_type, event, done):
+             print("Display new camera image")
+             event.image.show()
+             done.set()
+
+         with anki_vector.Robot() as robot:
+             robot.camera.init_camera_feed()
+             done = threading.Event()
+             robot.events.subscribe(on_new_raw_camera_image, events.Events.new_raw_camera_image, done)
+
+             print("------ waiting for camera events, press ctrl+c to exit early ------")
+
+             try:
+                 if not done.wait(timeout=5):
+                     print("------ Did not receive a new camera image! ------")
+             except KeyboardInterrupt:
+                 pass
+
+     :param image: A raw camera image.
+     """
+
+     def __init__(self, image: Image.Image):
+         self.image = image
+
+
+ class EvtNewCameraImage:  # pylint: disable=too-few-public-methods
+     """Dispatched when a new camera image is received and processed from the robot's camera.
+
+     .. testcode::
+
+         import threading
+
+         import anki_vector
+         from anki_vector import events
+
+         def on_new_camera_image(robot, event_type, event, done):
+             print(f"Display new annotated camera image with id {event.image.image_id}")
+             annotated_image = event.image.annotate_image()
+             annotated_image.show()
+             done.set()
+
+         with anki_vector.Robot(enable_face_detection=True, enable_custom_object_detection=True) as robot:
+             robot.camera.init_camera_feed()
+             done = threading.Event()
+             robot.events.subscribe(on_new_camera_image, events.Events.new_camera_image, done)
+
+             print("------ waiting for camera events, press ctrl+c to exit early ------")
+
+             try:
+                 if not done.wait(timeout=5):
+                     print("------ Did not receive a new camera image! ------")
+             except KeyboardInterrupt:
+                 pass
+
+     :param image: A wrapped camera image object that contains the raw image.
+     """
+
+     def __init__(self, image: CameraImage):
+         self.image = image
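The CameraConfig docstring above lays out the 3x3 pinhole calibration matrix, but camera.py itself never applies it. A minimal sketch of how those values could be used, assuming a point already expressed in the camera's coordinate frame under the usual pinhole convention (x right, y down, z forward); how such a point is obtained is outside this module, and the resulting pixel coordinates are in whatever image resolution the calibration values were produced for:

    import anki_vector

    def project_to_pixels(config, x: float, y: float, z: float):
        """Apply the pinhole model: u = fx * x / z + cx, v = fy * y / z + cy."""
        u = config.focal_length.x * x / z + config.center.x
        v = config.focal_length.y * y / z + config.center.y
        return u, v

    with anki_vector.Robot() as robot:
        # A hypothetical point half a meter straight ahead of the lens;
        # it should land on the optical center reported by the calibration.
        u, v = project_to_pixels(robot.camera.config, 0.0, 0.0, 0.5)
        print(f"Projects to pixel ({u:.1f}, {v:.1f})")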