wirepod-vector-sdk-audio 0.9.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. anki_vector/__init__.py +43 -0
  2. anki_vector/animation.py +272 -0
  3. anki_vector/annotate.py +590 -0
  4. anki_vector/audio.py +212 -0
  5. anki_vector/audio_stream.py +335 -0
  6. anki_vector/behavior.py +1135 -0
  7. anki_vector/camera.py +670 -0
  8. anki_vector/camera_viewer/__init__.py +121 -0
  9. anki_vector/color.py +88 -0
  10. anki_vector/configure/__main__.py +331 -0
  11. anki_vector/connection.py +838 -0
  12. anki_vector/events.py +420 -0
  13. anki_vector/exceptions.py +185 -0
  14. anki_vector/faces.py +819 -0
  15. anki_vector/lights.py +210 -0
  16. anki_vector/mdns.py +131 -0
  17. anki_vector/messaging/__init__.py +45 -0
  18. anki_vector/messaging/alexa_pb2.py +36 -0
  19. anki_vector/messaging/alexa_pb2_grpc.py +3 -0
  20. anki_vector/messaging/behavior_pb2.py +40 -0
  21. anki_vector/messaging/behavior_pb2_grpc.py +3 -0
  22. anki_vector/messaging/client.py +33 -0
  23. anki_vector/messaging/cube_pb2.py +113 -0
  24. anki_vector/messaging/cube_pb2_grpc.py +3 -0
  25. anki_vector/messaging/extensions_pb2.py +25 -0
  26. anki_vector/messaging/extensions_pb2_grpc.py +3 -0
  27. anki_vector/messaging/external_interface_pb2.py +169 -0
  28. anki_vector/messaging/external_interface_pb2_grpc.py +1267 -0
  29. anki_vector/messaging/messages_pb2.py +431 -0
  30. anki_vector/messaging/messages_pb2_grpc.py +3 -0
  31. anki_vector/messaging/nav_map_pb2.py +33 -0
  32. anki_vector/messaging/nav_map_pb2_grpc.py +3 -0
  33. anki_vector/messaging/protocol.py +33 -0
  34. anki_vector/messaging/response_status_pb2.py +27 -0
  35. anki_vector/messaging/response_status_pb2_grpc.py +3 -0
  36. anki_vector/messaging/settings_pb2.py +72 -0
  37. anki_vector/messaging/settings_pb2_grpc.py +3 -0
  38. anki_vector/messaging/shared_pb2.py +54 -0
  39. anki_vector/messaging/shared_pb2_grpc.py +3 -0
  40. anki_vector/motors.py +127 -0
  41. anki_vector/nav_map.py +409 -0
  42. anki_vector/objects.py +1782 -0
  43. anki_vector/opengl/__init__.py +103 -0
  44. anki_vector/opengl/assets/LICENSE.txt +21 -0
  45. anki_vector/opengl/assets/cube.jpg +0 -0
  46. anki_vector/opengl/assets/cube.mtl +9 -0
  47. anki_vector/opengl/assets/cube.obj +1000 -0
  48. anki_vector/opengl/assets/vector.mtl +67 -0
  49. anki_vector/opengl/assets/vector.obj +13220 -0
  50. anki_vector/opengl/opengl.py +864 -0
  51. anki_vector/opengl/opengl_vector.py +620 -0
  52. anki_vector/opengl/opengl_viewer.py +689 -0
  53. anki_vector/photos.py +145 -0
  54. anki_vector/proximity.py +176 -0
  55. anki_vector/reserve_control/__main__.py +36 -0
  56. anki_vector/robot.py +930 -0
  57. anki_vector/screen.py +201 -0
  58. anki_vector/status.py +322 -0
  59. anki_vector/touch.py +119 -0
  60. anki_vector/user_intent.py +186 -0
  61. anki_vector/util.py +1132 -0
  62. anki_vector/version.py +15 -0
  63. anki_vector/viewer.py +403 -0
  64. anki_vector/vision.py +202 -0
  65. anki_vector/world.py +899 -0
  66. wirepod_vector_sdk_audio-0.9.0.dist-info/METADATA +80 -0
  67. wirepod_vector_sdk_audio-0.9.0.dist-info/RECORD +71 -0
  68. wirepod_vector_sdk_audio-0.9.0.dist-info/WHEEL +5 -0
  69. wirepod_vector_sdk_audio-0.9.0.dist-info/licenses/LICENSE.txt +180 -0
  70. wirepod_vector_sdk_audio-0.9.0.dist-info/top_level.txt +1 -0
  71. wirepod_vector_sdk_audio-0.9.0.dist-info/zip-safe +1 -0
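The wheel keeps the upstream anki_vector package layout (see the file list above and top_level.txt), so existing SDK scripts can import it unchanged. Below is a minimal smoke-test sketch, assuming the robot has already been paired with python3 -m anki_vector.configure (the configure module is included above) and that this fork preserves the upstream Robot and behavior.say_text APIs:

    # Install with: pip3 install wirepod-vector-sdk-audio
    # Assumes prior pairing via `python3 -m anki_vector.configure`.
    import anki_vector

    with anki_vector.Robot() as robot:
        # say_text is the upstream BehaviorComponent call; adjust if this fork differs.
        robot.behavior.say_text("Vector SDK is ready")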
anki_vector/version.py ADDED
@@ -0,0 +1,15 @@
+ # Copyright (c) 2018 Anki, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License in the file LICENSE.txt or at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ __version__ = "0.9.0"
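Since the fork keeps the upstream version module shown above, a script can report which build it is running against; a tiny sketch, assuming anki_vector is importable as laid out in the file list:

    from anki_vector import version

    # __version__ comes straight from anki_vector/version.py above ("0.9.0").
    print("Vector SDK version:", version.__version__)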
anki_vector/viewer.py ADDED
@@ -0,0 +1,403 @@
+ # Copyright (c) 2018 Anki, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License in the file LICENSE.txt or at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Displays camera feed from Vector's camera.
+ """
+
+ # __all__ should order by constants, event classes, other classes, functions.
+ __all__ = ['ViewerComponent', 'Viewer3DComponent']
+
+ import multiprocessing as mp
+ import sys
+ import threading
+
+ try:
+     from PIL import Image
+ except ImportError:
+     sys.exit("Cannot import from PIL: Do `pip3 install --user Pillow` to install")
+
+ from . import util
+ from .events import Events
+
+
+ class ViewerComponent(util.Component):
+     """This component opens a window and renders the images obtained from Vector's camera.
+     This viewer window is run in a separate process spawned by :func:`~ViewerComponent.show`.
+     Being on a separate process means the rendering of the camera does not block the main thread
+     of the calling code, and allows the viewer to have its own UI thread on which it can operate.
+     :func:`~ViewerComponent.close` will stop the viewer process.
+
+     .. testcode::
+
+         import anki_vector
+
+         import time
+
+         with anki_vector.Robot(show_viewer=True) as robot:
+             time.sleep(5)
+
+     :param robot: A reference to the owner Robot object. (May be :class:`None`)
+     """
+
+     def __init__(self, robot):
+         super().__init__(robot)
+         self.overlays: list = []
+         self._close_event: mp.Event = None
+         self._frame_queue: mp.Queue = None
+         self._process = None
+
+     def show(self, timeout: float = 10.0, force_on_top: bool = True) -> None:
+         """Render a video stream using the images obtained from
+         Vector's camera feed.
+
+         .. testcode::
+
+             import anki_vector
+             import time
+
+             with anki_vector.Robot() as robot:
+                 robot.viewer.show()
+                 time.sleep(10)
+
+         :param timeout: Render video for the given time. (Renders forever if timeout is not given.)
+         :param force_on_top: Specifies whether the window should be forced on top of all others.
+         """
+         from . import camera_viewer
+
+         self.robot.camera.init_camera_feed()
+
+         ctx = mp.get_context('spawn')
+         self._close_event = ctx.Event()
+         self._frame_queue = ctx.Queue(maxsize=4)
+         self._process = ctx.Process(target=camera_viewer.main,
+                                     args=(self._frame_queue,
+                                           self._close_event,
+                                           self.overlays,
+                                           timeout,
+                                           force_on_top),
+                                     daemon=True,
+                                     name="Camera Viewer Process")
+         self._process.start()
+
+     def close(self) -> None:
+         """Stop rendering video of Vector's camera feed and close the viewer process.
+
+         .. testcode::
+
+             import anki_vector
+             import time
+
+             with anki_vector.Robot(show_viewer=True) as robot:
+                 time.sleep(10)
+                 robot.viewer.close()
+         """
+         if self._close_event:
+             self._close_event.set()
+             self._close_event = None
+         if self._frame_queue:
+             try:
+                 self._frame_queue.put(None, False)
+             except mp.queues.Full:
+                 pass
+             self._frame_queue = None
+         if self._process:
+             self._process.join(timeout=5)
+             if self._process.is_alive():
+                 self._process.terminate()
+             self._process = None
+
+     def enqueue_frame(self, image: Image.Image):
+         """Sends a frame to the viewer's rendering process. Sending `None` to the viewer
+         will cause it to shut down gracefully.
+
+         .. note::
+
+             This function will be called automatically from the camera feed when the
+             :class:`~anki_vector.robot.Robot` or :class:`~anki_vector.robot.AsyncRobot`
+             object is created with ``show_viewer=True``.
+
+         .. code-block:: python
+
+             import anki_vector
+             from PIL.Image import Image
+
+             image = Image()
+             with anki_vector.Robot(show_viewer=True) as robot:
+                 robot.viewer.enqueue_frame(image)
+
+         :param image: A frame from Vector's camera.
+         """
+         close_event = self._close_event
+         if self._frame_queue is not None and close_event is not None and not close_event.is_set():
+             try:
+                 self._frame_queue.put(image, False)
+             except mp.queues.Full:
+                 pass
+
+     def _apply_overlays(self, image: Image.Image) -> None:
+         """Apply all overlays attached to the viewer instance onto the image from the camera feed."""
+         for overlay in self.overlays:
+             overlay.apply_overlay(image)
+         return image
+
+
+ class _ExternalRenderCallFunctor():  # pylint: disable=too-few-public-methods
+     """Externally specified OpenGL render function.
+
+     Allows extra geometry to be rendered into OpenGLViewer.
+
+     :param f: function to call inside the rendering loop
+     :param f_args: a list of arguments to supply to the callable function
+     """
+
+     def __init__(self, f: callable, f_args: list):
+         self._f = f
+         self._f_args = f_args
+
+     def invoke(self, user_data_queue):
+         """Calls the internal function"""
+         self._f(*self._f_args, user_data_queue=user_data_queue)
+
+
+ class Viewer3DComponent(util.Component):
+     """This component opens a window and renders a 3D view obtained from Vector's navigation map.
+     This viewer window is run in a separate process spawned by :func:`~Viewer3DComponent.show`.
+     Being on a separate process means the rendering of the 3D view does not block the main thread
+     of the calling code, and allows the viewer to have its own UI thread with which it can render OpenGL.
+     :func:`~Viewer3DComponent.close` will stop the viewer process.
+
+     .. testcode::
+
+         import anki_vector
+
+         import time
+
+         with anki_vector.Robot(enable_nav_map_feed=True, show_3d_viewer=True) as robot:
+             time.sleep(5)
+
+     :param robot: A reference to the owner Robot object. (May be :class:`None`)
+     """
+
+     def __init__(self, robot):
+         super().__init__(robot)
+         self.overlays: list = []
+         self._close_event: mp.Event = None
+         self._input_intent_queue: mp.Queue = None
+         self._nav_map_queue: mp.Queue = None
+         self._world_frame_queue: mp.Queue = None
+         self._extra_render_function_queue: mp.Queue = None
+         self._user_data_queue: mp.Queue = None
+         self._process: mp.process.BaseProcess = None
+         self._update_thread: threading.Thread = None
+         self._last_robot_control_intents = None
+         self.connecting_to_cube = False
+
+     def show(self, show_viewer_controls: bool = True):
+         """Spawns a background process that shows the navigation map in a 3D view.
+
+         .. testcode::
+
+             import anki_vector
+
+             import time
+
+             with anki_vector.Robot(enable_nav_map_feed=True) as robot:
+                 robot.viewer_3d.show()
+                 time.sleep(5)
+                 robot.viewer_3d.close()
+
+         :param show_viewer_controls: Specifies whether to draw controls on the view.
+         """
+         from . import opengl
+         ctx = mp.get_context('spawn')
+         self._close_event = ctx.Event()
+         self._input_intent_queue = ctx.Queue(maxsize=10)
+         self._nav_map_queue = ctx.Queue(maxsize=10)
+         self._world_frame_queue = ctx.Queue(maxsize=10)
+         self._extra_render_function_queue = ctx.Queue(maxsize=1)
+         self._user_data_queue = ctx.Queue()
+         self._update_thread = threading.Thread(target=self._update,
+                                                args=(),
+                                                daemon=True,
+                                                name="3D Viewer Update Thread")
+         self._update_thread.start()
+         self._process = ctx.Process(target=opengl.main,
+                                     args=(self._close_event,
+                                           self._input_intent_queue,
+                                           self._nav_map_queue,
+                                           self._world_frame_queue,
+                                           self._extra_render_function_queue,
+                                           self._user_data_queue,
+                                           show_viewer_controls),
+                                     daemon=True,
+                                     name="3D Viewer Process")
+         self._process.start()
+         self.robot.events.subscribe(self._on_robot_state_update, Events.robot_state)
+         self.robot.events.subscribe(self._on_nav_map_update, Events.nav_map_update)
+
+     @property
+     def user_data_queue(self):
+         """A queue to send custom data to the 3D viewer process.
+
+         Best used in conjunction with :func:`~Viewer3DComponent.add_render_call` to run a
+         function on the 3D viewer process and then obtain data from this queue.
+         """
+         return self._user_data_queue
+
+     def add_render_call(self, render_function: callable, *args):
+         """Allows external functions to be injected into the viewer process which
+         will be called at the appropriate time in the rendering pipeline.
+
+         Example usage to draw a dot at the world origin:
+
+         .. code-block:: python
+
+             import time
+
+             import anki_vector
+
+             def my_render_function(user_data_queue):
+                 glBegin(GL_POINTS)
+                 glVertex3f(0, 0, 0)
+                 glEnd()
+
+             with anki_vector.Robot(enable_nav_map_feed=True, show_3d_viewer=True) as robot:
+                 robot.viewer_3d.add_render_call(my_render_function)
+                 time.sleep(10)
+
+         :param render_function: The delegated function to be invoked in the pipeline.
+         :param args: An optional list of arguments to send to the render_function;
+             the arguments list must match the parameters accepted by the
+             supplied function.
+         """
+         self._extra_render_function_queue.put(_ExternalRenderCallFunctor(render_function, args))
+
+     def close(self):
+         """Closes the background process showing the 3D view.
+
+         .. testcode::
+
+             import anki_vector
+
+             import time
+
+             with anki_vector.Robot(enable_nav_map_feed=True) as robot:
+                 robot.viewer_3d.show()
+                 time.sleep(5)
+                 robot.viewer_3d.close()
+         """
+         if self._close_event:
+             self._close_event.set()
+             self._close_event = None
+         if self._update_thread:
+             self._update_thread.join(timeout=2)
+             self._update_thread = None
+         self._input_intent_queue = None
+         self._nav_map_queue = None
+         self._world_frame_queue = None
+         if self._process:
+             self._process.join(timeout=5)
+             if self._process.is_alive():
+                 self._process.terminate()
+             self._process = None
+
+     def connect_to_cube(self):
+         '''Connect to light cube'''
+         if self.connecting_to_cube:
+             return
+
+         self.connecting_to_cube = True
+         self.robot.world.connect_cube()
+         self.connecting_to_cube = False
+         return
+
+     def _update(self):
+         """Reads most recently stored user-triggered intents, and sends
+         motor messages to the robot if the intents should affect the robot's
+         current motion.
+
+         Called on the SDK thread, for controlling the robot from input intents
+         pushed from the OpenGL thread.
+
+         :param robot: the robot being updated by this View Controller
+         """
+         close_event = self._close_event
+         while close_event and not close_event.is_set():
+             try:
+                 input_intents = self._input_intent_queue.get(True, timeout=2)  # type: RobotControlIntents
+
+                 # Track last-used intents so that we only issue motor controls
+                 # if different from the last frame (to minimize it fighting with an SDK
+                 # program controlling the robot):
+                 old_intents = self._last_robot_control_intents
+                 self._last_robot_control_intents = input_intents
+
+                 if not old_intents or (old_intents.left_wheel_speed != input_intents.left_wheel_speed
+                                        or old_intents.right_wheel_speed != input_intents.right_wheel_speed):
+                     self.robot.motors.set_wheel_motors(input_intents.left_wheel_speed,
+                                                        input_intents.right_wheel_speed,
+                                                        input_intents.left_wheel_speed * 4,
+                                                        input_intents.right_wheel_speed * 4,
+                                                        _return_future=True)
+
+                 if not old_intents or old_intents.lift_speed != input_intents.lift_speed:
+                     self.robot.motors.set_lift_motor(input_intents.lift_speed, _return_future=True)
+
+                 if not old_intents or old_intents.head_speed != input_intents.head_speed:
+                     self.robot.motors.set_head_motor(input_intents.head_speed, _return_future=True)
+
+                 if input_intents.connect_to_light_block and (old_intents is None or not old_intents.connect_to_light_block):
+                     threading.Thread(target=self.connect_to_cube).start()
+
+             except mp.queues.Empty:
+                 pass
+             close_event = self._close_event
+
+     def _on_robot_state_update(self, robot, *_):
+         """Called from the SDK process whenever the robot state is updated (i.e. every engine tick).
+
+         Note:
+
+             This is called from the SDK process, and will pass the current world render
+             frame to the 3D viewer process.
+
+             We can safely capture any robot and world state here, and push to the OpenGL
+             (main) process via a multiprocessing queue.
+         """
+         from .opengl import opengl_vector
+         world_frame = opengl_vector.WorldRenderFrame(robot, self.connecting_to_cube)
+         queue = self._world_frame_queue
+         if queue:
+             try:
+                 queue.put(world_frame, False)
+             except mp.queues.Full:
+                 pass
+
+     def _on_nav_map_update(self, _robot, _event_type, msg):
+         """Called from the SDK process whenever the nav map is updated.
+
+         Note:
+
+             This is called from the SDK process, and will pass the nav map data to the
+             3D viewer process.
+
+             We can safely capture any robot and world state here, and push to the OpenGL
+             (main) process via a multiprocessing queue.
+         """
+         queue = self._nav_map_queue
+         if queue:
+             try:
+                 queue.put(msg, False)
+             except mp.queues.Full:
+                 pass
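The add_render_call example in the docstring above leaves out the PyOpenGL imports its render function needs, and user_data_queue ships with no example at all. The sketch below shows one way the two fit together; it is illustrative only, and assumes PyOpenGL is installed and that this fork keeps the upstream Robot keyword arguments:

    import queue
    import time

    # glBegin/glVertex3f/glEnd come from PyOpenGL, which the 3D viewer already requires.
    from OpenGL.GL import GL_POINTS, glBegin, glEnd, glVertex3f

    import anki_vector


    def draw_origin(user_data_queue):
        # Runs on the 3D viewer process, once per rendered frame.
        glBegin(GL_POINTS)
        glVertex3f(0, 0, 0)
        glEnd()
        try:
            user_data_queue.put_nowait("origin drawn")
        except queue.Full:
            pass


    with anki_vector.Robot(enable_nav_map_feed=True, show_3d_viewer=True) as robot:
        robot.viewer_3d.add_render_call(draw_origin)
        time.sleep(5)
        # Anything the render call pushed comes back on the SDK side of the queue.
        while not robot.viewer_3d.user_data_queue.empty():
            print(robot.viewer_3d.user_data_queue.get_nowait())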
anki_vector/vision.py ADDED
@@ -0,0 +1,202 @@
+ # Copyright (c) 2018 Anki, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License in the file LICENSE.txt or at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Utility methods for Vector's vision
+
+ Vector can detect various types of objects through his camera feed.
+
+ The :class:`VisionComponent` class defined in this module is made available as
+ :attr:`anki_vector.robot.Robot.vision` and can be used to enable/disable vision
+ processing on the robot.
+ """
+
+ # __all__ should order by constants, event classes, other classes, functions.
+ __all__ = ['VisionComponent']
+
+ from concurrent import futures
+
+ from . import util, connection, events
+ from .messaging import protocol
+
+
+ class VisionComponent(util.Component):  # pylint: disable=too-few-public-methods
+     """VisionComponent exposes controls for the robot's internal image processing.
+
+     The :class:`anki_vector.robot.Robot` or :class:`anki_vector.robot.AsyncRobot` instance owns this vision component.
+
+     :param robot: A reference to the owner Robot object.
+     """
+
+     def __init__(self, robot):
+         super().__init__(robot)
+
+         self._detect_faces = False
+         self._detect_custom_objects = False
+         self._detect_motion = False
+         self._display_camera_feed_on_face = False
+
+         robot.events.subscribe(self._handle_mirror_mode_disabled_event, events.Events.mirror_mode_disabled)
+         robot.events.subscribe(self._handle_vision_modes_auto_disabled_event, events.Events.vision_modes_auto_disabled)
+
+     def close(self):
+         """Close all the running vision modes and wait for a response."""
+         vision_mode = self.disable_all_vision_modes()  # pylint: disable=assignment-from-no-return
+         if isinstance(vision_mode, futures.Future):
+             vision_mode.result()
+
+     def _handle_mirror_mode_disabled_event(self, _robot, _event_type, _msg):
+         self._display_camera_feed_on_face = False
+
+     def _handle_vision_modes_auto_disabled_event(self, _robot, _event_type, _msg):
+         self._detect_faces = False
+         self._detect_custom_objects = False
+         self._detect_motion = False
+         self._display_camera_feed_on_face = False
+
+     @property
+     def detect_faces(self):
+         return self._detect_faces
+
+     @property
+     def detect_custom_objects(self):
+         return self._detect_custom_objects
+
+     @property
+     def detect_motion(self):
+         return self._detect_motion
+
+     @property
+     def display_camera_feed_on_face(self):
+         return self._display_camera_feed_on_face
+
+     @connection.on_connection_thread()
+     async def disable_all_vision_modes(self):
+         if self.detect_faces:
+             await self.enable_face_detection(False, False)
+         if self.detect_custom_objects:
+             await self.enable_custom_object_detection(False)
+         if self.detect_motion:
+             await self.enable_motion_detection(False)
+         if self.display_camera_feed_on_face:
+             await self.enable_display_camera_feed_on_face(False)
+
+     # TODO: add return type hint
+     @connection.on_connection_thread()
+     async def enable_custom_object_detection(self, detect_custom_objects: bool = True):
+         """Enable custom object detection on the robot's camera.
+
+         If custom object detection is being turned off, the robot may still choose to keep it on
+         if another subscriber (including one internal to the robot) requests this vision mode be active.
+
+         See :class:`objects.CustomObjectMarkers`.
+
+         :param detect_custom_objects: Specify whether we want the robot to detect custom objects.
+
+         .. testcode::
+
+             import anki_vector
+             with anki_vector.Robot() as robot:
+                 robot.vision.enable_custom_object_detection()
+         """
+         self._detect_custom_objects = detect_custom_objects
+
+         enable_marker_detection_request = protocol.EnableMarkerDetectionRequest(enable=detect_custom_objects)
+         return await self.grpc_interface.EnableMarkerDetection(enable_marker_detection_request)
+
+     # TODO: add return type hint
+     @connection.on_connection_thread()
+     async def enable_face_detection(
+             self,
+             detect_faces: bool = True,
+             # detect_smile: bool = False,
+             estimate_expression: bool = False,
+             # detect_blink: bool = False,
+             # detect_gaze: bool = False
+     ):
+         """Enable face detection on the robot's camera
+
+         :param detect_faces: Specify whether we want the robot to detect faces.
+         :param detect_smile: Specify whether we want the robot to detect smiles in detected faces.
+         :param estimate_expression: Specify whether we want the robot to estimate what expression detected faces are showing.
+         :param detect_blink: Specify whether we want the robot to detect how much detected faces are blinking.
+         :param detect_gaze: Specify whether we want the robot to detect where detected faces are looking.
+         """
+         self._detect_faces = detect_faces
+
+         enable_face_detection_request = protocol.EnableFaceDetectionRequest(
+             enable=detect_faces,
+             enable_smile_detection=False,
+             enable_expression_estimation=estimate_expression,
+             enable_blink_detection=False,
+             enable_gaze_detection=False)
+         return await self.grpc_interface.EnableFaceDetection(enable_face_detection_request)
+
+     @connection.on_connection_thread()
+     async def enable_motion_detection(self, detect_motion: bool = True):
+         """Enable motion detection on the robot's camera
+
+         :param detect_motion: Specify whether we want the robot to detect motion.
+
+         .. testcode::
+
+             import time
+
+             import anki_vector
+             from anki_vector.events import Events
+             from anki_vector.util import degrees
+
+             def on_robot_observed_motion(robot, event_type, event):
+                 print("Robot observed motion")
+
+             with anki_vector.Robot(show_viewer=True) as robot:
+                 robot.events.subscribe(on_robot_observed_motion, Events.robot_observed_motion)
+
+                 # If necessary, move Vector's head and lift so the camera has a clear view
+                 robot.behavior.set_head_angle(degrees(45.0))
+                 robot.behavior.set_lift_height(0.0)
+
+                 robot.vision.enable_motion_detection(detect_motion=True)
+
+                 print("Vector is waiting to see motion. Make some movement within Vector's camera view")
+
+                 time.sleep(3.0)
+
+                 robot.events.unsubscribe(on_robot_observed_motion, Events.robot_observed_motion)
+         """
+         self._detect_motion = detect_motion
+
+         enable_motion_detection_request = protocol.EnableMotionDetectionRequest(enable=detect_motion)
+         return await self.grpc_interface.EnableMotionDetection(enable_motion_detection_request)
+
+     # TODO: add return type hint
+     @connection.on_connection_thread()
+     async def enable_display_camera_feed_on_face(self, display_camera_feed_on_face: bool = True):
+         """Display the robot's camera feed on its face along with any detections (if enabled)
+
+         :param display_camera_feed_on_face: Specify whether we want to display the robot's camera feed on its face.
+
+         .. testcode::
+
+             import anki_vector
+
+             import time
+
+             with anki_vector.Robot() as robot:
+                 robot.vision.enable_display_camera_feed_on_face()
+                 time.sleep(10.0)
+         """
+         self._display_camera_feed_on_face = display_camera_feed_on_face
+
+         display_camera_feed_request = protocol.EnableMirrorModeRequest(enable=display_camera_feed_on_face)
+         return await self.grpc_interface.EnableMirrorMode(display_camera_feed_request)
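Unlike the motion and mirror-mode methods, enable_face_detection above ships without a usage example. Here is a short sketch pairing it with a face-observed event subscription, assuming this fork keeps the upstream Events.robot_observed_face event name:

    import time

    import anki_vector
    from anki_vector.events import Events


    def on_face(robot, event_type, event):
        # The event carries the face observation reported by the robot.
        print("Vector observed a face")


    with anki_vector.Robot() as robot:
        robot.events.subscribe(on_face, Events.robot_observed_face)

        # detect_faces and estimate_expression map onto EnableFaceDetectionRequest above;
        # smile, blink and gaze detection are hard-coded off in this build.
        robot.vision.enable_face_detection(detect_faces=True, estimate_expression=True)
        time.sleep(10.0)

        robot.events.unsubscribe(on_face, Events.robot_observed_face)
        robot.vision.close()  # turns off whichever vision modes this component enabled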