viam-sdk 0.3.0__py3-none-linux_armv6l.whl → 0.66.0__py3-none-linux_armv6l.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of viam-sdk might be problematic.
- viam/__init__.py +29 -2
- viam/app/_logs.py +34 -0
- viam/app/app_client.py +2696 -0
- viam/app/billing_client.py +185 -0
- viam/app/data_client.py +2231 -0
- viam/app/ml_training_client.py +249 -0
- viam/app/provisioning_client.py +93 -0
- viam/app/viam_client.py +275 -0
- viam/components/arm/__init__.py +3 -26
- viam/components/arm/arm.py +123 -8
- viam/components/arm/client.py +37 -24
- viam/components/arm/service.py +35 -32
- viam/components/audio_in/__init__.py +24 -0
- viam/components/audio_in/audio_in.py +74 -0
- viam/components/audio_in/client.py +76 -0
- viam/components/audio_in/service.py +83 -0
- viam/components/audio_out/__init__.py +21 -0
- viam/components/audio_out/audio_out.py +72 -0
- viam/components/audio_out/client.py +67 -0
- viam/components/audio_out/service.py +63 -0
- viam/components/base/__init__.py +6 -11
- viam/components/base/base.py +134 -8
- viam/components/base/client.py +51 -23
- viam/components/base/service.py +33 -30
- viam/components/board/__init__.py +3 -12
- viam/components/board/board.py +247 -91
- viam/components/board/client.py +149 -83
- viam/components/board/service.py +63 -33
- viam/components/button/__init__.py +10 -0
- viam/components/button/button.py +41 -0
- viam/components/button/client.py +52 -0
- viam/components/button/service.py +46 -0
- viam/components/camera/__init__.py +3 -3
- viam/components/camera/camera.py +62 -27
- viam/components/camera/client.py +59 -27
- viam/components/camera/service.py +42 -65
- viam/components/component_base.py +28 -5
- viam/components/encoder/__init__.py +1 -1
- viam/components/encoder/client.py +25 -14
- viam/components/encoder/encoder.py +48 -10
- viam/components/encoder/service.py +14 -18
- viam/components/gantry/__init__.py +1 -13
- viam/components/gantry/client.py +80 -25
- viam/components/gantry/gantry.py +123 -9
- viam/components/gantry/service.py +51 -29
- viam/components/generic/__init__.py +1 -1
- viam/components/generic/client.py +21 -8
- viam/components/generic/generic.py +10 -2
- viam/components/generic/service.py +12 -7
- viam/components/gripper/__init__.py +3 -13
- viam/components/gripper/client.py +69 -21
- viam/components/gripper/gripper.py +123 -3
- viam/components/gripper/service.py +44 -22
- viam/components/input/__init__.py +1 -14
- viam/components/input/client.py +55 -23
- viam/components/input/input.py +106 -3
- viam/components/input/service.py +16 -21
- viam/components/motor/__init__.py +1 -21
- viam/components/motor/client.py +56 -33
- viam/components/motor/motor.py +127 -4
- viam/components/motor/service.py +33 -44
- viam/components/movement_sensor/__init__.py +1 -1
- viam/components/movement_sensor/client.py +102 -45
- viam/components/movement_sensor/movement_sensor.py +130 -61
- viam/components/movement_sensor/service.py +38 -41
- viam/components/pose_tracker/__init__.py +1 -1
- viam/components/pose_tracker/client.py +18 -7
- viam/components/pose_tracker/pose_tracker.py +4 -2
- viam/components/pose_tracker/service.py +12 -10
- viam/components/power_sensor/__init__.py +17 -0
- viam/components/power_sensor/client.py +86 -0
- viam/components/power_sensor/power_sensor.py +104 -0
- viam/components/power_sensor/service.py +72 -0
- viam/components/sensor/__init__.py +2 -1
- viam/components/sensor/client.py +26 -10
- viam/components/sensor/sensor.py +22 -4
- viam/components/sensor/service.py +20 -11
- viam/components/servo/__init__.py +1 -13
- viam/components/servo/client.py +47 -21
- viam/components/servo/service.py +15 -22
- viam/components/servo/servo.py +61 -2
- viam/components/switch/__init__.py +10 -0
- viam/components/switch/client.py +83 -0
- viam/components/switch/service.py +72 -0
- viam/components/switch/switch.py +98 -0
- viam/errors.py +10 -0
- viam/gen/app/agent/v1/agent_grpc.py +29 -0
- viam/gen/app/agent/v1/agent_pb2.py +47 -0
- viam/gen/app/agent/v1/agent_pb2.pyi +280 -0
- viam/gen/app/cloudslam/v1/__init__.py +0 -0
- viam/gen/app/cloudslam/v1/cloud_slam_grpc.py +70 -0
- viam/gen/app/cloudslam/v1/cloud_slam_pb2.py +54 -0
- viam/gen/app/cloudslam/v1/cloud_slam_pb2.pyi +384 -0
- viam/gen/app/data/v1/data_grpc.py +197 -8
- viam/gen/app/data/v1/data_pb2.py +238 -99
- viam/gen/app/data/v1/data_pb2.pyi +1222 -259
- viam/gen/app/datapipelines/__init__.py +0 -0
- viam/gen/app/datapipelines/v1/__init__.py +0 -0
- viam/gen/app/datapipelines/v1/data_pipelines_grpc.py +84 -0
- viam/gen/app/datapipelines/v1/data_pipelines_pb2.py +57 -0
- viam/gen/app/datapipelines/v1/data_pipelines_pb2.pyi +387 -0
- viam/gen/app/dataset/__init__.py +0 -0
- viam/gen/app/dataset/v1/__init__.py +0 -0
- viam/gen/app/dataset/v1/dataset_grpc.py +68 -0
- viam/gen/app/dataset/v1/dataset_pb2.py +44 -0
- viam/gen/app/dataset/v1/dataset_pb2.pyi +214 -0
- viam/gen/app/datasync/v1/data_sync_grpc.py +21 -4
- viam/gen/app/datasync/v1/data_sync_pb2.py +62 -128
- viam/gen/app/datasync/v1/data_sync_pb2.pyi +156 -199
- viam/gen/app/mlinference/__init__.py +0 -0
- viam/gen/app/mlinference/v1/__init__.py +0 -0
- viam/gen/app/mlinference/v1/ml_inference_grpc.py +28 -0
- viam/gen/app/mlinference/v1/ml_inference_pb2.py +23 -0
- viam/gen/app/mlinference/v1/ml_inference_pb2.pyi +63 -0
- viam/gen/app/mltraining/v1/ml_training_grpc.py +51 -3
- viam/gen/app/mltraining/v1/ml_training_pb2.py +135 -58
- viam/gen/app/mltraining/v1/ml_training_pb2.pyi +328 -39
- viam/gen/app/packages/v1/packages_grpc.py +15 -1
- viam/gen/app/packages/v1/packages_pb2.py +44 -64
- viam/gen/app/packages/v1/packages_pb2.pyi +75 -85
- viam/gen/app/v1/app_grpc.py +644 -3
- viam/gen/app/v1/app_pb2.py +695 -295
- viam/gen/app/v1/app_pb2.pyi +4488 -635
- viam/gen/app/v1/billing_grpc.py +53 -11
- viam/gen/app/v1/billing_pb2.py +94 -39
- viam/gen/app/v1/billing_pb2.pyi +391 -191
- viam/gen/app/v1/end_user_grpc.py +59 -0
- viam/gen/app/v1/end_user_pb2.py +55 -0
- viam/gen/app/v1/end_user_pb2.pyi +181 -0
- viam/gen/app/v1/robot_grpc.py +16 -1
- viam/gen/app/v1/robot_pb2.py +122 -94
- viam/gen/app/v1/robot_pb2.pyi +463 -123
- viam/gen/common/v1/common_pb2.py +87 -58
- viam/gen/common/v1/common_pb2.pyi +456 -149
- viam/gen/component/arm/v1/arm_grpc.py +58 -2
- viam/gen/component/arm/v1/arm_pb2.py +68 -51
- viam/gen/component/arm/v1/arm_pb2.pyi +108 -42
- viam/gen/component/audioin/__init__.py +0 -0
- viam/gen/component/audioin/v1/__init__.py +0 -0
- viam/gen/component/audioin/v1/audioin_grpc.py +54 -0
- viam/gen/component/audioin/v1/audioin_pb2.py +34 -0
- viam/gen/component/audioin/v1/audioin_pb2.pyi +94 -0
- viam/gen/component/audioinput/v1/audioinput_grpc.py +25 -2
- viam/gen/component/audioinput/v1/audioinput_pb2.py +36 -31
- viam/gen/component/audioinput/v1/audioinput_pb2.pyi +22 -22
- viam/gen/component/audioout/__init__.py +0 -0
- viam/gen/component/audioout/v1/__init__.py +0 -0
- viam/gen/component/audioout/v1/audioout_grpc.py +54 -0
- viam/gen/component/audioout/v1/audioout_pb2.py +32 -0
- viam/gen/component/audioout/v1/audioout_pb2.pyi +47 -0
- viam/gen/component/base/v1/base_grpc.py +42 -2
- viam/gen/component/base/v1/base_pb2.py +58 -47
- viam/gen/component/base/v1/base_pb2.pyi +65 -30
- viam/gen/component/board/v1/board_grpc.py +59 -7
- viam/gen/component/board/v1/board_pb2.py +94 -73
- viam/gen/component/board/v1/board_pb2.pyi +165 -68
- viam/gen/component/button/__init__.py +0 -0
- viam/gen/component/button/v1/__init__.py +0 -0
- viam/gen/component/button/v1/button_grpc.py +38 -0
- viam/gen/component/button/v1/button_pb2.py +28 -0
- viam/gen/component/button/v1/button_pb2.pyi +39 -0
- viam/gen/component/camera/v1/camera_grpc.py +38 -2
- viam/gen/component/camera/v1/camera_pb2.py +60 -43
- viam/gen/component/camera/v1/camera_pb2.pyi +191 -37
- viam/gen/component/encoder/v1/encoder_grpc.py +25 -2
- viam/gen/component/encoder/v1/encoder_pb2.py +36 -31
- viam/gen/component/encoder/v1/encoder_pb2.pyi +15 -15
- viam/gen/component/gantry/v1/gantry_grpc.py +47 -2
- viam/gen/component/gantry/v1/gantry_pb2.py +56 -43
- viam/gen/component/gantry/v1/gantry_pb2.pyi +67 -31
- viam/gen/component/generic/v1/generic_grpc.py +16 -2
- viam/gen/component/generic/v1/generic_pb2.py +16 -11
- viam/gen/component/gripper/v1/gripper_grpc.py +44 -2
- viam/gen/component/gripper/v1/gripper_pb2.py +48 -35
- viam/gen/component/gripper/v1/gripper_pb2.pyi +62 -24
- viam/gen/component/inputcontroller/v1/input_controller_grpc.py +28 -2
- viam/gen/component/inputcontroller/v1/input_controller_pb2.py +46 -41
- viam/gen/component/inputcontroller/v1/input_controller_pb2.pyi +32 -36
- viam/gen/component/motor/v1/motor_grpc.py +51 -2
- viam/gen/component/motor/v1/motor_pb2.py +78 -67
- viam/gen/component/motor/v1/motor_pb2.pyi +75 -46
- viam/gen/component/movementsensor/v1/movementsensor_grpc.py +48 -2
- viam/gen/component/movementsensor/v1/movementsensor_pb2.py +70 -63
- viam/gen/component/movementsensor/v1/movementsensor_pb2.pyi +84 -57
- viam/gen/component/posetracker/v1/pose_tracker_grpc.py +19 -2
- viam/gen/component/posetracker/v1/pose_tracker_pb2.py +26 -21
- viam/gen/component/posetracker/v1/pose_tracker_pb2.pyi +9 -13
- viam/gen/component/powersensor/__init__.py +0 -0
- viam/gen/component/powersensor/v1/__init__.py +0 -0
- viam/gen/component/powersensor/v1/powersensor_grpc.py +62 -0
- viam/gen/component/powersensor/v1/powersensor_pb2.py +42 -0
- viam/gen/component/powersensor/v1/powersensor_pb2.pyi +124 -0
- viam/gen/component/sensor/v1/sensor_grpc.py +21 -5
- viam/gen/component/sensor/v1/sensor_pb2.py +18 -22
- viam/gen/component/sensor/v1/sensor_pb2.pyi +1 -69
- viam/gen/component/servo/v1/servo_grpc.py +28 -2
- viam/gen/component/servo/v1/servo_pb2.py +42 -37
- viam/gen/component/servo/v1/servo_pb2.pyi +22 -26
- viam/gen/component/switch/__init__.py +0 -0
- viam/gen/component/switch/v1/__init__.py +0 -0
- viam/gen/component/switch/v1/switch_grpc.py +54 -0
- viam/gen/component/switch/v1/switch_pb2.py +40 -0
- viam/gen/component/switch/v1/switch_pb2.pyi +116 -0
- viam/gen/component/testecho/v1/testecho_grpc.py +15 -0
- viam/gen/component/testecho/v1/testecho_pb2.py +29 -26
- viam/gen/component/testecho/v1/testecho_pb2.pyi +16 -20
- viam/gen/module/v1/module_grpc.py +18 -0
- viam/gen/module/v1/module_pb2.py +36 -33
- viam/gen/module/v1/module_pb2.pyi +39 -34
- viam/gen/opentelemetry/__init__.py +0 -0
- viam/gen/opentelemetry/proto/__init__.py +0 -0
- viam/gen/opentelemetry/proto/common/__init__.py +0 -0
- viam/gen/opentelemetry/proto/common/v1/__init__.py +0 -0
- viam/gen/opentelemetry/proto/common/v1/common_grpc.py +0 -0
- viam/gen/opentelemetry/proto/common/v1/common_pb2.py +27 -0
- viam/gen/opentelemetry/proto/common/v1/common_pb2.pyi +208 -0
- viam/gen/opentelemetry/proto/resource/__init__.py +0 -0
- viam/gen/opentelemetry/proto/resource/v1/__init__.py +0 -0
- viam/gen/opentelemetry/proto/resource/v1/resource_grpc.py +0 -0
- viam/gen/opentelemetry/proto/resource/v1/resource_pb2.py +18 -0
- viam/gen/opentelemetry/proto/resource/v1/resource_pb2.pyi +59 -0
- viam/gen/opentelemetry/proto/trace/__init__.py +0 -0
- viam/gen/opentelemetry/proto/trace/v1/__init__.py +0 -0
- viam/gen/opentelemetry/proto/trace/v1/trace_grpc.py +0 -0
- viam/gen/opentelemetry/proto/trace/v1/trace_pb2.py +37 -0
- viam/gen/opentelemetry/proto/trace/v1/trace_pb2.pyi +402 -0
- viam/gen/proto/rpc/examples/echo/v1/echo_grpc.py +12 -0
- viam/gen/proto/rpc/examples/echo/v1/echo_pb2.py +25 -22
- viam/gen/proto/rpc/examples/echo/v1/echo_pb2.pyi +13 -17
- viam/gen/proto/rpc/examples/echoresource/v1/echoresource_grpc.py +12 -0
- viam/gen/proto/rpc/examples/echoresource/v1/echoresource_pb2.py +23 -20
- viam/gen/proto/rpc/examples/echoresource/v1/echoresource_pb2.pyi +13 -17
- viam/gen/proto/rpc/v1/auth_grpc.py +11 -0
- viam/gen/proto/rpc/v1/auth_pb2.py +27 -24
- viam/gen/proto/rpc/v1/auth_pb2.pyi +12 -16
- viam/gen/proto/rpc/webrtc/v1/grpc_pb2.py +35 -32
- viam/gen/proto/rpc/webrtc/v1/grpc_pb2.pyi +37 -41
- viam/gen/proto/rpc/webrtc/v1/signaling_grpc.py +15 -0
- viam/gen/proto/rpc/webrtc/v1/signaling_pb2.py +62 -57
- viam/gen/proto/rpc/webrtc/v1/signaling_pb2.pyi +78 -69
- viam/gen/provisioning/__init__.py +0 -0
- viam/gen/provisioning/v1/__init__.py +0 -0
- viam/gen/provisioning/v1/provisioning_grpc.py +59 -0
- viam/gen/provisioning/v1/provisioning_pb2.py +45 -0
- viam/gen/provisioning/v1/provisioning_pb2.pyi +229 -0
- viam/gen/robot/v1/robot_grpc.py +144 -15
- viam/gen/robot/v1/robot_pb2.py +193 -119
- viam/gen/robot/v1/robot_pb2.pyi +565 -137
- viam/gen/service/datamanager/v1/data_manager_grpc.py +20 -2
- viam/gen/service/datamanager/v1/data_manager_pb2.py +27 -17
- viam/gen/service/datamanager/v1/data_manager_pb2.pyi +52 -10
- viam/gen/service/discovery/__init__.py +0 -0
- viam/gen/service/discovery/v1/__init__.py +0 -0
- viam/gen/service/discovery/v1/discovery_grpc.py +39 -0
- viam/gen/service/discovery/v1/discovery_pb2.py +29 -0
- viam/gen/service/discovery/v1/discovery_pb2.pyi +51 -0
- viam/gen/service/generic/__init__.py +0 -0
- viam/gen/service/generic/v1/__init__.py +0 -0
- viam/gen/service/generic/v1/generic_grpc.py +29 -0
- viam/gen/service/generic/v1/generic_pb2.py +21 -0
- viam/gen/service/generic/v1/generic_pb2.pyi +6 -0
- viam/gen/service/mlmodel/v1/mlmodel_grpc.py +9 -0
- viam/gen/service/mlmodel/v1/mlmodel_pb2.py +76 -29
- viam/gen/service/mlmodel/v1/mlmodel_pb2.pyi +307 -28
- viam/gen/service/motion/v1/motion_grpc.py +42 -4
- viam/gen/service/motion/v1/motion_pb2.py +119 -51
- viam/gen/service/motion/v1/motion_pb2.pyi +595 -120
- viam/gen/service/navigation/v1/navigation_grpc.py +49 -1
- viam/gen/service/navigation/v1/navigation_pb2.py +76 -51
- viam/gen/service/navigation/v1/navigation_pb2.pyi +188 -33
- viam/gen/service/sensors/v1/sensors_grpc.py +12 -0
- viam/gen/service/sensors/v1/sensors_pb2.py +60 -29
- viam/gen/service/sensors/v1/sensors_pb2.pyi +18 -21
- viam/gen/service/shell/v1/shell_grpc.py +27 -1
- viam/gen/service/shell/v1/shell_pb2.py +37 -15
- viam/gen/service/shell/v1/shell_pb2.pyi +260 -7
- viam/gen/service/slam/v1/slam_grpc.py +24 -2
- viam/gen/service/slam/v1/slam_pb2.py +44 -30
- viam/gen/service/slam/v1/slam_pb2.pyi +128 -27
- viam/gen/service/video/__init__.py +0 -0
- viam/gen/service/video/v1/__init__.py +0 -0
- viam/gen/service/video/v1/video_grpc.py +39 -0
- viam/gen/service/video/v1/video_pb2.py +29 -0
- viam/gen/service/video/v1/video_pb2.pyi +72 -0
- viam/gen/service/vision/v1/vision_grpc.py +39 -1
- viam/gen/service/vision/v1/vision_pb2.py +61 -45
- viam/gen/service/vision/v1/vision_pb2.pyi +180 -41
- viam/gen/service/worldstatestore/__init__.py +0 -0
- viam/gen/service/worldstatestore/v1/__init__.py +0 -0
- viam/gen/service/worldstatestore/v1/world_state_store_grpc.py +55 -0
- viam/gen/service/worldstatestore/v1/world_state_store_pb2.py +39 -0
- viam/gen/service/worldstatestore/v1/world_state_store_pb2.pyi +171 -0
- viam/gen/stream/__init__.py +0 -0
- viam/gen/stream/v1/__init__.py +0 -0
- viam/gen/stream/v1/stream_grpc.py +59 -0
- viam/gen/stream/v1/stream_pb2.py +39 -0
- viam/gen/stream/v1/stream_pb2.pyi +161 -0
- viam/gen/tagger/v1/tagger_pb2.py +9 -8
- viam/logging.py +160 -17
- viam/media/__init__.py +0 -9
- viam/media/audio.py +22 -10
- viam/media/utils/__init__.py +0 -0
- viam/media/utils/pil/__init__.py +55 -0
- viam/media/{viam_rgba_plugin.py → utils/pil/viam_rgba_plugin.py} +10 -16
- viam/media/viam_rgba.py +10 -0
- viam/media/video.py +197 -73
- viam/module/module.py +191 -44
- viam/module/resource_data_consumer.py +41 -0
- viam/module/service.py +9 -1
- viam/module/types.py +4 -5
- viam/operations.py +4 -3
- viam/proto/app/__init__.py +361 -5
- viam/proto/app/agent/__init__.py +28 -0
- viam/proto/app/billing.py +51 -27
- viam/proto/app/cloudslam/__init__.py +48 -0
- viam/proto/app/data/__init__.py +103 -17
- viam/proto/app/datapipelines/__init__.py +56 -0
- viam/proto/app/dataset/__init__.py +40 -0
- viam/proto/app/datasync/__init__.py +11 -5
- viam/proto/app/end_user.py +34 -0
- viam/proto/app/mlinference/__init__.py +15 -0
- viam/proto/app/mltraining/__init__.py +25 -1
- viam/proto/app/packages/__init__.py +3 -3
- viam/proto/app/robot.py +19 -1
- viam/proto/common/__init__.py +35 -8
- viam/proto/component/arm/__init__.py +9 -1
- viam/proto/component/audioin/__init__.py +16 -0
- viam/proto/component/audioinput/__init__.py +3 -1
- viam/proto/component/audioout/__init__.py +15 -0
- viam/proto/component/base/__init__.py +7 -1
- viam/proto/component/board/__init__.py +13 -5
- viam/proto/component/button/__init__.py +15 -0
- viam/proto/component/camera/__init__.py +9 -1
- viam/proto/component/encoder/__init__.py +3 -1
- viam/proto/component/gantry/__init__.py +7 -1
- viam/proto/component/generic/__init__.py +3 -1
- viam/proto/component/gripper/__init__.py +7 -1
- viam/proto/component/inputcontroller/__init__.py +7 -1
- viam/proto/component/motor/__init__.py +7 -1
- viam/proto/component/movementsensor/__init__.py +7 -1
- viam/proto/component/posetracker/__init__.py +7 -1
- viam/proto/component/powersensor/__init__.py +30 -0
- viam/proto/component/sensor/__init__.py +3 -4
- viam/proto/component/servo/__init__.py +3 -1
- viam/proto/component/switch/__init__.py +26 -0
- viam/proto/component/testecho/__init__.py +3 -1
- viam/proto/module/__init__.py +3 -1
- viam/proto/opentelemetry/__init__.py +0 -0
- viam/proto/opentelemetry/proto/__init__.py +0 -0
- viam/proto/opentelemetry/proto/common/__init__.py +15 -0
- viam/proto/opentelemetry/proto/resource/__init__.py +10 -0
- viam/proto/opentelemetry/proto/trace/__init__.py +15 -0
- viam/proto/provisioning/__init__.py +42 -0
- viam/proto/robot/__init__.py +57 -9
- viam/proto/rpc/auth.py +11 -1
- viam/proto/rpc/examples/echo/__init__.py +3 -1
- viam/proto/rpc/examples/echoresource/__init__.py +7 -1
- viam/proto/rpc/webrtc/grpc.py +3 -1
- viam/proto/rpc/webrtc/signaling.py +5 -1
- viam/proto/service/datamanager/__init__.py +15 -2
- viam/proto/service/discovery/__init__.py +15 -0
- viam/proto/service/generic/__init__.py +12 -0
- viam/proto/service/mlmodel/__init__.py +27 -1
- viam/proto/service/motion/__init__.py +35 -5
- viam/proto/service/navigation/__init__.py +19 -1
- viam/proto/service/sensors/__init__.py +3 -1
- viam/proto/service/shell/__init__.py +25 -2
- viam/proto/service/slam/__init__.py +13 -1
- viam/proto/service/video/__init__.py +15 -0
- viam/proto/service/vision/__init__.py +11 -1
- viam/proto/service/worldstatestore/__init__.py +32 -0
- viam/proto/stream/__init__.py +36 -0
- viam/py.typed +0 -0
- viam/resource/base.py +45 -8
- viam/resource/easy_resource.py +149 -0
- viam/resource/manager.py +35 -14
- viam/resource/registry.py +40 -52
- viam/resource/rpc_client_base.py +33 -1
- viam/resource/rpc_service_base.py +15 -8
- viam/resource/types.py +39 -26
- viam/robot/client.py +458 -91
- viam/robot/service.py +13 -107
- viam/rpc/dial.py +133 -15
- viam/rpc/libviam_rust_utils.so +0 -0
- viam/rpc/server.py +59 -15
- viam/rpc/types.py +2 -4
- viam/services/discovery/__init__.py +12 -0
- viam/services/discovery/client.py +55 -0
- viam/services/discovery/discovery.py +52 -0
- viam/services/discovery/service.py +43 -0
- viam/services/generic/__init__.py +18 -0
- viam/services/generic/client.py +58 -0
- viam/services/generic/generic.py +58 -0
- viam/services/generic/service.py +29 -0
- viam/services/mlmodel/__init__.py +15 -1
- viam/services/mlmodel/client.py +20 -15
- viam/services/mlmodel/mlmodel.py +44 -7
- viam/services/mlmodel/service.py +9 -13
- viam/services/mlmodel/utils.py +101 -0
- viam/services/motion/__init__.py +15 -3
- viam/services/motion/client.py +109 -150
- viam/services/motion/motion.py +380 -0
- viam/services/motion/service.py +132 -0
- viam/services/navigation/__init__.py +11 -0
- viam/services/navigation/client.py +99 -0
- viam/services/navigation/navigation.py +250 -0
- viam/services/navigation/service.py +137 -0
- viam/services/service_base.py +43 -4
- viam/services/service_client_base.py +4 -4
- viam/services/slam/__init__.py +4 -1
- viam/services/slam/client.py +21 -11
- viam/services/slam/service.py +16 -19
- viam/services/slam/slam.py +66 -5
- viam/services/vision/__init__.py +8 -0
- viam/services/vision/client.py +115 -111
- viam/services/vision/service.py +143 -0
- viam/services/vision/vision.py +317 -0
- viam/services/worldstatestore/__init__.py +18 -0
- viam/services/worldstatestore/client.py +94 -0
- viam/services/worldstatestore/service.py +55 -0
- viam/services/worldstatestore/worldstatestore.py +90 -0
- viam/sessions_client.py +254 -0
- viam/streams.py +44 -0
- viam/utils.py +143 -15
- viam/version_metadata.py +4 -0
- viam_sdk-0.66.0.dist-info/METADATA +157 -0
- viam_sdk-0.66.0.dist-info/RECORD +531 -0
- {viam_sdk-0.3.0.dist-info → viam_sdk-0.66.0.dist-info}/WHEEL +1 -1
- viam/components/audio_input/__init__.py +0 -18
- viam/components/audio_input/audio_input.py +0 -79
- viam/components/audio_input/client.py +0 -60
- viam/components/audio_input/service.py +0 -118
- viam/components/types.py +0 -5
- viam/gen/app/model/v1/model_grpc.py +0 -39
- viam/gen/app/model/v1/model_pb2.py +0 -71
- viam/gen/app/model/v1/model_pb2.pyi +0 -285
- viam/gen/proto/rpc/examples/fileupload/v1/fileupload_grpc.py +0 -21
- viam/gen/proto/rpc/examples/fileupload/v1/fileupload_pb2.py +0 -18
- viam/gen/proto/rpc/examples/fileupload/v1/fileupload_pb2.pyi +0 -49
- viam/media/media.py +0 -53
- viam/proto/app/model/__init__.py +0 -40
- viam/proto/rpc/examples/fileupload/__init__.py +0 -13
- viam/services/sensors/__init__.py +0 -5
- viam/services/sensors/client.py +0 -63
- viam_sdk-0.3.0.dist-info/LICENSE +0 -202
- viam_sdk-0.3.0.dist-info/METADATA +0 -122
- viam_sdk-0.3.0.dist-info/RECORD +0 -372
- /viam/{gen/app/model → app}/__init__.py +0 -0
- /viam/gen/app/{model/v1 → agent}/__init__.py +0 -0
- /viam/gen/{proto/rpc/examples/fileupload → app/agent/v1}/__init__.py +0 -0
- /viam/gen/{proto/rpc/examples/fileupload/v1 → app/cloudslam}/__init__.py +0 -0
- /LICENSE → /viam_sdk-0.66.0.dist-info/licenses/LICENSE +0 -0
viam/services/slam/slam.py
CHANGED
@@ -1,11 +1,18 @@
 import abc
+import sys
 from typing import Final, List, Optional
 
-from viam.
+from viam.proto.service.slam import GetPropertiesResponse
+from viam.resource.types import API, RESOURCE_NAMESPACE_RDK, RESOURCE_TYPE_SERVICE
 
 from ..service_base import ServiceBase
 from . import Pose
 
+if sys.version_info >= (3, 10):
+    from typing import TypeAlias
+else:
+    from typing_extensions import TypeAlias
+
 
 class SLAM(ServiceBase):
     """
@@ -14,28 +21,54 @@ class SLAM(ServiceBase):
     This acts as an abstract base class for any drivers representing specific
     arm implementations. This cannot be used on its own. If the ``__init__()`` function is
     overridden, it must call the ``super().__init__()`` function.
+
+    For more information, see `SLAM service <https://docs.viam.com/dev/reference/apis/services/slam/>`_.
     """
 
-
+    API: Final = API(RESOURCE_NAMESPACE_RDK, RESOURCE_TYPE_SERVICE, "slam")  # pyright: ignore [reportIncompatibleVariableOverride]
+
+    Properties: "TypeAlias" = GetPropertiesResponse
 
     @abc.abstractmethod
     async def get_internal_state(self, *, timeout: Optional[float]) -> List[bytes]:
-        """
+        """
+        Get the internal state of the SLAM algorithm required to continue mapping/localization.
+
+        ::
+
+            slam = SLAMClient.from_robot(robot=machine, name="my_slam_service")
+
+            # Get the internal state of the SLAM algorithm required to continue mapping/localization.
+            internal_state = await slam.get_internal_state()
 
         Returns:
             List[GetInternalStateResponse]: Chunks of the internal state of the SLAM algorithm
 
+        For more information, see `SLAM service <https://docs.viam.com/dev/reference/apis/services/slam/#getinternalstate>`_.
         """
         ...
 
     @abc.abstractmethod
-    async def get_point_cloud_map(self, *, timeout: Optional[float]) -> List[bytes]:
+    async def get_point_cloud_map(self, return_edited_map: bool = False, *, timeout: Optional[float]) -> List[bytes]:
         """
         Get the point cloud map.
 
+        ::
+
+            slam_svc = SLAMClient.from_robot(robot=machine, name="my_slam_service")
+
+            # Get the point cloud map in standard PCD format.
+            pcd_map = await slam_svc.get_point_cloud_map()
+
+        Args:
+            return_edited_map (bool): signal to the SLAM service to return an edited map, if the map package contains one and if
+                the SLAM service supports the feature
+
         Returns:
             List[GetPointCloudMapResponse]: Complete pointcloud in standard PCD format. Chunks of the PointCloud, concatenating all
-
+                GetPointCloudMapResponse.point_cloud_pcd_chunk values.
+
+        For more information, see `SLAM service <https://docs.viam.com/dev/reference/apis/services/slam/#getpointcloudmap>`_.
         """
         ...
 
@@ -44,7 +77,35 @@ class SLAM(ServiceBase):
         """
         Get current position of the specified component in the SLAM Map.
 
+        ::
+
+            slam_svc = SLAMClient.from_robot(robot=machine, name="my_slam_service")
+
+            # Get the current position of the specified source component in the SLAM map as a Pose.
+            pose = await slam.get_position()
+
         Returns:
             Pose: The current position of the specified component
+
+        For more information, see `SLAM service <https://docs.viam.com/dev/reference/apis/services/slam/#getposition>`_.
+        """
+        ...
+
+    @abc.abstractmethod
+    async def get_properties(self, *, timeout: Optional[float]) -> Properties:
+        """
+        Get information regarding the current SLAM session.
+
+        ::
+
+            slam_svc = SLAMClient.from_robot(robot=machine, name="my_slam_service")
+
+            # Get the properties of your current SLAM session.
+            slam_properties = await slam_svc.get_properties()
+
+        Returns:
+            Properties: The properties of SLAM
+
+        For more information, see `SLAM service <https://docs.viam.com/dev/reference/apis/services/slam/#getproperties>`_.
         """
         ...
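
The expanded SLAM surface above can be exercised end to end from a connected machine. The sketch below is assembled only from the docstring examples in this diff; it assumes `machine` is an already-connected RobotClient, that `SLAMClient` is importable from `viam.services.slam`, and that concatenating the returned PCD chunks into a local file is an acceptable way to persist the map.

from viam.services.slam import SLAMClient  # import path assumed from the SDK layout above


async def inspect_slam(machine):
    slam_svc = SLAMClient.from_robot(robot=machine, name="my_slam_service")

    # New in this release: session properties and the return_edited_map flag.
    slam_properties = await slam_svc.get_properties()
    pose = await slam_svc.get_position()

    # The map arrives as chunks of a standard PCD file; join them to get the whole map.
    pcd_chunks = await slam_svc.get_point_cloud_map(return_edited_map=False)
    with open("slam_map.pcd", "wb") as f:
        f.write(b"".join(pcd_chunks))

    return slam_properties, pose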
viam/services/vision/__init__.py
CHANGED
@@ -1,7 +1,15 @@
+from viam.resource.registry import Registry, ResourceRegistration
+from viam.services.vision.service import VisionRPCService
+
 from .client import Classification, Detection, VisionClient
+from .vision import CaptureAllResult, Vision
 
 __all__ = [
+    "CaptureAllResult",
     "Classification",
     "Detection",
     "VisionClient",
+    "Vision",
 ]
+
+Registry.register_api(ResourceRegistration(Vision, VisionRPCService, lambda name, channel: VisionClient(name, channel)))
viam/services/vision/client.py
CHANGED
@@ -1,12 +1,13 @@
-from
-from typing import Any, Final, List, Mapping, Optional, Union
+from typing import Any, List, Mapping, Optional
 
 from grpclib.client import Channel
 
-from viam.
-from viam.media.video import CameraMimeType,
+from viam.errors import ViamError
+from viam.media.video import CameraMimeType, ViamImage
 from viam.proto.common import DoCommandRequest, DoCommandResponse, PointCloudObject
 from viam.proto.service.vision import (
+    CaptureAllFromCameraRequest,
+    CaptureAllFromCameraResponse,
     Classification,
     Detection,
     GetClassificationsFromCameraRequest,
@@ -19,78 +20,102 @@ from viam.proto.service.vision import (
     GetDetectionsResponse,
     GetObjectPointCloudsRequest,
     GetObjectPointCloudsResponse,
+    GetPropertiesRequest,
+    GetPropertiesResponse,
     VisionServiceStub,
 )
 from viam.resource.rpc_client_base import ReconfigurableResourceRPCClientBase
-from viam.resource.types import RESOURCE_NAMESPACE_RDK, RESOURCE_TYPE_SERVICE, Subtype
-from viam.services.service_client_base import ServiceClientBase
 from viam.utils import ValueTypes, dict_to_struct, struct_to_dict
 
+from .vision import CaptureAllResult, Vision
 
-
+
+class VisionClient(Vision, ReconfigurableResourceRPCClientBase):
     """
     Connect to the Vision service, which allows you to access various computer vision algorithms
     (like detection, segmentation, tracking, etc) that usually only require a camera or image input.
     """
 
-    SUBTYPE: Final = Subtype(RESOURCE_NAMESPACE_RDK, RESOURCE_TYPE_SERVICE, "vision")
     client: VisionServiceStub
 
     def __init__(self, name: str, channel: Channel):
-        super().__init__(name
+        super().__init__(name)
+        self.channel = channel
         self.client = VisionServiceStub(channel)
 
+    async def capture_all_from_camera(
+        self,
+        camera_name: str,
+        return_image: bool = False,
+        return_classifications: bool = False,
+        return_detections: bool = False,
+        return_object_point_clouds: bool = False,
+        *,
+        extra: Optional[Mapping[str, Any]] = None,
+        timeout: Optional[float] = None,
+        **kwargs,
+    ) -> CaptureAllResult:
+        md = kwargs.get("metadata", self.Metadata()).proto
+        request = CaptureAllFromCameraRequest(
+            name=self.name,
+            camera_name=camera_name,
+            return_image=return_image,
+            return_classifications=return_classifications,
+            return_detections=return_detections,
+            return_object_point_clouds=return_object_point_clouds,
+            extra=dict_to_struct(extra),
+        )
+        response: CaptureAllFromCameraResponse = await self.client.CaptureAllFromCamera(request, timeout=timeout, metadata=md)
+        result = CaptureAllResult()
+        result.extra = struct_to_dict(response.extra)
+        if return_image:
+            mime_type = CameraMimeType.from_string(response.image.mime_type)
+            img = ViamImage(response.image.image, mime_type)
+            result.image = img
+        if return_classifications:
+            result.classifications = list(response.classifications)
+        if return_detections:
+            result.detections = list(response.detections)
+        if return_object_point_clouds:
+            result.objects = list(response.objects)
+        return result
+
     async def get_detections_from_camera(
-        self,
+        self,
+        camera_name: str,
+        *,
+        extra: Optional[Mapping[str, Any]] = None,
+        timeout: Optional[float] = None,
+        **kwargs,
     ) -> List[Detection]:
-        ""
-
-        Args:
-            camera_name (str): The name of the camera to use for detection
-
-        Returns:
-            List[viam.proto.service.vision.Detection]: A list of 2D bounding boxes, their labels, and the
-            confidence score of the labels, around the found objects in the next 2D image
-            from the given camera, with the given detector applied to it.
-        """
-        if extra is None:
-            extra = {}
+        md = kwargs.get("metadata", self.Metadata()).proto
         request = GetDetectionsFromCameraRequest(name=self.name, camera_name=camera_name, extra=dict_to_struct(extra))
-        response: GetDetectionsFromCameraResponse = await self.client.GetDetectionsFromCamera(request, timeout=timeout)
+        response: GetDetectionsFromCameraResponse = await self.client.GetDetectionsFromCamera(request, timeout=timeout, metadata=md)
         return list(response.detections)
 
     async def get_detections(
         self,
-        image:
+        image: ViamImage,
         *,
         extra: Optional[Mapping[str, Any]] = None,
         timeout: Optional[float] = None,
+        **kwargs,
     ) -> List[Detection]:
-        ""
-
-        Args:
-            image (Image): The image to get detections from
-
-        Returns:
-            List[viam.proto.service.vision.Detection]: A list of 2D bounding boxes, their labels, and the
-            confidence score of the labels, around the found objects in the next 2D image
-            from the given camera, with the given detector applied to it.
-        """
-        if extra is None:
-            extra = {}
+        md = kwargs.get("metadata", self.Metadata()).proto
         mime_type = CameraMimeType.JPEG
-        if isinstance(image, RawImage):
-            image = Image.open(BytesIO(image.data), formats=[mime_type.name])
 
-
-
-
-
-
-
-
-
-
+        if image.width is None or image.height is None:
+            raise ViamError(f"image {image} needs to have a specified width and height")
+        else:
+            request = GetDetectionsRequest(
+                name=self.name,
+                image=image.data,
+                width=image.width,
+                height=image.height,
+                mime_type=mime_type,
+                extra=dict_to_struct(extra),
+            )
+        response: GetDetectionsResponse = await self.client.GetDetections(request, timeout=timeout, metadata=md)
         return list(response.detections)
 
     async def get_classifications_from_camera(
@@ -100,103 +125,82 @@ class VisionClient(ServiceClientBase, ReconfigurableResourceRPCClientBase):
         *,
         extra: Optional[Mapping[str, Any]] = None,
         timeout: Optional[float] = None,
+        **kwargs,
    ) -> List[Classification]:
-        ""
-
-        Args:
-            camera_name (str): The name of the camera to use for detection
-            count (int): The number of classifications desired
-
-        returns:
-            List[viam.proto.service.vision.Classification]: The list of Classifications
-        """
-        if extra is None:
-            extra = {}
+        md = kwargs.get("metadata", self.Metadata()).proto
         request = GetClassificationsFromCameraRequest(name=self.name, camera_name=camera_name, n=count, extra=dict_to_struct(extra))
-        response: GetClassificationsFromCameraResponse = await self.client.GetClassificationsFromCamera(
+        response: GetClassificationsFromCameraResponse = await self.client.GetClassificationsFromCamera(
+            request, timeout=timeout, metadata=md
+        )
         return list(response.classifications)
 
     async def get_classifications(
         self,
-        image:
+        image: ViamImage,
         count: int,
         *,
         extra: Optional[Mapping[str, Any]] = None,
         timeout: Optional[float] = None,
+        **kwargs,
     ) -> List[Classification]:
-        ""
+        md = kwargs.get("metadata", self.Metadata()).proto
 
-        Args:
-            image (Image): The image to get detections from
-
-        Returns:
-            List[viam.proto.service.vision.Classification]: The list of Classifications
-        """
-        if extra is None:
-            extra = {}
         mime_type = CameraMimeType.JPEG
-        if
-
-
+        if image.width is None or image.height is None:
+            raise ViamError(f"image {image} needs to have a specified width and height")
         request = GetClassificationsRequest(
             name=self.name,
-            image=
+            image=image.data,
             width=image.width,
             height=image.height,
             mime_type=mime_type,
             n=count,
             extra=dict_to_struct(extra),
         )
-        response: GetClassificationsResponse = await self.client.GetClassifications(request, timeout=timeout)
+        response: GetClassificationsResponse = await self.client.GetClassifications(request, timeout=timeout, metadata=md)
         return list(response.classifications)
 
     async def get_object_point_clouds(
-        self,
+        self,
+        camera_name: str,
+        *,
+        extra: Optional[Mapping[str, Any]] = None,
+        timeout: Optional[float] = None,
+        **kwargs,
     ) -> List[PointCloudObject]:
-        ""
-        Returns a list of the 3D point cloud objects and associated metadata in the latest
-        picture obtained from the specified 3D camera (using the specified segmenter).
-
-        To deserialize the returned information into a numpy array, use the Open3D library.
-        ::
-
-            import numpy as np
-            import open3d as o3d
-
-            object_point_clouds = await vision.get_object_point_clouds(camera_name, segmenter_name)
-
-            # write the first object point cloud into a temporary file
-            with open("/tmp/pointcloud_data.pcd", "wb") as f:
-                f.write(object_point_clouds[0].point_cloud)
-            pcd = o3d.io.read_point_cloud("/tmp/pointcloud_data.pcd")
-            points = np.asarray(pcd.points)
-
-        Args:
-            camera_name (str): The name of the camera
-
-        Returns:
-            List[viam.proto.common.PointCloudObject]: The pointcloud objects with metadata
-        """
-        if extra is None:
-            extra = {}
+        md = kwargs.get("metadata", self.Metadata()).proto
         request = GetObjectPointCloudsRequest(
             name=self.name,
             camera_name=camera_name,
             mime_type=CameraMimeType.PCD,
             extra=dict_to_struct(extra),
         )
-        response: GetObjectPointCloudsResponse = await self.client.GetObjectPointClouds(request, timeout=timeout)
+        response: GetObjectPointCloudsResponse = await self.client.GetObjectPointClouds(request, timeout=timeout, metadata=md)
         return list(response.objects)
 
-    async def
-
-
-
-
+    async def get_properties(
+        self,
+        *,
+        extra: Optional[Mapping[str, Any]] = None,
+        timeout: Optional[float] = None,
+        **kwargs,
+    ) -> Vision.Properties:
+        md = kwargs.get("metadata", self.Metadata()).proto
+        request = GetPropertiesRequest(
+            name=self.name,
+            extra=dict_to_struct(extra),
+        )
+        response: GetPropertiesResponse = await self.client.GetProperties(request, timeout=timeout, metadata=md)
+        return response
 
-
-
-
+    async def do_command(
+        self,
+        command: Mapping[str, ValueTypes],
+        *,
+        timeout: Optional[float] = None,
+        **kwargs,
+    ) -> Mapping[str, ValueTypes]:
+        md = kwargs.get("metadata", self.Metadata()).proto
         request = DoCommandRequest(name=self.name, command=dict_to_struct(command))
-        response: DoCommandResponse = await self.client.DoCommand(request, timeout=timeout)
+        response: DoCommandResponse = await self.client.DoCommand(request, timeout=timeout, metadata=md)
         return struct_to_dict(response.result)
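
The reworked client above adds `capture_all_from_camera`, `get_properties`, and per-call gRPC metadata. Below is a minimal usage sketch, assuming `machine` is a connected RobotClient and that `VisionClient.from_robot` is available (the diff itself only shows `__init__(name, channel)`).

from viam.services.vision import VisionClient


async def snapshot(machine):
    vision = VisionClient.from_robot(machine, "my_vision")

    # New in this release: ask the service what it supports before requesting results.
    props = await vision.get_properties()

    result = await vision.capture_all_from_camera(
        "my_camera",
        return_image=True,
        return_detections=props.detections_supported,
        return_classifications=props.classifications_supported,
    )
    # result is a CaptureAllResult with .image (ViamImage), .detections, .classifications,
    # .objects, and .extra, populated according to the flags passed above.
    return result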
viam/services/vision/service.py
ADDED
@@ -0,0 +1,143 @@
+from grpclib.server import Stream
+
+from viam.media.video import CameraMimeType, ViamImage
+from viam.proto.common import DoCommandRequest, DoCommandResponse
+from viam.proto.component.camera import Image
+from viam.proto.service.vision import (
+    CaptureAllFromCameraRequest,
+    CaptureAllFromCameraResponse,
+    GetClassificationsFromCameraRequest,
+    GetClassificationsFromCameraResponse,
+    GetClassificationsRequest,
+    GetClassificationsResponse,
+    GetDetectionsFromCameraRequest,
+    GetDetectionsFromCameraResponse,
+    GetDetectionsRequest,
+    GetDetectionsResponse,
+    GetObjectPointCloudsRequest,
+    GetObjectPointCloudsResponse,
+    GetPropertiesRequest,
+    GetPropertiesResponse,
+    UnimplementedVisionServiceBase,
+)
+from viam.resource.rpc_service_base import ResourceRPCServiceBase
+from viam.utils import dict_to_struct, struct_to_dict
+
+from .vision import Vision
+
+
+class VisionRPCService(UnimplementedVisionServiceBase, ResourceRPCServiceBase[Vision]):
+    """
+    gRPC service for a Vision service
+    """
+
+    RESOURCE_TYPE = Vision
+
+    async def CaptureAllFromCamera(self, stream: Stream[CaptureAllFromCameraRequest, CaptureAllFromCameraResponse]) -> None:
+        request = await stream.recv_message()
+        assert request is not None
+        vision = self.get_resource(request.name)
+        extra = struct_to_dict(request.extra)
+        timeout = stream.deadline.time_remaining() if stream.deadline else None
+        result = await vision.capture_all_from_camera(
+            request.camera_name,
+            return_image=request.return_image,
+            return_classifications=request.return_classifications,
+            return_detections=request.return_detections,
+            return_object_point_clouds=request.return_object_point_clouds,
+            extra=extra,
+            timeout=timeout,
+        )
+        img = None
+        if result.image is not None:
+            img_bytes = result.image.data
+            img = Image(source_name=request.camera_name, mime_type=result.image.mime_type, image=img_bytes)
+        response = CaptureAllFromCameraResponse(
+            image=img,
+            detections=result.detections,
+            classifications=result.classifications,
+            objects=result.objects,
+            extra=dict_to_struct(result.extra if result.extra else {}),
+        )
+        await stream.send_message(response)
+
+    async def GetDetectionsFromCamera(self, stream: Stream[GetDetectionsFromCameraRequest, GetDetectionsFromCameraResponse]) -> None:
+        request = await stream.recv_message()
+        assert request is not None
+        vision = self.get_resource(request.name)
+        extra = struct_to_dict(request.extra)
+        timeout = stream.deadline.time_remaining() if stream.deadline else None
+        result = await vision.get_detections_from_camera(request.camera_name, extra=extra, timeout=timeout)
+        response = GetDetectionsFromCameraResponse(detections=result)
+        await stream.send_message(response)
+
+    async def GetDetections(self, stream: Stream[GetDetectionsRequest, GetDetectionsResponse]) -> None:
+        request = await stream.recv_message()
+        assert request is not None
+        vision = self.get_resource(request.name)
+        extra = struct_to_dict(request.extra)
+        timeout = stream.deadline.time_remaining() if stream.deadline else None
+
+        image = ViamImage(request.image, CameraMimeType.from_string(request.mime_type))
+
+        result = await vision.get_detections(image, extra=extra, timeout=timeout)
+        response = GetDetectionsResponse(detections=result)
+        await stream.send_message(response)
+
+    async def GetClassificationsFromCamera(
+        self, stream: Stream[GetClassificationsFromCameraRequest, GetClassificationsFromCameraResponse]
+    ) -> None:
+        request = await stream.recv_message()
+        assert request is not None
+        vision = self.get_resource(request.name)
+        extra = struct_to_dict(request.extra)
+        timeout = stream.deadline.time_remaining() if stream.deadline else None
+        result = await vision.get_classifications_from_camera(request.camera_name, request.n, extra=extra, timeout=timeout)
+        response = GetClassificationsFromCameraResponse(classifications=result)
+        await stream.send_message(response)
+
+    async def GetClassifications(self, stream: Stream[GetClassificationsRequest, GetClassificationsResponse]) -> None:
+        request = await stream.recv_message()
+        assert request is not None
+        vision = self.get_resource(request.name)
+        extra = struct_to_dict(request.extra)
+        timeout = stream.deadline.time_remaining() if stream.deadline else None
+
+        image = ViamImage(request.image, CameraMimeType.from_string(request.mime_type))
+
+        result = await vision.get_classifications(image, request.n, extra=extra, timeout=timeout)
+        response = GetClassificationsResponse(classifications=result)
+        await stream.send_message(response)
+
+    async def GetObjectPointClouds(self, stream: Stream[GetObjectPointCloudsRequest, GetObjectPointCloudsResponse]) -> None:
+        request = await stream.recv_message()
+        assert request is not None
+        vision = self.get_resource(request.name)
+        extra = struct_to_dict(request.extra)
+        timeout = stream.deadline.time_remaining() if stream.deadline else None
+        result = await vision.get_object_point_clouds(request.camera_name, extra=extra, timeout=timeout)
+        response = GetObjectPointCloudsResponse(mime_type=CameraMimeType.PCD, objects=result)
+        await stream.send_message(response)
+
+    async def GetProperties(self, stream: Stream[GetPropertiesRequest, GetPropertiesResponse]) -> None:
+        request = await stream.recv_message()
+        assert request is not None
+        name = request.name
+        vision = self.get_resource(name)
+        extra = struct_to_dict(request.extra)
+        timeout = stream.deadline.time_remaining() if stream.deadline else None
+        properties = await vision.get_properties(extra=extra, timeout=timeout)
+        response = GetPropertiesResponse(
+            classifications_supported=properties.classifications_supported,
+            detections_supported=properties.detections_supported,
+            object_point_clouds_supported=properties.object_point_clouds_supported,
+        )
+        await stream.send_message(response)
+
+    async def DoCommand(self, stream: Stream[DoCommandRequest, DoCommandResponse]) -> None:
+        request = await stream.recv_message()
+        assert request is not None
+        vision = self.get_resource(request.name)
+        timeout = stream.deadline.time_remaining() if stream.deadline else None
+        result = await vision.do_command(struct_to_dict(request.command), timeout=timeout)
+        await stream.send_message(DoCommandResponse(result=dict_to_struct(result)))