kuavo-humanoid-sdk 1.2.1b3290__20250915213813-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kuavo-humanoid-sdk might be problematic; see the registry's advisory page for more details.
- kuavo_humanoid_sdk/__init__.py +6 -0
- kuavo_humanoid_sdk/common/logger.py +45 -0
- kuavo_humanoid_sdk/interfaces/__init__.py +4 -0
- kuavo_humanoid_sdk/interfaces/data_types.py +288 -0
- kuavo_humanoid_sdk/interfaces/end_effector.py +62 -0
- kuavo_humanoid_sdk/interfaces/robot.py +22 -0
- kuavo_humanoid_sdk/interfaces/robot_info.py +56 -0
- kuavo_humanoid_sdk/kuavo/__init__.py +16 -0
- kuavo_humanoid_sdk/kuavo/core/audio.py +32 -0
- kuavo_humanoid_sdk/kuavo/core/core.py +666 -0
- kuavo_humanoid_sdk/kuavo/core/dex_hand_control.py +114 -0
- kuavo_humanoid_sdk/kuavo/core/leju_claw_control.py +67 -0
- kuavo_humanoid_sdk/kuavo/core/llm_doubao.py +608 -0
- kuavo_humanoid_sdk/kuavo/core/microphone.py +192 -0
- kuavo_humanoid_sdk/kuavo/core/navigation.py +70 -0
- kuavo_humanoid_sdk/kuavo/core/ros/audio.py +110 -0
- kuavo_humanoid_sdk/kuavo/core/ros/camera.py +105 -0
- kuavo_humanoid_sdk/kuavo/core/ros/control.py +1524 -0
- kuavo_humanoid_sdk/kuavo/core/ros/microphone.py +38 -0
- kuavo_humanoid_sdk/kuavo/core/ros/navigation.py +217 -0
- kuavo_humanoid_sdk/kuavo/core/ros/observation.py +94 -0
- kuavo_humanoid_sdk/kuavo/core/ros/param.py +201 -0
- kuavo_humanoid_sdk/kuavo/core/ros/sat_utils.py +103 -0
- kuavo_humanoid_sdk/kuavo/core/ros/state.py +652 -0
- kuavo_humanoid_sdk/kuavo/core/ros/tools.py +220 -0
- kuavo_humanoid_sdk/kuavo/core/ros/vision.py +234 -0
- kuavo_humanoid_sdk/kuavo/core/ros_env.py +238 -0
- kuavo_humanoid_sdk/kuavo/core/sdk_deprecated.py +41 -0
- kuavo_humanoid_sdk/kuavo/demo_climbstair.py +249 -0
- kuavo_humanoid_sdk/kuavo/dexterous_hand.py +238 -0
- kuavo_humanoid_sdk/kuavo/leju_claw.py +235 -0
- kuavo_humanoid_sdk/kuavo/logger_client.py +80 -0
- kuavo_humanoid_sdk/kuavo/robot.py +561 -0
- kuavo_humanoid_sdk/kuavo/robot_arm.py +411 -0
- kuavo_humanoid_sdk/kuavo/robot_audio.py +39 -0
- kuavo_humanoid_sdk/kuavo/robot_blockly.py +1154 -0
- kuavo_humanoid_sdk/kuavo/robot_climbstair.py +1607 -0
- kuavo_humanoid_sdk/kuavo/robot_head.py +95 -0
- kuavo_humanoid_sdk/kuavo/robot_info.py +134 -0
- kuavo_humanoid_sdk/kuavo/robot_microphone.py +19 -0
- kuavo_humanoid_sdk/kuavo/robot_navigation.py +135 -0
- kuavo_humanoid_sdk/kuavo/robot_observation.py +64 -0
- kuavo_humanoid_sdk/kuavo/robot_speech.py +24 -0
- kuavo_humanoid_sdk/kuavo/robot_state.py +310 -0
- kuavo_humanoid_sdk/kuavo/robot_tool.py +109 -0
- kuavo_humanoid_sdk/kuavo/robot_vision.py +81 -0
- kuavo_humanoid_sdk/kuavo_strategy/__init__.py +2 -0
- kuavo_humanoid_sdk/kuavo_strategy/grasp_box/grasp_box_strategy.py +1325 -0
- kuavo_humanoid_sdk/kuavo_strategy/kuavo_strategy.py +106 -0
- kuavo_humanoid_sdk/kuavo_strategy_v2/common/data_type.py +340 -0
- kuavo_humanoid_sdk/kuavo_strategy_v2/common/events/base_event.py +215 -0
- kuavo_humanoid_sdk/kuavo_strategy_v2/common/robot_sdk.py +25 -0
- kuavo_humanoid_sdk/kuavo_strategy_v2/pick_place_box/case.py +331 -0
- kuavo_humanoid_sdk/kuavo_strategy_v2/pick_place_box/strategy.py +504 -0
- kuavo_humanoid_sdk/kuavo_strategy_v2/utils/logger_setup.py +40 -0
- kuavo_humanoid_sdk/kuavo_strategy_v2/utils/utils.py +88 -0
- kuavo_humanoid_sdk/msg/__init__.py +4 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/__init__.py +7 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_AprilTagDetection.py +306 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_AprilTagDetectionArray.py +437 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_AudioReceiverData.py +122 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_FTsensorData.py +260 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_JoySticks.py +191 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_Metadata.py +199 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_MmDetectionMsg.py +264 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_RobotActionState.py +112 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_TFArray.py +323 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_TaskPoint.py +175 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/__init__.py +62 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_armCollisionCheckInfo.py +160 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_armHandPose.py +161 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_armHandPoseFree.py +171 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_armPoseWithTimeStamp.py +168 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_armTargetPoses.py +171 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_bezierCurveCubicPoint.py +178 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_dexhandCommand.py +229 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_dexhandTouchState.py +256 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_endEffectorData.py +227 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_footPose.py +123 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_footPose6D.py +123 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_footPose6DTargetTrajectories.py +320 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_footPoseTargetTrajectories.py +301 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_footPoseWithVision.py +136 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_footPoseWithVisionArray.py +231 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_footPoses.py +149 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_footPoses6D.py +149 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_fullBodyTargetTrajectories.py +258 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_gaitTimeName.py +147 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_gestureInfo.py +218 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_gestureTask.py +149 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_handPose.py +136 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_headBodyPose.py +145 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_ikSolveError.py +171 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_ikSolveParam.py +140 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_imuData.py +165 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_jointBezierTrajectory.py +201 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_jointCmd.py +390 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_jointData.py +205 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_kuavoModeSchedule.py +224 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_lejuClawCommand.py +320 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_lejuClawState.py +341 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_motorParam.py +122 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_picoPoseInfo.py +143 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_picoPoseInfoList.py +220 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_planArmState.py +120 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_qv.py +121 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_robotArmQVVD.py +177 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_robotBodyMatrices.py +332 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_robotHandPosition.py +225 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_robotHeadMotionData.py +128 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_robotState.py +222 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_sensorsData.py +655 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_switchGaitByName.py +200 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_tagDataArray.py +216 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_touchSensorStatus.py +162 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_twoArmHandPose.py +273 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_twoArmHandPoseCmd.py +316 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_twoArmHandPoseCmdFree.py +338 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_twoArmHandPoseFree.py +299 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_yoloDetection.py +251 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/msg/_yoloOutputData.py +168 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_CreatePath.py +581 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_ExecuteArmAction.py +281 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_GetAllMaps.py +241 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_GetCurrentMap.py +225 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_GetTargetPartPoseInCamera.py +298 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_InitialPoseWithTaskPoint.py +281 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_LoadMap.py +281 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_NavigateToTaskPoint.py +281 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_RepublishTFs.py +373 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_SetInitialPose.py +394 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_SetJoyTopic.py +282 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_SetLEDMode.py +468 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_SetLEDMode_free.py +289 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_SpeechSynthesis.py +270 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_TaskPointOperation.py +536 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/__init__.py +43 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_adjustZeroPoint.py +277 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_changeArmCtrlMode.py +275 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_changeArmCtrlModeKuavo.py +236 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_changeMotorParam.py +299 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_changeTorsoCtrlMode.py +274 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_controlLejuClaw.py +408 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_enableHandTouchSensor.py +304 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_fkSrv.py +395 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_footPose6DTargetTrajectoriesSrv.py +426 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_footPoseTargetTrajectoriesSrv.py +409 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_gestureExecute.py +339 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_gestureExecuteState.py +257 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_gestureList.py +418 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_getCurrentGaitName.py +253 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_getMotorParam.py +299 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_getMotorZeroPoints.py +286 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_handForceLevel.py +330 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_jointMoveTo.py +302 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_planArmTrajectoryBezierCurve.py +422 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_planArmTrajectoryCubicSpline.py +490 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_playmusic.py +268 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_setHwIntialState.py +304 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_setMmCtrlFrame.py +273 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_setMotorEncoderRoundService.py +283 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_setTagId.py +275 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_singleStepControl.py +444 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_twoArmHandPoseCmdFreeSrv.py +716 -0
- kuavo_humanoid_sdk/msg/kuavo_msgs/srv/_twoArmHandPoseCmdSrv.py +664 -0
- kuavo_humanoid_sdk/msg/motion_capture_ik/__init__.py +7 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/__init__.py +7 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/__init__.py +12 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_constraint.py +142 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_controller_data.py +121 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_lagrangian_metrics.py +148 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_mode_schedule.py +150 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_mpc_flattened_controller.py +666 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_mpc_input.py +122 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_mpc_observation.py +209 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_mpc_performance_indices.py +140 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_mpc_solver_data.py +886 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_mpc_state.py +122 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_mpc_target_trajectories.py +239 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/msg/_multiplier.py +148 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/srv/__init__.py +1 -0
- kuavo_humanoid_sdk/msg/ocs2_msgs/srv/_reset.py +376 -0
- kuavo_humanoid_sdk-1.2.1b3290.dist-info/METADATA +296 -0
- kuavo_humanoid_sdk-1.2.1b3290.dist-info/RECORD +186 -0
- kuavo_humanoid_sdk-1.2.1b3290.dist-info/WHEEL +6 -0
- kuavo_humanoid_sdk-1.2.1b3290.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1154 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import signal
|
|
3
|
+
from kuavo_humanoid_sdk import KuavoPose
|
|
4
|
+
from kuavo_humanoid_sdk.kuavo.robot import KuavoRobot
|
|
5
|
+
from kuavo_humanoid_sdk.kuavo.robot_state import KuavoRobotState
|
|
6
|
+
from kuavo_humanoid_sdk.kuavo.robot_audio import KuavoRobotAudio
|
|
7
|
+
from kuavo_humanoid_sdk.kuavo.robot_climbstair import KuavoRobotClimbStair, set_pitch_limit
|
|
8
|
+
from kuavo_humanoid_sdk.interfaces.data_types import KuavoPose
|
|
9
|
+
from kuavo_msgs.srv import planArmTrajectoryBezierCurve, planArmTrajectoryBezierCurveRequest
|
|
10
|
+
from kuavo_msgs.msg import planArmState, jointBezierTrajectory, bezierCurveCubicPoint, robotHandPosition, robotHeadMotionData, sensorsData
|
|
11
|
+
from geometry_msgs.msg import Twist
|
|
12
|
+
import asyncio
|
|
13
|
+
import math
|
|
14
|
+
import json
|
|
15
|
+
import os
|
|
16
|
+
import rospkg
|
|
17
|
+
import rospy
|
|
18
|
+
import threading
|
|
19
|
+
import tf
|
|
20
|
+
import time
|
|
21
|
+
import pwd
|
|
22
|
+
import numpy as np
|
|
23
|
+
from sensor_msgs.msg import JointState
|
|
24
|
+
from trajectory_msgs.msg import JointTrajectory
|
|
25
|
+
|
|
26
|
+
from geometry_msgs.msg import Pose, Point, Quaternion
|
|
27
|
+
|
|
28
|
+
from std_msgs.msg import Float64MultiArray
|
|
29
|
+
|
|
30
|
+
import inspect
|
|
31
|
+
from kuavo_humanoid_sdk.kuavo.core.model_utils.model_utils import *
|
|
32
|
+
from kuavo_humanoid_sdk.kuavo.core.model_utils import model_utils as model_utils
|
|
33
|
+
|
|
34
|
+
from enum import IntEnum
|
|
35
|
+
# Global run flag for handling Ctrl+C: control loops keep running while True.
running = False
# Robot generation, read from the ROBOT_VERSION env var (defaults to "45").
robot_version = int(os.environ.get("ROBOT_VERSION", "45"))
# Latest arm/hand/head targets parsed from /bezier/arm_traj (see traj_callback);
# republished periodically by timer_callback.
ocs2_joint_state = JointState()
ocs2_hand_state = robotHandPosition()
ocs2_head_state = robotHeadMotionData()
if robot_version >= 40:
    # Initial arm pose in degrees; first 14 entries map to the 14 arm joints,
    # the trailing zeros are presumably padding for hand/head channels — confirm.
    INIT_ARM_POS = [20, 0, 0, -30, 0, 0, 0, 20, 0, 0, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
else:
    INIT_ARM_POS = [22.91831, 0, 0, -45.83662, 22.91831, 0, 0, -45.83662] # task.info: shoulder_center: 0.4rad, elbow_center: -0.8rad
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def signal_handler(sig, frame):
    """Handle Ctrl+C: clear the global run flag so active control loops wind down.

    Args:
        sig: Signal number.
        frame: Current stack frame.
    """
    global running
    running = False
    print('\nCtrl+C pressed. Stopping robot...')
|
|
57
|
+
|
|
58
|
+
def frames_to_custom_action_data(file_path: str):
    """Parse an action file into per-joint Bezier keyframe data.

    Each servo column becomes a 1-based joint id mapped to a list of
    ``[end_point, left_control_point, right_control_point]`` triples, with
    times converted from centiseconds to seconds and values kept as ints.
    When a joint's first keyframe is not at time zero, a resting segment at
    the initial arm pose is prepended.

    Args:
        file_path (str): Path to the action file.

    Returns:
        dict: Action data for each joint.
    """
    with open(file_path, "r") as f:
        frames = json.load(f)["frames"]

    action_data = {}
    for frame in frames:
        servos = frame["servos"]
        keyframe = frame["keyframe"]
        attribute = frame["attribute"]
        for idx, value in enumerate(servos):
            joint_id = idx + 1
            curves = action_data.setdefault(joint_id, [])
            # Seed a segment at t=0 if the first recorded keyframe starts later.
            if keyframe != 0 and not curves and joint_id <= len(INIT_ARM_POS):
                init_pos = INIT_ARM_POS[joint_id - 1]
                curves.append([
                    [0, init_pos],
                    [0, 0],
                    [0, init_pos],
                ])
            if value is not None:
                left_cp, right_cp = attribute[str(joint_id)]["CP"]
                curves.append([
                    [round(keyframe / 100, 1), int(value)],
                    [round((keyframe + left_cp[0]) / 100, 1), int(value + left_cp[1])],
                    [round((keyframe + right_cp[0]) / 100, 1), int(value + right_cp[1])],
                ])
    return action_data
|
|
93
|
+
|
|
94
|
+
def get_start_end_frame_time(file_path: str):
    """Read the start and end frame times from an action file.

    Args:
        file_path (str): Path to the action file.

    Returns:
        tuple: (start_frame_time, end_frame_time) in seconds.
    """
    with open(file_path, "r") as f:
        data = json.load(f)
    # "first"/"finish" are stored in centiseconds; convert to seconds.
    return data["first"] / 100, data["finish"] / 100
|
|
108
|
+
|
|
109
|
+
def frames_to_custom_action_data_ocs2(file_path: str, start_frame_time: float, current_arm_joint_state: list):
    """Parse action file and convert frames to custom action data for OCS2.

    Like frames_to_custom_action_data, but joint values are converted to
    radians, and the result is trimmed/retimed so playback begins at
    ``start_frame_time`` from the robot's current arm pose.

    Args:
        file_path (str): Path to the action file.
        start_frame_time (float): Start frame time (seconds).
        current_arm_joint_state (list): Current arm joint state (radians,
            indexed by joint id - 1).

    Returns:
        dict: Filtered action data for each joint.
    """
    def filter_data(action_data, start_frame_time, current_arm_joint_state):
        """Filter and adjust action data for OCS2.

        Drops frames before the start time, shifts remaining times left by
        ``start_frame_time - 1``, and splices in a smooth Bezier transition
        from the current joint state to the first kept frame.

        Args:
            action_data (dict): Raw action data.
            start_frame_time (float): Start frame time.
            current_arm_joint_state (list): Current arm joint state.

        Returns:
            dict: Filtered action data.
        """
        filtered_action_data = {}
        x_shift = start_frame_time - 1
        for key, frames in action_data.items():
            filtered_frames = []
            found_start = False
            skip_next = False
            # NOTE(review): iteration starts at i = -1, so frames[-1] (the last
            # frame) is examined first with next_frame = frames[0]; this looks
            # intended to catch a start time at or before the first keyframe —
            # confirm against the action-file format.
            for i in range(-1, len(frames)):
                frame = frames[i]
                if i == len(frames) - 1:
                    next_frame = frame
                else:
                    next_frame = frames[i+1]
                end_time = next_frame[0][0]

                if not found_start and end_time >= start_frame_time:
                    found_start = True

                    # Transition start: current pose at t=0; end: first kept frame.
                    p0 = np.array([0, current_arm_joint_state[key-1]])
                    p3 = np.array([next_frame[0][0] - x_shift, next_frame[0][1]])

                    # Calculate control points for smooth transition
                    curve_length = np.linalg.norm(p3 - p0)
                    p1 = p0 + curve_length * 0.25 * np.array([1, 0]) # Move 1/4 curve length to the right
                    p2 = p3 - curve_length * 0.25 * np.array([1, 0]) # Move 1/4 curve length to the left

                    # Create new frame
                    frame1 = [
                        p0.tolist(),
                        p0.tolist(),
                        p1.tolist()
                    ]

                    # Modify next_frame's left control point (mutates action_data in place).
                    next_frame[1] = p2.tolist()

                    filtered_frames.append(frame1)
                    # The frame we just retargeted is emitted on its own iteration;
                    # skip the duplicate append below.
                    skip_next = True

                if found_start:
                    if skip_next:
                        skip_next = False
                        continue
                    # Shift times left by x_shift and round for output.
                    end_point = [round(frame[0][0] - x_shift, 1), round(frame[0][1], 1)]
                    left_control_point = [round(frame[1][0] - x_shift, 1), round(frame[1][1], 1)]
                    right_control_point = [round(frame[2][0] - x_shift, 1), round(frame[2][1], 1)]
                    filtered_frames.append([end_point, left_control_point, right_control_point])

            filtered_action_data[key] = filtered_frames
        return filtered_action_data

    with open(file_path, "r") as f:
        data = json.load(f)
    frames = data["frames"]
    action_data = {}
    for frame in frames:
        servos, keyframe, attribute = frame["servos"], frame["keyframe"], frame["attribute"]
        for index, value in enumerate(servos):
            key = index + 1
            if key not in action_data:
                action_data[key] = []
            # Prepend a resting segment (initial pose, in radians) when the
            # first recorded keyframe does not start at time zero.
            if keyframe != 0 and len(action_data[key]) == 0:
                if key <= len(INIT_ARM_POS):
                    action_data[key].append([
                        [0, math.radians(INIT_ARM_POS[key-1])],
                        [0, math.radians(INIT_ARM_POS[key-1])],
                        [0, math.radians(INIT_ARM_POS[key-1])],
                    ])
            if value is not None:
                CP = attribute[str(key)]["CP"]
                left_CP, right_CP = CP
                # Times: centiseconds -> seconds; values: degrees -> radians.
                action_data[key].append([
                    [round(keyframe/100, 1), math.radians(value)],
                    [round((keyframe+left_CP[0])/100, 1), math.radians(value+left_CP[1])],
                    [round((keyframe+right_CP[0])/100, 1), math.radians(value+right_CP[1])],
                ])
    return filter_data(action_data, start_frame_time, current_arm_joint_state)
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
class RobotArmControlMode(IntEnum):
    """Arm control modes accepted by set_arm_control_mode.

    Attributes:
        Fixed: Arm remains stationary.
        AutoSwing: Arm swings automatically.
        ExternalControl: Arm follows externally published control signals.
    """

    Fixed = 0
    AutoSwing = 1
    ExternalControl = 2
|
|
220
|
+
|
|
221
|
+
# Arm joint names published on /kuavo_arm_traj; the set depends on the robot
# generation selected by ROBOT_VERSION.
if robot_version >= 40:
    # 14 arm joints: 7 per arm (pitch/roll/yaw chain).
    joint_names = [
        "l_arm_pitch",
        "l_arm_roll",
        "l_arm_yaw",
        "l_forearm_pitch",
        "l_hand_yaw",
        "l_hand_pitch",
        "l_hand_roll",
        "r_arm_pitch",
        "r_arm_roll",
        "r_arm_yaw",
        "r_forearm_pitch",
        "r_hand_yaw",
        "r_hand_pitch",
        "r_hand_roll",
    ]
else:
    # 8 arm joints: 4 per arm on older generations.
    joint_names = [
        "zarm_l1_link", "zarm_l2_link", "zarm_l3_link", "zarm_l4_link",
        "zarm_r1_link", "zarm_r2_link", "zarm_r3_link", "zarm_r4_link",
    ]
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# Per-controller ROS service/topic names for arm trajectory planning,
# keyed by the global g_robot_type ("kuavo" or "ocs2").
robot_settings = {
    "kuavo": {
        "plan_arm_trajectory_bezier_service_name": "/plan_arm_trajectory_bezier_curve",
        "stop_plan_arm_trajectory_service_name": "/stop_plan_arm_trajectory",
        "arm_traj_state_topic_name": "/robot_plan_arm_state",
    },
    "ocs2": {
        "plan_arm_trajectory_bezier_service_name": "/bezier/plan_arm_trajectory",
        "stop_plan_arm_trajectory_service_name": "/bezier/stop_plan_arm_trajectory",
        "arm_traj_state_topic_name": "/bezier/arm_traj_state",
    }
}
|
|
257
|
+
|
|
258
|
+
class RobotControlBlockly:
|
|
259
|
+
    def __init__(self):
        """Initialize the robot control class, create robot objects, register signal handlers, and initialize ROS publishers and subscribers."""
        self.robot = KuavoRobot()
        self.robot_state = KuavoRobotState()
        self.robot_audio = KuavoRobotAudio()
        self.climb_stair = KuavoRobotClimbStair()
        # Register signal handler so Ctrl+C clears the global `running` flag.
        signal.signal(signal.SIGINT, signal_handler)
        # Controller flavour; selects service/topic names from robot_settings.
        global g_robot_type
        g_robot_type = "ocs2"
        # asyncio loop for background walking; created lazily on first walk().
        self.loop = None
        self.init_ros_publishers()
        # NOTE(review): not referenced in this chunk; presumably a head-control
        # debounce threshold — confirm against the rest of the class.
        self.THRESHOLD_HEAD_CONTROL_COUNT = 30
        self.FLOAT_MAX = float('inf')
        # Bring the robot to stance on startup (to_stance defined elsewhere in this class).
        self.to_stance()

        package_name = 'planarmwebsocketservice'
        self.package_path = rospkg.RosPack().get_path(package_name)
        # When launched via sudo, resolve the invoking user's home directory so
        # action files are read from their config, not root's.
        sudo_user = os.environ.get("SUDO_USER")
        if sudo_user:
            user_info = pwd.getpwnam(sudo_user)
            home_path = user_info.pw_dir
        else:
            home_path = os.path.expanduser("~")
        self.ACTION_FILE_FOLDER = os.path.join(home_path, '.config', 'lejuconfig', 'action_files')
        # Latest planner progress/completion, updated by plan_arm_state_callback.
        self.plan_arm_state_progress = 0
        self.plan_arm_state_status = False
|
|
286
|
+
|
|
287
|
+
def set_action_file_path(self, proj_name: str):
|
|
288
|
+
"""Set the action file path for a specific project.
|
|
289
|
+
|
|
290
|
+
Args:
|
|
291
|
+
proj_name (str): Project name.
|
|
292
|
+
"""
|
|
293
|
+
self.ACTION_FILE_FOLDER = self.package_path + "/upload_files/" + proj_name + "/action_files"
|
|
294
|
+
|
|
295
|
+
    def init_ros_publishers(self):
        """Initialize ROS publishers and subscribers for arm, hand, head control topics, sensor data, and trajectory state."""
        global kuavo_arm_traj_pub, control_hand_pub, control_head_pub, g_robot_type, control_waist_pub
        # Publishers for arm joint targets, hand positions, and head motion.
        kuavo_arm_traj_pub = rospy.Publisher('/kuavo_arm_traj', JointState, queue_size=1, tcp_nodelay=True)
        control_hand_pub = rospy.Publisher('/control_robot_hand_position', robotHandPosition, queue_size=1, tcp_nodelay=True)
        control_head_pub = rospy.Publisher('/robot_head_motion_data', robotHeadMotionData, queue_size=1, tcp_nodelay=True)

        control_waist_pub = rospy.Publisher('/robot_waist_motion_data', Float64MultiArray, queue_size=1, tcp_nodelay=True)

        # Raw sensor feed keeps the cached arm/head joint state fresh.
        rospy.Subscriber('/sensors_data_raw', sensorsData, self.sensors_data_callback, queue_size=1, tcp_nodelay=True)
        # 100 Hz timer republishes the latest planned arm/hand state.
        rospy.Timer(rospy.Duration(0.01), self.timer_callback)
        # Planner state topic depends on the active controller type.
        rospy.Subscriber(robot_settings[g_robot_type]["arm_traj_state_topic_name"], planArmState, self.plan_arm_state_callback)
        rospy.Subscriber('/bezier/arm_traj', JointTrajectory, self.traj_callback, queue_size=1, tcp_nodelay=True)
|
|
308
|
+
|
|
309
|
+
    def timer_callback(self, event):
        """Timer callback for periodically publishing arm, hand, and head state to ROS topics.

        Only publishes when robot type is 'ocs2', a trajectory point has been
        received (position is non-empty), and the trajectory is not finished.

        Args:
            event: rospy.TimerEvent supplied by the timer (unused).
        """
        global kuavo_arm_traj_pub, control_hand_pub, control_head_pub, g_robot_type
        if g_robot_type == "ocs2" and len(ocs2_joint_state.position) > 0 and self.plan_arm_state_status is False:
            kuavo_arm_traj_pub.publish(ocs2_joint_state)
            control_hand_pub.publish(ocs2_hand_state)
            # Head republishing intentionally disabled here.
            # control_head_pub.publish(ocs2_head_state)
|
|
319
|
+
|
|
320
|
+
    def sensors_data_callback(self, msg):
        """Callback for sensor data, extracts current arm joint state and saves to global variables.

        NOTE(review): robot versions in [30, 40) fall through both branches and
        leave the globals unset — confirm whether that range can occur.

        Args:
            msg (sensorsData): Sensor data message.
        """
        global current_arm_joint_state, robot_version, current_head_joint_state
        if robot_version >= 40:
            # 14 arm joints live at joint_q[12:26] on this generation.
            current_arm_joint_state = msg.joint_data.joint_q[12:26]
            current_arm_joint_state = [round(pos, 2) for pos in current_arm_joint_state]
            # Pad to the full channel count expected downstream.
            current_arm_joint_state.extend([0] * 14)
            current_head_joint_state = msg.joint_data.joint_q[-2:]
            current_head_joint_state = [round(pos, 4) for pos in current_head_joint_state]
        elif robot_version >= 10 and robot_version < 30:
            # 8 arm joints live at joint_q[13:21] on older generations.
            current_arm_joint_state = msg.joint_data.joint_q[13:21]
            # NOTE(review): rounding precision differs between branches (2 vs 5
            # decimal places) — confirm this asymmetry is intentional.
            current_arm_joint_state = [round(pos, 5) for pos in current_arm_joint_state]
            current_arm_joint_state.extend([0] * 14)
            current_head_joint_state = msg.joint_data.joint_q[-2:]
            current_head_joint_state = [round(pos, 4) for pos in current_head_joint_state]
|
|
339
|
+
|
|
340
|
+
def plan_arm_state_callback(self, msg: planArmState):
|
|
341
|
+
"""Callback for arm trajectory state, updates global variables for progress and completion.
|
|
342
|
+
|
|
343
|
+
Args:
|
|
344
|
+
msg (planArmState): Arm trajectory state message.
|
|
345
|
+
"""
|
|
346
|
+
self.plan_arm_state_progress = msg.progress
|
|
347
|
+
self.plan_arm_state_status = msg.is_finished
|
|
348
|
+
|
|
349
|
+
    def traj_callback(self, msg):
        """Callback for trajectory messages, parses trajectory points and updates global arm, hand, and head state variables.

        Only the first point of the trajectory is consumed; positions and
        velocities arrive in radians and are converted to degrees.

        Args:
            msg (JointTrajectory): Trajectory message.
        """
        global ocs2_joint_state
        global robot_version
        if len(msg.points) == 0:
            return
        point = msg.points[0]

        if robot_version >= 40:
            # Layout: [0:14] arm joints, [14:20] left hand, [20:26] right hand,
            # [26:] head.
            ocs2_joint_state.name = joint_names
            ocs2_joint_state.position = [math.degrees(pos) for pos in point.positions[:14]]
            ocs2_joint_state.velocity = [math.degrees(vel) for vel in point.velocities[:14]]
            ocs2_joint_state.effort = [0] * 14
            ocs2_hand_state.left_hand_position = [int(math.degrees(pos)) for pos in point.positions[14:20]]
            ocs2_hand_state.right_hand_position = [int(math.degrees(pos)) for pos in point.positions[20:26]]
            ocs2_head_state.joint_data = [math.degrees(pos) for pos in point.positions[26:]]

        elif robot_version >= 10 and robot_version < 30:
            # Older generations carry only 8 arm joints; no hand/head channels here.
            ocs2_joint_state.name = joint_names
            ocs2_joint_state.position = [math.degrees(pos) for pos in point.positions[:8]]
            ocs2_joint_state.velocity = [math.degrees(vel) for vel in point.velocities[:8]]
            ocs2_joint_state.effort = [0] * 8
|
|
375
|
+
|
|
376
|
+
    def arm_reset(self):
        """Reset the arm position to the initial state.

        Best-effort: failures are reported on stdout instead of being raised.
        """
        try:
            self.robot.arm_reset()
            # Brief settle delay before returning control to the caller.
            time.sleep(1)
        except Exception as e:
            print(f"Arm reset failed: {str(e)}")
|
|
383
|
+
|
|
384
|
+
    def set_auto_swing_arm_mode(self):
        """Set the arm to auto swing mode.

        Best-effort: failures are reported on stdout instead of being raised.
        """
        try:
            self.robot.set_auto_swing_arm_mode()
            # Brief settle delay so the mode switch takes effect before returning.
            time.sleep(1)
        except Exception as e:
            print(f"Set auto swing arm mode failed: {str(e)}")
|
|
391
|
+
|
|
392
|
+
    def set_fixed_arm_mode(self):
        """Set the arm to fixed mode.

        Best-effort: failures are reported on stdout instead of being raised.
        """
        try:
            self.robot.set_fixed_arm_mode()
            # Brief settle delay so the mode switch takes effect before returning.
            time.sleep(1)
        except Exception as e:
            print(f"Set fixed arm mode failed: {str(e)}")
|
|
399
|
+
|
|
400
|
+
    def set_external_control_arm_mode(self):
        """Set the arm to external control mode.

        Unlike the other mode setters, the underlying call returns a success
        flag, which is reported on stdout. Failures are printed, not raised.
        """
        try:
            if self.robot.set_external_control_arm_mode():
                print("Set external control mode succeeded")
            else:
                print("Set external control mode failed")
            # Brief settle delay so the mode switch takes effect before returning.
            time.sleep(1)
        except Exception as e:
            print(f"Set external control arm mode failed: {str(e)}")
|
|
410
|
+
|
|
411
|
+
def set_arm_control_mode(self, mode: int):
|
|
412
|
+
"""Set the arm control mode.
|
|
413
|
+
|
|
414
|
+
Args:
|
|
415
|
+
mode (int): 0 - Fixed, 1 - AutoSwing, 2 - ExternalControl
|
|
416
|
+
"""
|
|
417
|
+
try:
|
|
418
|
+
if mode == RobotArmControlMode.Fixed:
|
|
419
|
+
self.set_fixed_arm_mode()
|
|
420
|
+
elif mode == RobotArmControlMode.AutoSwing:
|
|
421
|
+
self.set_auto_swing_arm_mode()
|
|
422
|
+
elif mode == RobotArmControlMode.ExternalControl:
|
|
423
|
+
self.set_external_control_arm_mode()
|
|
424
|
+
except Exception as e:
|
|
425
|
+
print(f"Set arm control mode failed: {str(e)}")
|
|
426
|
+
|
|
427
|
+
def _start_loop_in_thread(self):
|
|
428
|
+
"""Start a new asyncio event loop in a separate thread."""
|
|
429
|
+
self.loop = asyncio.new_event_loop()
|
|
430
|
+
def run_loop():
|
|
431
|
+
asyncio.set_event_loop(self.loop)
|
|
432
|
+
self.loop.run_forever()
|
|
433
|
+
t = threading.Thread(target=run_loop, daemon=True)
|
|
434
|
+
t.start()
|
|
435
|
+
|
|
def walk(self, linear_x: float, linear_y: float, angular_z: float):
    """Start walking with specified velocities.

    Args:
        linear_x (float): Linear velocity in x direction.
        linear_y (float): Linear velocity in y direction.
        angular_z (float): Angular velocity around z axis (radians).
    """
    # Lazily create the background asyncio loop used to stream walk commands.
    if self.loop is None:
        self._start_loop_in_thread()
    global running
    # If already walking, stop first
    global global_linear_x, global_linear_y, global_angular_z
    # Publish the latest velocities via module-level globals so the
    # already-running coroutine picks them up on its next tick.
    global_linear_x = linear_x
    global_linear_y = linear_y
    global_angular_z = angular_z
    if running:
        # A walk coroutine is active: push one command with the new velocities.
        self.robot.walk(global_linear_x, global_linear_y, global_angular_z)
    else:
        # Starting from standstill: raise the torso first, then launch the
        # streaming coroutine on the background loop.
        self.control_robot_height("up", 0.4, 0.0)
        running = True
        async def robot_walk():
            try:
                # Re-send the current velocity every 100 ms until stop()
                # clears the `running` flag.
                while running:
                    self.robot.walk(global_linear_x, global_linear_y, global_angular_z)
                    await asyncio.sleep(0.1)
                # Decelerate to zero, then return to stance.
                self.robot.walk(0.0, 0.0, 0.0)
                await asyncio.sleep(0.5)
                self.robot.stance()
            except asyncio.CancelledError:
                pass
        asyncio.run_coroutine_threadsafe(robot_walk(), self.loop)
468
|
+
|
|
def walk_angle(self, linear_x: float, linear_y: float, angular_z: float):
    """Walk with the yaw rate expressed in degrees.

    Args:
        linear_x (float): Linear velocity in x direction.
        linear_y (float): Linear velocity in y direction.
        angular_z (float): Angular velocity around z axis, in degrees.
    """
    # Degrees -> radians, then delegate to the radian-based walk().
    self.walk(linear_x, linear_y, math.radians(angular_z))
480
|
+
|
|
def start(self):
    """Begin robot movement by switching to the trot gait."""
    self.robot.trot()
484
|
+
|
|
def stop(self):
    """Clear the running flag and block until the robot reaches stance."""
    global running
    running = False
    # Poll (sleep first, then check) until the gait controller reports stance.
    stance_reached = False
    while not stance_reached:
        time.sleep(0.01)
        stance_reached = self.robot_state.is_stance()
    # Wait for gait switch to complete
    time.sleep(0.5)
495
|
+
|
|
def control_arm_position(self, joint_positions: list):
    """Move the arm to the specified joint angles using interpolation for smooth transition.

    Args:
        joint_positions (list): Target joint angles in degrees (8 values).
    """
    try:
        self.robot.arm_reset()
        q0 = [0.0] * 8  # interpolation start: the reset pose
        num = 90
        # FIX: the original iterated i = 0..num-1, so the last frame was only
        # (num-1)/num of the way there and the arm never reached the commanded
        # target. Iterating 1..num makes the final frame land exactly on it.
        for i in range(1, num + 1):
            fraction = i / float(num)
            frame_deg = [q0[j] + fraction * (joint_positions[j] - q0[j]) for j in range(8)]
            # Convert degrees to radians before commanding the robot.
            self.robot.control_arm_position([math.radians(angle) for angle in frame_deg])
            time.sleep(0.02)
    except Exception as e:
        print(f"Arm position control failed: {str(e)}")
519
|
+
|
|
def stance(self):
    """Command the robot to stand and block until stance is reached."""
    try:
        self.robot.stance()
        reached = False
        while not reached:
            time.sleep(0.01)
            reached = self.robot_state.is_stance()
    except Exception as e:
        print(f"Stance adjustment failed: {str(e)}")
530
|
+
|
|
def create_bezier_request(self, action_data, start_frame_time, end_frame_time):
    """Construct a Bezier curve trajectory request message for arm trajectory planning service.

    Args:
        action_data (dict): Joint action data; each value is a sequence of
            (end_point, left_control_point, right_control_point) frames.
        start_frame_time (float): Start frame time.
        end_frame_time (float): End frame time.

    Returns:
        planArmTrajectoryBezierCurveRequest: Bezier curve trajectory request object.
    """
    global robot_version
    req = planArmTrajectoryBezierCurveRequest()
    for joint_frames in action_data.values():
        joint_msg = jointBezierTrajectory()
        for end_pt, left_cp, right_cp in joint_frames:
            point = bezierCurveCubicPoint()
            point.end_point = end_pt
            point.left_control_point = left_cp
            point.right_control_point = right_cp
            joint_msg.bezier_curve_points.append(point)
        req.multi_joint_bezier_trajectory.append(joint_msg)
    req.start_frame_time = start_frame_time
    req.end_frame_time = end_frame_time
    # Joint naming differs between robot hardware generations.
    if robot_version >= 40:
        req.joint_names = [
            "l_arm_pitch", "l_arm_roll", "l_arm_yaw", "l_forearm_pitch", "l_hand_yaw", "l_hand_pitch", "l_hand_roll",
            "r_arm_pitch", "r_arm_roll", "r_arm_yaw", "r_forearm_pitch", "r_hand_yaw", "r_hand_pitch", "r_hand_roll"
        ]
    else:
        req.joint_names = [
            "zarm_l1_link", "zarm_l2_link", "zarm_l3_link", "zarm_l4_link",
            "zarm_r1_link", "zarm_r2_link", "zarm_r3_link", "zarm_r4_link"
        ]
    return req
564
|
+
|
|
def plan_arm_trajectory_bezier_curve_client(self, req):
    """Call the arm Bezier curve trajectory planning service and send the trajectory request.

    Args:
        req: Bezier trajectory request object.

    Returns:
        bool: True if service call succeeded, False otherwise.
    """
    service_name = robot_settings[g_robot_type]["plan_arm_trajectory_bezier_service_name"]
    rospy.wait_for_service(service_name)
    try:
        plan_service = rospy.ServiceProxy(service_name, planArmTrajectoryBezierCurve)
        response = plan_service(req)
    except rospy.ServiceException as e:
        rospy.logerr(f"Service call failed: {e}")
        return False
    return response.success
583
|
+
|
|
def excute_action_file(self, action_file: str, proj_name: str = None, music_file: str = None):
    """Execute an action file, parse action frames, and send Bezier trajectory request to the robot.

    Args:
        action_file (str): Action file name (without the ".tact" extension).
        proj_name (str, optional): Project name; when given, the file is looked
            up under the project's upload_files directory instead of
            ACTION_FILE_FOLDER.
        music_file (str, optional): Music file to play alongside the action.
    """
    global g_robot_type
    # NOTE(review): g_robot_type is forced to "ocs2" here, so the non-ocs2
    # branch below is currently unreachable — confirm whether that is intended.
    g_robot_type = "ocs2"
    # Cleared here; presumably set True elsewhere (a plan-arm-state callback)
    # once the trajectory finishes — verify against the subscriber.
    self.plan_arm_state_status = False

    action_filename = action_file + ".tact"
    if proj_name is None:
        action_file_path = os.path.expanduser(f"{self.ACTION_FILE_FOLDER}/{action_filename}")
    else:
        action_file_path = self.package_path + "/upload_files/" + proj_name + "/action_files/" + action_filename
    if not os.path.exists(action_file_path):
        print(f"Action file not found: {action_file_path}")
        return

    start_frame_time, end_frame_time = get_start_end_frame_time(action_file_path)

    if g_robot_type == "ocs2":
        # Prepend the current arm pose so the trajectory starts where the arm is.
        action_frames = frames_to_custom_action_data_ocs2(action_file_path, start_frame_time, current_arm_joint_state)
        end_frame_time += 1
        # 2 == ExternalControl; required for direct arm trajectory playback.
        self.set_arm_control_mode(2)
    else:
        action_frames = frames_to_custom_action_data(action_file_path)

    req = self.create_bezier_request(action_frames, start_frame_time, end_frame_time)
    if music_file is not None:
        self.play_music(music_file)
    self.plan_arm_trajectory_bezier_curve_client(req)
    time.sleep(0.5)
    # Block until the plan-arm-state callback signals completion.
    while self.plan_arm_state_status is False:
        time.sleep(0.01)
    print("action file executed")
621
|
+
|
|
def control_robot_head(self, yaw: float, pitch: float):
    """Control the robot head rotation.

    Args:
        yaw (float): Yaw angle in degrees.
        pitch (float): Pitch angle in degrees.
    """
    try:
        self.robot.stance()
        yaw = math.radians(yaw)
        pitch = math.radians(pitch)
        self.robot.control_head(yaw, pitch)
        # Convergence detection: track the smallest error seen so far and count
        # consecutive polls without improvement; once the error has stopped
        # shrinking for THRESHOLD_HEAD_CONTROL_COUNT polls, assume settled.
        diff_yaw_min = self.FLOAT_MAX
        diff_pitch_min = self.FLOAT_MAX
        times_count = 0
        while True:
            time.sleep(0.01)
            diff_yaw = abs(current_head_joint_state[0] - yaw)
            diff_pitch = abs(current_head_joint_state[1] - pitch)

            # NOTE(review): while yaw is still improving, the pitch check below
            # is skipped for that iteration because of the `continue` — confirm
            # this ordering is intentional.
            if diff_yaw < diff_yaw_min:
                times_count = 0
                diff_yaw_min = diff_yaw
                continue
            if diff_pitch < diff_pitch_min:
                times_count = 0
                diff_pitch_min = diff_pitch
                continue
            times_count += 1
            if times_count > self.THRESHOLD_HEAD_CONTROL_COUNT:
                break

    except Exception as e:
        print(f"Robot head control failed: {str(e)}")
656
|
+
|
|
def control_robot_head_only_yaw(self, yaw: float):
    """Rotate only the head yaw axis, keeping the current pitch.

    Args:
        yaw (float): Yaw angle in degrees.
    """
    try:
        self.robot.stance()
        target_yaw = math.radians(yaw)
        self.robot.control_head(target_yaw, current_head_joint_state[1])
        # Consider the motion settled once the yaw error has stopped shrinking
        # for more than THRESHOLD_HEAD_CONTROL_COUNT consecutive polls.
        stall_count = 0
        best_error = self.FLOAT_MAX
        while stall_count <= self.THRESHOLD_HEAD_CONTROL_COUNT:
            time.sleep(0.01)
            error = abs(current_head_joint_state[0] - target_yaw)
            if error < best_error:
                best_error = error
                stall_count = 0
            else:
                stall_count += 1
    except Exception as e:
        print(f"Robot head control failed: {str(e)}")
681
|
+
|
|
def control_robot_head_only_pitch(self, pitch: float):
    """Rotate only the head pitch axis, keeping the current yaw.

    Args:
        pitch (float): Pitch angle in degrees.
    """
    try:
        self.robot.stance()
        target_pitch = math.radians(pitch)
        self.robot.control_head(current_head_joint_state[0], target_pitch)
        # Consider the motion settled once the pitch error has stopped
        # shrinking for more than THRESHOLD_HEAD_CONTROL_COUNT polls.
        stall_count = 0
        best_error = self.FLOAT_MAX
        while stall_count <= self.THRESHOLD_HEAD_CONTROL_COUNT:
            time.sleep(0.01)
            error = abs(current_head_joint_state[1] - target_pitch)
            if error < best_error:
                best_error = error
                stall_count = 0
            else:
                stall_count += 1
    except Exception as e:
        print(f"Robot head control failed: {str(e)}")
706
|
+
|
|
def control_robot_height(self, is_up: str, height: float, pitch: float = 0.0):
    """Control the robot's height and body pitch.

    Args:
        is_up (str): "up" to increase height, "down" to decrease height.
        height (float): Height delta in meters, relative to the current height.
        pitch (float, optional): Body pitch angle. Defaults to 0.0.
    """
    try:
        global com_height

        # Make sure the robot is standing before adjusting height.
        while not self.robot_state.is_stance():
            self.robot.stance()
            time.sleep(0.01)
        now_height = self.robot_state.com_height
        # Current offset from the reference standing height (set by to_stance()).
        mass_height = now_height - com_height
        if is_up == "up":
            height = mass_height + height
        elif is_up == "down":
            height = mass_height - height
        else:
            raise ValueError("is_up must be 'up' or 'down'")

        # squat() expects a negative offset; clamp to just below zero.
        if height >= 0.0:
            height = -0.0000000001
        self.robot.squat(height, pitch)
        # FIX: the original convergence loop spun with no sleep, pegging a CPU
        # core; poll at 10 ms until within 0.1 m of the requested offset.
        while abs(self.robot_state.com_height - com_height - height) >= 0.1:
            time.sleep(0.01)
    except Exception as e:
        print(f"Robot height control failed: {str(e)}")
739
|
+
|
|
def to_stance(self):
    """Return the robot to the standard standing posture, including arm reset and head centering."""
    try:
        self.arm_reset()
        self.control_robot_head(0, 0)
        self.robot._kuavo_core._control.robot_stance()
        global com_height
        reached = False
        while not reached:
            time.sleep(0.01)
            reached = self.robot_state.is_stance()
        time.sleep(0.5)
        # Re-baseline the reference standing height used by control_robot_height().
        com_height = self.robot_state.com_height
    except Exception as e:
        print(f"Stance adjustment failed: {str(e)}")
755
|
+
|
|
def slove_move_time(self, current_pos, target_pos):
    """Calculate the time required for the arm to move to the target position.

    Args:
        current_pos (list): Current position (x, y, z).
        target_pos (list): Target position (x, y, z).

    Returns:
        float: Move time in seconds, linearly scaled from 1 s (no motion)
        up to 10 s (>= 1 m of motion).
    """
    # FIX: the original computed the identical norm twice under two names
    # (left_dist / right_dist) and took their max — one computation suffices.
    dist = float(np.linalg.norm(np.array(current_pos) - np.array(target_pos)))
    min_time = 1.0
    max_time = 10.0
    # Linear interpolation: 1m -> 10s, 0m -> 1s
    return min_time + (max_time - min_time) * min(dist, 1.0)
774
|
+
|
|
def control_arm_target_pose(self, x1, y1, z1, roll1, pitch1, yaw1, x2, y2, z2, roll2, pitch2, yaw2) -> bool:
    """Calculate target joint angles using IK and move the arm to the target pose.

    Args:
        x1, y1, z1, roll1, pitch1, yaw1: Left arm end-effector position and orientation.
        x2, y2, z2, roll2, pitch2, yaw2: Right arm end-effector position and orientation.

    Returns:
        bool: True if successful, False otherwise.
    """
    # Get the transform from zarm_r7_end_effector to base_link using tf
    listener = tf.TransformListener()
    try:
        listener.waitForTransform("base_link", "zarm_r7_end_effector", rospy.Time(0), rospy.Duration(2.0))
        listener.waitForTransform("base_link", "zarm_l7_end_effector", rospy.Time(0), rospy.Duration(2.0))
        (trans_r, rot_r) = listener.lookupTransform("base_link", "zarm_r7_end_effector", rospy.Time(0))
        (trans_l, rot_l) = listener.lookupTransform("base_link", "zarm_l7_end_effector", rospy.Time(0))
    except (tf.Exception, tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
        print("Failed to get tf from zarm_r7_end_effector to base_link: ", str(e))
        return False
    global current_arm_joint_state
    front_left = [x1, y1, z1]  # Left arm target position
    front_right = [x2, y2, z2]  # Right arm target position
    # Convert Euler angles to quaternion
    # NOTE(review): arguments are passed as (yaw, pitch, roll); tf's default
    # axes convention is 'sxyz' (roll, pitch, yaw) — confirm this ordering.
    l_front_orientation = tf.transformations.quaternion_from_euler(yaw1, pitch1, roll1)
    r_front_orientation = tf.transformations.quaternion_from_euler(yaw2, pitch2, roll2)

    res = self.robot.arm_ik_free(
        KuavoPose(position=front_left, orientation=l_front_orientation),
        KuavoPose(position=front_right, orientation=r_front_orientation)
    )

    if res:
        # Calculate the spatial distance between current and target end-effector positions
        current_left_pos = trans_l
        current_right_pos = trans_r
        target_left_pos = front_left
        target_right_pos = front_right
        move_time_left = self.slove_move_time(current_left_pos, target_left_pos)
        move_time_right = self.slove_move_time(current_right_pos, target_right_pos)
        # Use the slower arm's time so both arms arrive together.
        move_time = max(move_time_left, move_time_right)
        times = [1.0, move_time]
        q_frames = [current_arm_joint_state[0:8], res]
        self.robot.control_arm_joint_trajectory(times, q_frames)
        time.sleep(move_time + 1.0)
        # self.set_auto_swing_arm_mode()
        # time.sleep(1.0)
        return True
    else:
        return False
825
|
+
|
|
def control_arm_target_pose_by_single(self, hand_type, x, y, z, roll, pitch, yaw) -> bool:
    """Move a single arm to the specified spatial position and orientation.

    Args:
        hand_type (str): "left" or "right".
        x, y, z (float): Target position.
        roll, pitch, yaw (float): Target orientation.

    Returns:
        bool: True if successful, False otherwise.
    """
    # Get the transform from zarm_r7_end_effector to base_link using tf
    listener = tf.TransformListener()
    try:
        listener.waitForTransform("base_link", "zarm_r7_end_effector", rospy.Time(0), rospy.Duration(2.0))
        listener.waitForTransform("base_link", "zarm_l7_end_effector", rospy.Time(0), rospy.Duration(2.0))
        (trans_r, rot_r) = listener.lookupTransform("base_link", "zarm_r7_end_effector", rospy.Time(0))
        (trans_l, rot_l) = listener.lookupTransform("base_link", "zarm_l7_end_effector", rospy.Time(0))
    except (tf.Exception, tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
        print("Failed to get tf from zarm_r7_end_effector to base_link: ", str(e))
        return False

    global current_arm_joint_state
    if hand_type == "left":
        front_left = [x, y, z]  # Left arm target position
        front_right = trans_r  # Right arm current position
        # Convert left arm Euler angles to quaternion
        # NOTE(review): args are (yaw, pitch, roll); tf's default axes are
        # 'sxyz' (roll, pitch, yaw) — confirm the ordering is intended.
        l_front_orientation = tf.transformations.quaternion_from_euler(yaw, pitch, roll)
        r_front_orientation = rot_r
        res = self.robot.arm_ik_free(
            KuavoPose(position=front_left, orientation=l_front_orientation),
            KuavoPose(position=front_right, orientation=r_front_orientation)
        )

        if res:
            # Calculate the spatial distance between current and target end-effector positions
            current_left_pos = trans_l
            current_right_pos = trans_r
            target_left_pos = front_left
            target_right_pos = front_right
            move_time = self.slove_move_time(current_left_pos, target_left_pos)
            times = [1.0, move_time]
            res = list(res)
            # Keep the right arm's joints at their current values.
            res[4:8] = current_arm_joint_state[4:8]
            q_frames = [current_arm_joint_state[0:8], res]
            self.robot.control_arm_joint_trajectory(times, q_frames)
            time.sleep(move_time + 1.0)
            # self.set_auto_swing_arm_mode()
            # time.sleep(1.0)
    elif hand_type == "right":
        front_left = trans_l  # Left arm current position
        front_right = [x, y, z]  # Right arm target position
        # Convert right arm Euler angles to quaternion
        r_front_orientation = tf.transformations.quaternion_from_euler(yaw, pitch, roll)
        l_front_orientation = rot_l
        res = self.robot.arm_ik_free(
            KuavoPose(position=front_left, orientation=l_front_orientation),
            KuavoPose(position=front_right, orientation=r_front_orientation)
        )

        if res:
            # Calculate the spatial distance between current and target end-effector positions
            current_left_pos = trans_l
            current_right_pos = trans_r
            target_left_pos = front_left
            target_right_pos = front_right
            move_time = self.slove_move_time(current_right_pos, target_right_pos)
            times = [1.0, move_time]
            res = list(res)
            # Keep the left arm's joints at their current values.
            res[0:4] = current_arm_joint_state[0:4]
            q_frames = [current_arm_joint_state[0:8], res]
            self.robot.control_arm_joint_trajectory(times, q_frames)
            time.sleep(move_time + 1.0)
    # NOTE(review): this returns True even when IK fails or hand_type is
    # invalid — confirm whether a False return was intended in those cases.
    return True
900
|
+
|
|
def play_music(self, music_number: str, volume: int = 100, speed: float = 1.0) -> bool:
    """Play a music file.

    Args:
        music_number (str): The number of the music file to play.
        volume (int, optional): The volume of the music. Defaults to 100.
        speed (float, optional): The speed of the music. Defaults to 1.0.

    Returns:
        bool: True if the music was played successfully, False otherwise.
    """
    played = self.robot_audio.play_audio(music_number, volume, speed)
    return played
913
|
+
|
|
def stop_music(self) -> bool:
    """Stop the currently playing music.

    Returns:
        bool: True if the music was stopped successfully, False otherwise.
    """
    stopped = self.robot_audio.stop_music()
    return stopped
921
|
+
|
|
922
|
+
# ===== Stair Climbing Methods =====
|
|
923
|
+
|
|
def set_stair_parameters(
    self,
    step_height: float = 0.13,
    step_length: float = 0.28,
    foot_width: float = 0.10,
    stand_height: float = 0.0,
    dt: float = 0.6,
    ss_time: float = 0.5,
) -> bool:
    """Set stair climbing parameters with version-specific defaults.

    Args:
        step_height (float): Step height in meters. Defaults to 0.13.
        step_length (float): Step length in meters. Defaults to 0.28.
        foot_width (float): Foot width in meters. Defaults to 0.10.
        stand_height (float): Standing height offset in meters. Defaults to 0.0.
        dt (float): Gait cycle time in seconds. Defaults to 0.6.
        ss_time (float): Single support time ratio. Defaults to 0.5.

    Returns:
        bool: True if parameters were set successfully, False otherwise.
    """
    try:
        # Set default parameters based on robot version
        # NOTE(review): `value or fallback` replaces ANY falsy value — an
        # explicit 0.0 (e.g. stand_height) is silently swapped for the
        # fallback on the < 40 branch; confirm that is the intent.
        if robot_version >= 40:
            step_height = step_height or 0.13
            step_length = step_length or 0.28
            foot_width = foot_width or 0.10
            stand_height = stand_height or 0.0
            dt = dt or 0.6
            ss_time = ss_time or 0.5
        else:
            step_height = step_height or 0.08
            step_length = step_length or 0.25
            foot_width = foot_width or 0.10
            stand_height = stand_height or -0.02
            dt = dt or 1.0
            ss_time = ss_time or 0.6

        return self.climb_stair.set_stair_parameters(
            step_height=step_height,
            step_length=step_length,
            foot_width=foot_width,
            stand_height=stand_height,
            dt=dt,
            ss_time=ss_time,
        )
    except Exception as e:
        print(f"Set stair parameters failed: {str(e)}")
        return False
974
|
+
|
|
def climb_up_stairs(self, num_steps: int = 4, stair_offset: float = 0.03) -> bool:
    """Plan and add up stairs trajectory to accumulated trajectory.

    Args:
        num_steps (int): Number of steps to climb up. Defaults to 4.
        stair_offset (float): Per-stair offset passed to the planner. Defaults to 0.03.

    Returns:
        bool: True if planning was successful, False otherwise.
    """
    try:
        planned = self.climb_stair.climb_up_stairs(num_steps, stair_offset)
    except Exception as e:
        print(f"Climb up stairs failed: {str(e)}")
        return False
    return planned
989
|
+
|
|
def climb_down_stairs(self, num_steps: int = 5) -> bool:
    """Plan and add down stairs trajectory to accumulated trajectory.

    Note: This functionality is flagged as under development; a warning is
    printed before delegating to the planner.

    Args:
        num_steps (int): Number of steps to climb down. Defaults to 5.

    Returns:
        bool: Result of the underlying planner call.
    """
    print("⚠ Down stairs functionality is currently disabled (under development)")
    result = self.climb_stair.climb_down_stairs(num_steps)
    return result
1003
|
+
|
|
def stair_move_to_position(
    self,
    dx: float = 0.2,
    dy: float = 0.0,
    dyaw: float = 0.0,
    max_step_x: float = 0.28,
    max_step_y: float = 0.15,
    max_step_yaw: float = 30.0,
) -> bool:
    """Plan stair climbing move to position trajectory and add to accumulated trajectory.

    Args:
        dx (float): X direction displacement in meters. Defaults to 0.2.
        dy (float): Y direction displacement in meters. Defaults to 0.0.
        dyaw (float): Yaw angle displacement in degrees. Defaults to 0.0.
        max_step_x (float): Maximum step size in X direction. Defaults to 0.28.
        max_step_y (float): Maximum step size in Y direction. Defaults to 0.15.
        max_step_yaw (float): Maximum yaw step size in degrees. Defaults to 30.0.

    Returns:
        bool: True if planning was successful, False otherwise.
    """
    try:
        planned = self.climb_stair.move_to_position(
            dx=dx,
            dy=dy,
            dyaw=dyaw,
            max_step_x=max_step_x,
            max_step_y=max_step_y,
            max_step_yaw=max_step_yaw,
        )
    except Exception as e:
        print(f"Stair move to position failed: {str(e)}")
        return False
    return planned
1033
|
+
|
|
def execute_stair_trajectory(self) -> bool:
    """Execute the complete accumulated stair climbing trajectory.

    Returns:
        bool: True if execution was successful, False otherwise.
    """
    try:
        executed = self.climb_stair.execute_trajectory()
    except Exception as e:
        print(f"Execute stair trajectory failed: {str(e)}")
        return False
    return executed
1045
|
+
|
|
def control_waist_rotation(self, degree: float = 0):
    """Control the robot's waist rotation.

    Args:
        degree (float): Rotation angle in degrees.
    """
    try:
        global control_waist_pub
        waist_msg = Float64MultiArray()
        waist_msg.data = [degree]
        control_waist_pub.publish(waist_msg)

    except Exception as e:
        print(f"Robot waist control failed: {str(e)}")
1060
|
+
|
|
def alignment_target(self, class_name: str, x: float, y: float, z: float):
    """
    Align the robot with the specified object class and move in front of the target.

    :param class_name: Object name for YOLO detection; must match a class name
        from the trained model.
    :param x: Image-space x tolerance: when the object's center x offset is
        below -x the robot strafes left, above x it strafes right.
    :param y: Image-space y tolerance: while the object's center y offset is
        below y the robot advances, otherwise it stops.
    :param z: Height adjustment for the robot (squat up/down) once aligned.
    """
    try:
        # Load the YOLO model (reusing the cached instance when available).
        yolo_detection = model_utils.yolo_detection
        model = model_utils.model
        is_yolo_init = model_utils.is_yolo_init

        if not is_yolo_init:
            yolo_detection = YOLO_detection()
            yolo_detection.init_ros_node()
            caller_file_path = self.package_path + "/upload_files/"
            print(f"caller_file_path: {caller_file_path}")
            model_path = os.path.join(os.path.dirname(caller_file_path), 'best.pt')
            # print(f"called from file: {model_path}")
            model = yolo_detection.load_model(model_path)
        result = yolo_detection.camera_interface.get_camera_image("head")
        if result is None:
            print("No head camera image...")
            return

        IMAGE_CENTER_X = yolo_detection.camera_interface.cv_image_shape[1] / 2.0
        IMAGE_CENTER_Y = yolo_detection.camera_interface.cv_image_shape[0] / 2.0
        # rospy.loginfo(f"IMAGE_CENTER_X: {IMAGE_CENTER_X}")
        # rospy.loginfo(f"IMAGE_CENTER_Y: {IMAGE_CENTER_Y}")
        MOVE_SPEED_Y = 0.05  # Lateral (Y) move speed
        MOVE_SPEED_X = 0.10  # Forward (X) move speed

        while not rospy.is_shutdown():

            results = yolo_detection.get_detections("head", model)
            if not results:
                continue

            # Collect every detection matching the requested class.
            targets = []
            for result in results:

                boxes = result.boxes.cpu().numpy()  # Bounding-box data as a NumPy array
                xywh = boxes.xywh  # [center x, center y, width, height]
                class_names = [result.names[cls.item()] for cls in result.boxes.cls.int()]  # Class-name list

                # Walk each detection box, extracting target info.
                for i in range(len(xywh)):
                    if class_names[i] == class_name:  # Filter by target class
                        targets.append({
                            "class": class_names[i],
                            "x": xywh[i][0],  # Target center x
                            "y": xywh[i][1],  # Target center y
                            "area": xywh[i][2] * xywh[i][3]  # Target area (width x height)
                        })

            if not targets:
                continue  # Nothing detected: keep looping

            # Pick the largest target by area.
            best_target = max(targets, key=lambda t: t["area"])

            # Offsets relative to the image center.
            offset_x = best_target["x"] - IMAGE_CENTER_X
            offset_y = best_target["y"] - IMAGE_CENTER_Y

            # rospy.loginfo(f"target center offset: x={offset_x:.2f}, y={offset_y:.2f}")

            # Drive the robot based on the offsets.
            if abs(offset_x) > x:
                # X offset too large: strafe sideways to center the target.
                speed_y = -MOVE_SPEED_Y if offset_x > x else MOVE_SPEED_Y
                self.walk_angle(0.00, speed_y, 0.00)
            elif offset_y < y:
                # Y offset too large: advance toward the target.
                self.walk_angle(MOVE_SPEED_X, 0.00, 0.00)
            else:
                # Offsets within tolerance: stop moving.
                self.stop()

                # Adjust the robot height.
                if z >= 0:
                    self.control_robot_height("up", z)
                else:
                    self.control_robot_height("down", -z)

                break

            time.sleep(0.1)

    except Exception as e:
        print(f"Robot alignment to target failed: {str(e)}")