python-motion-planning 1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- curve_generation/__init__.py +9 -0
- curve_generation/bezier_curve.py +131 -0
- curve_generation/bspline_curve.py +271 -0
- curve_generation/cubic_spline.py +128 -0
- curve_generation/curve.py +64 -0
- curve_generation/dubins_curve.py +348 -0
- curve_generation/fem_pos_smooth.py +114 -0
- curve_generation/polynomial_curve.py +226 -0
- curve_generation/reeds_shepp.py +736 -0
- global_planner/__init__.py +3 -0
- global_planner/evolutionary_search/__init__.py +4 -0
- global_planner/evolutionary_search/aco.py +186 -0
- global_planner/evolutionary_search/evolutionary_search.py +87 -0
- global_planner/evolutionary_search/pso.py +356 -0
- global_planner/graph_search/__init__.py +28 -0
- global_planner/graph_search/a_star.py +124 -0
- global_planner/graph_search/d_star.py +291 -0
- global_planner/graph_search/d_star_lite.py +188 -0
- global_planner/graph_search/dijkstra.py +77 -0
- global_planner/graph_search/gbfs.py +78 -0
- global_planner/graph_search/graph_search.py +87 -0
- global_planner/graph_search/jps.py +165 -0
- global_planner/graph_search/lazy_theta_star.py +114 -0
- global_planner/graph_search/lpa_star.py +230 -0
- global_planner/graph_search/s_theta_star.py +133 -0
- global_planner/graph_search/theta_star.py +171 -0
- global_planner/graph_search/voronoi.py +200 -0
- global_planner/sample_search/__init__.py +6 -0
- global_planner/sample_search/informed_rrt.py +152 -0
- global_planner/sample_search/rrt.py +151 -0
- global_planner/sample_search/rrt_connect.py +147 -0
- global_planner/sample_search/rrt_star.py +77 -0
- global_planner/sample_search/sample_search.py +135 -0
- local_planner/__init__.py +19 -0
- local_planner/apf.py +144 -0
- local_planner/ddpg.py +630 -0
- local_planner/dqn.py +687 -0
- local_planner/dwa.py +212 -0
- local_planner/local_planner.py +262 -0
- local_planner/lqr.py +146 -0
- local_planner/mpc.py +214 -0
- local_planner/pid.py +158 -0
- local_planner/rpp.py +147 -0
- python_motion_planning-1.0.dist-info/LICENSE +674 -0
- python_motion_planning-1.0.dist-info/METADATA +873 -0
- python_motion_planning-1.0.dist-info/RECORD +65 -0
- python_motion_planning-1.0.dist-info/WHEEL +5 -0
- python_motion_planning-1.0.dist-info/top_level.txt +4 -0
- utils/__init__.py +19 -0
- utils/agent/__init__.py +0 -0
- utils/agent/agent.py +135 -0
- utils/environment/__init__.py +0 -0
- utils/environment/env.py +134 -0
- utils/environment/node.py +85 -0
- utils/environment/point2d.py +96 -0
- utils/environment/pose2d.py +91 -0
- utils/helper/__init__.py +3 -0
- utils/helper/math_helper.py +65 -0
- utils/planner/__init__.py +0 -0
- utils/planner/control_factory.py +31 -0
- utils/planner/curve_factory.py +29 -0
- utils/planner/planner.py +40 -0
- utils/planner/search_factory.py +51 -0
- utils/plot/__init__.py +0 -0
- utils/plot/plot.py +274 -0
local_planner/dwa.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
"""
|
|
2
|
+
@file: dwa.py
|
|
3
|
+
@brief: Dynamic Window Approach(DWA) motion planning
|
|
4
|
+
@author: Yang Haodong, Wu Maojia
|
|
5
|
+
@update: 2024.6.25
|
|
6
|
+
"""
|
|
7
|
+
import numpy as np
|
|
8
|
+
from itertools import product
|
|
9
|
+
from scipy.spatial.distance import cdist
|
|
10
|
+
|
|
11
|
+
from .local_planner import LocalPlanner
|
|
12
|
+
from python_motion_planning.utils import Env
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class DWA(LocalPlanner):
    """
    Class for Dynamic Window Approach(DWA) motion planning.

    Parameters:
        start (tuple): start point coordinate
        goal (tuple): goal point coordinate
        env (Env): environment
        heuristic_type (str): heuristic function type
        heading_weight (float): weight for heading cost
        obstacle_weight (float): weight for obstacle cost
        velocity_weight (float): weight for velocity cost
        predict_time (float): predict time for trajectory
        obstacle_inflation_radius (float): inflation radius for obstacles
        v_resolution (float): velocity resolution in evaluation
        w_resolution (float): angular velocity resolution in evaluation
        **params: other parameters can be found in the parent class LocalPlanner

    Examples:
        >>> from python_motion_planning.utils import Grid
        >>> from python_motion_planning.local_planner import DWA
        >>> start = (5, 5, 0)
        >>> goal = (45, 25, 0)
        >>> env = Grid(51, 31)
        >>> planner = DWA(start, goal, env)
        >>> planner.run()

    References:
        [1] The Dynamic Window Approach to Collision Avoidance.
    """
    def __init__(self, start: tuple, goal: tuple, env: Env, heuristic_type: str = "euclidean",
                 heading_weight: float = 0.2, obstacle_weight: float = 0.1, velocity_weight: float = 0.05,
                 predict_time: float = 1.5, obstacle_inflation_radius: float = 1.0,
                 v_resolution: float = 0.05, w_resolution: float = 0.05, **params) -> None:
        super().__init__(start, goal, env, heuristic_type, **params)
        # evaluation weights and sampling resolutions
        self.heading_weight = heading_weight
        self.obstacle_weight = obstacle_weight
        self.velocity_weight = velocity_weight
        self.predict_time = predict_time
        self.obstacle_inflation_radius = obstacle_inflation_radius
        self.v_resolution = v_resolution
        self.w_resolution = w_resolution

        # global planner: A* supplies the reference path to track
        g_start = (start[0], start[1])
        g_goal = (goal[0], goal[1])
        self.g_planner = {"planner_name": "a_star", "start": g_start, "goal": g_goal, "env": env}
        # g_path is returned goal-to-start; reverse so it runs start-to-goal
        self.path = self.g_path[::-1]

    def __str__(self) -> str:
        return "Dynamic Window Approach(DWA)"

    def plan(self) -> tuple:
        """
        Dynamic Window Approach(DWA) motion plan function.

        Returns:
            flag (bool): planning successful if true else failed
            history_traj (list): best predicted trajectory chosen at each step
            history_pose (list): history poses of robot
        """
        history_traj = []
        for _ in range(self.params["MAX_ITERATION"]):
            # break until goal reached
            if self.reachGoal(tuple(self.robot.state.squeeze(axis=1)[0:3]), self.goal):
                return True, history_traj, self.robot.history_pose

            # get the particular point on the path at the lookahead distance to track
            lookahead_pt, theta_trj, kappa = self.getLookaheadPoint()

            # dynamic configure
            vr = self.calDynamicWin()
            eval_win, traj_win = self.evaluation(vr, lookahead_pt)

            # failed: no feasible velocity sample in the window
            if not len(eval_win):
                break

            # update: pick the command with the highest combined score
            max_index = np.argmax(eval_win[:, -1])
            u = np.expand_dims(eval_win[max_index, 0:-1], axis=1)

            self.robot.kinematic(u, self.params["TIME_STEP"])
            history_traj.append(traj_win[max_index])

        return False, None, None

    def run(self) -> None:
        """
        Running both planning and animation.

        Raises:
            ValueError: if planning failed and no pose history is available
        """
        _, history_traj, history_pose = self.plan()

        if not history_pose:
            raise ValueError("Path not found and planning failed!")

        path = np.array(history_pose)[:, 0:2]
        cost = np.sum(np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1, keepdims=True)))
        self.plot.plotPath(self.path, path_color="r", path_style="--")
        self.plot.animation(path, str(self), cost, history_pose=history_pose, predict_path=history_traj)

    def calDynamicWin(self) -> list:
        """
        Calculate dynamic window.

        Returns:
            v_reference (list): reference velocity window [v_min, v_max, w_min, w_max]
        """
        # hard margin: absolute velocity limits of the robot
        vs = (self.params["MIN_V"], self.params["MAX_V"], self.params["MIN_W"], self.params["MAX_W"])
        # predict margin: what is reachable within one time step given acceleration limits
        vd = (
            self.robot.v + self.params["MIN_V_INC"] * self.params["TIME_STEP"],
            self.robot.v + self.params["MAX_V_INC"] * self.params["TIME_STEP"],
            self.robot.w + self.params["MIN_W_INC"] * self.params["TIME_STEP"],
            self.robot.w + self.params["MAX_W_INC"] * self.params["TIME_STEP"]
        )

        # dynamic window: intersection of the hard and predict margins
        v_tmp = np.array([vs, vd])
        # reference velocity
        vr = [
            float(np.max(v_tmp[:, 0])), float(np.min(v_tmp[:, 1])),
            float(np.max(v_tmp[:, 2])), float(np.min(v_tmp[:, 3]))
        ]
        return vr

    def evaluation(self, vr: list, goal: tuple) -> tuple:
        """
        Evaluate all velocity samples within the dynamic window.

        Parameters:
            vr (list): dynamic window [v_min, v_max, w_min, w_max]
            goal (tuple): goal (lookahead) point coordinate

        Returns:
            eval_win (np.ndarray): rows of (v, w, score) for each sampled command;
                empty array if no sample is feasible
            traj_win (list): predicted trajectory for each sampled command
        """
        v_start, v_end, w_start, w_end = vr
        # BUG FIX: clamp num to >= 0 — a collapsed window would otherwise make
        # np.linspace raise on a negative sample count
        v = np.linspace(v_start, v_end, num=max(int((v_end - v_start) / self.v_resolution), 0)).tolist()
        w = np.linspace(w_start, w_end, num=max(int((w_end - w_start) / self.w_resolution), 0)).tolist()

        eval_win, traj_win = [], []
        for v_, w_ in product(v, w):
            # trajectory prediction, consisting of poses
            traj = self.generateTraj(v_, w_)
            end_state = traj[-1].squeeze().tolist()

            # heading evaluation: reward facing the goal at the trajectory end
            theta = self.angle((end_state[0], end_state[1]), goal)
            heading = np.pi - abs(theta - end_state[2])

            # obstacle evaluation: clearance, capped at the inflation radius
            D = cdist(np.array(tuple(self.obstacles)), traj[:, 0:2])
            min_D = np.min(D)
            obstacle = min(min_D, self.obstacle_inflation_radius)

            # velocity evaluation: prefer faster motion
            velocity = abs(v_)

            eval_win.append((v_, w_, heading, obstacle, velocity))
            traj_win.append(traj)

        # BUG FIX: with no samples, np.array([])[:, 2] below would raise
        # IndexError before the caller's emptiness guard could run
        if not eval_win:
            return np.array(eval_win), traj_win

        # normalization of each criterion column
        eval_win = np.array(eval_win)
        for col in (2, 3, 4):
            col_sum = np.sum(eval_win[:, col])
            if col_sum != 0:
                eval_win[:, col] = eval_win[:, col] / col_sum

        # evaluation: keep (v, w) and fold the three criteria into one weighted score
        factor = np.array([[1, 0, 0],
                           [0, 1, 0],
                           [0, 0, self.heading_weight],
                           [0, 0, self.obstacle_weight],
                           [0, 0, self.velocity_weight]])

        return eval_win @ factor, traj_win

    def generateTraj(self, v: float, w: float) -> np.ndarray:
        """
        Generate predicted trajectory for a constant (v, w) command.

        Parameters:
            v (float): velocity
            w (float): angular velocity

        Returns:
            traj (np.ndarray): sequence of predicted robot states
        """
        u = np.array([[v], [w]])
        state = self.robot.state
        time_steps = int(self.predict_time / self.params["TIME_STEP"])

        traj = []
        for _ in range(time_steps):
            state = self.robot.lookforward(state, u, self.params["TIME_STEP"])
            traj.append(state)

        return np.array(traj).squeeze()
|
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
"""
|
|
2
|
+
@file: local_planner.py
|
|
3
|
+
@brief: Base class for local planner.
|
|
4
|
+
@author: Yang Haodong, Wu Maojia
|
|
5
|
+
@update: 2024.5.20
|
|
6
|
+
"""
|
|
7
|
+
import math
|
|
8
|
+
|
|
9
|
+
from python_motion_planning.utils import Env, Planner, SearchFactory, Plot, Robot, MathHelper
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class LocalPlanner(Planner):
    """
    Base class for local planner.

    Parameters:
        start (tuple): start pose (x, y, theta)
        goal (tuple): goal pose (x, y, theta)
        env (Env): environment
        heuristic_type (str): heuristic function type
        **params: other parameters (see defaults set in __init__)
    """
    def __init__(self, start: tuple, goal: tuple, env: Env, heuristic_type: str="euclidean", **params) -> None:
        # start and goal pose
        assert len(start) == 3 and len(goal) == 3, \
            "Start and goal parameters must be (x, y, theta)"
        self.start, self.goal = start, goal
        # heuristic type
        self.heuristic_type = heuristic_type
        # environment
        self.env = env
        # obstacles
        self.obstacles = self.env.obstacles
        # graph handler
        self.plot = Plot(start, goal, env)
        # robot, initially at rest at the start pose
        self.robot = Robot(start[0], start[1], start[2], 0, 0)

        # parameters and default values (each overridable via **params)
        self.params = {
            "TIME_STEP": params.get("TIME_STEP", 0.1),
            "MAX_ITERATION": params.get("MAX_ITERATION", 1500),
            "LOOKAHEAD_TIME": params.get("LOOKAHEAD_TIME", 1.0),
            "MAX_LOOKAHEAD_DIST": params.get("MAX_LOOKAHEAD_DIST", 2.5),
            "MIN_LOOKAHEAD_DIST": params.get("MIN_LOOKAHEAD_DIST", 1.0),
            "MAX_V_INC": params.get("MAX_V_INC", 1.0),
            "MIN_V_INC": params.get("MIN_V_INC", -1.0),
            "MAX_V": params.get("MAX_V", 0.5),
            "MIN_V": params.get("MIN_V", 0.0),
            "MAX_W_INC": params.get("MAX_W_INC", math.pi),
            "MIN_W_INC": params.get("MIN_W_INC", -math.pi),
            "MAX_W": params.get("MAX_W", math.pi / 2),
            "MIN_W": params.get("MIN_W", -math.pi / 2),
            "GOAL_DIST_TOL": params.get("GOAL_DIST_TOL", 0.5),
            "ROTATE_TOL": params.get("ROTATE_TOL", 0.5),
        }

        # global planner
        self.g_planner_ = None
        # global path
        self.path = None
        # search factory
        self.search_factory_ = SearchFactory()

    @property
    def g_planner(self):
        return str(self.g_planner_)

    @g_planner.setter
    def g_planner(self, config):
        # config must carry a planner name for the factory to dispatch on
        if "planner_name" in config:
            self.g_planner_ = self.search_factory_(**config)
        else:
            raise RuntimeError("Please set planner name!")

    @property
    def g_path(self):
        """
        [property]Global path.
        """
        if self.g_planner_ is None:
            raise AttributeError("Global path searcher is None, please set it first!")

        cost, path, _ = self.g_planner_.plan()
        return path

    @property
    def lookahead_dist(self):
        # lookahead distance scales with speed, clamped to configured bounds
        return MathHelper.clamp(
            abs(self.robot.v) * self.params["LOOKAHEAD_TIME"],
            self.params["MIN_LOOKAHEAD_DIST"],
            self.params["MAX_LOOKAHEAD_DIST"]
        )

    def dist(self, start: tuple, end: tuple) -> float:
        """Euclidean distance between two 2D points."""
        return math.hypot(end[0] - start[0], end[1] - start[1])

    def angle(self, start: tuple, end: tuple) -> float:
        """Heading angle of the vector from start to end."""
        return math.atan2(end[1] - start[1], end[0] - start[0])

    def regularizeAngle(self, angle: float):
        """Wrap an angle into the interval [-pi, pi)."""
        return angle - 2.0 * math.pi * math.floor((angle + math.pi) / (2.0 * math.pi))

    def getLookaheadPoint(self):
        """
        Find the point on the path that is exactly the lookahead distance away from the robot

        Returns:
            lookahead_pt (tuple): lookahead point
            theta (float): the angle on trajectory
            kappa (float): the curvature on trajectory
        """
        if self.path is None:
            # BUG FIX: was `assert RuntimeError(...)`, which always passes
            # because an exception instance is truthy — raise instead
            raise RuntimeError("Please plan the path using g_planner!")

        # Find the first pose which is at a distance greater than the lookahead distance
        dist_to_robot = [self.dist(p, (self.robot.px, self.robot.py)) for p in self.path]
        idx_closest = dist_to_robot.index(min(dist_to_robot))
        idx_goal = len(self.path) - 1
        idx_prev = idx_goal - 1
        for i in range(idx_closest, len(self.path)):
            if self.dist(self.path[i], (self.robot.px, self.robot.py)) >= self.lookahead_dist:
                idx_goal = i
                break

        pt_x, pt_y = None, None
        if idx_goal == len(self.path) - 1:
            # If no pose is far enough, take the last pose
            pt_x = self.path[idx_goal][0]
            pt_y = self.path[idx_goal][1]
        else:
            if idx_goal == 0:
                idx_goal = idx_goal + 1
            # find the point on the line segment between the two poses
            # that is exactly the lookahead distance away from the robot pose (the origin)
            # This can be found with a closed form for the intersection of a segment and a circle
            idx_prev = idx_goal - 1
            px, py = self.path[idx_prev][0], self.path[idx_prev][1]
            gx, gy = self.path[idx_goal][0], self.path[idx_goal][1]

            # transform to the robot frame so that the circle centers at (0,0)
            prev_p = (px - self.robot.px, py - self.robot.py)
            goal_p = (gx - self.robot.px, gy - self.robot.py)
            i_points = MathHelper.circleSegmentIntersection(prev_p, goal_p, self.lookahead_dist)
            if len(i_points) == 0:
                # If there is no intersection, take the closest intersection point (foot of a perpendicular)
                # between the current position and the line segment
                i_points.append(MathHelper.closestPointOnLine(prev_p, goal_p))
            pt_x = i_points[0][0] + self.robot.px
            pt_y = i_points[0][1] + self.robot.py

        # calculate the angle on trajectory
        theta = self.angle(self.path[idx_prev], self.path[idx_goal])

        # calculate the curvature on trajectory using the circumscribed-circle
        # relation kappa = 2*sin(B)/b on three consecutive path points
        if idx_goal == 1:
            idx_goal = idx_goal + 1
        idx_prev = idx_goal - 1
        idx_pprev = idx_prev - 1
        a = self.dist(self.path[idx_prev], self.path[idx_goal])
        b = self.dist(self.path[idx_pprev], self.path[idx_goal])
        c = self.dist(self.path[idx_pprev], self.path[idx_prev])
        cosB = (a * a + c * c - b * b) / (2 * a * c)
        sinB = math.sin(math.acos(cosB))
        # cross product sign determines turning direction (left/right)
        cross = (self.path[idx_prev][0] - self.path[idx_pprev][0]) * \
                (self.path[idx_goal][1] - self.path[idx_pprev][1]) - \
                (self.path[idx_prev][1] - self.path[idx_pprev][1]) * \
                (self.path[idx_goal][0] - self.path[idx_pprev][0])
        kappa = math.copysign(2 * sinB / b, cross)

        return (pt_x, pt_y), theta, kappa

    def linearRegularization(self, v_d: float) -> float:
        """
        Linear velocity regularization

        Parameters:
            v_d (float): reference velocity input

        Returns:
            v (float): control velocity output
        """
        # limit acceleration first, then absolute velocity
        v_inc = v_d - self.robot.v
        v_inc = MathHelper.clamp(v_inc, self.params["MIN_V_INC"], self.params["MAX_V_INC"])

        v = self.robot.v + v_inc
        v = MathHelper.clamp(v, self.params["MIN_V"], self.params["MAX_V"])

        return v

    def angularRegularization(self, w_d: float) -> float:
        """
        Angular velocity regularization

        Parameters:
            w_d (float): reference angular velocity input

        Returns:
            w (float): control angular velocity output
        """
        # limit angular acceleration first, then absolute angular velocity
        w_inc = w_d - self.robot.w
        w_inc = MathHelper.clamp(w_inc, self.params["MIN_W_INC"], self.params["MAX_W_INC"])

        w = self.robot.w + w_inc
        w = MathHelper.clamp(w, self.params["MIN_W"], self.params["MAX_W"])

        return w

    def shouldMoveToGoal(self, cur: tuple, goal: tuple) -> bool:
        """
        Whether to move to the goal pose

        Parameters:
            cur (tuple): current pose of robot
            goal (tuple): goal pose of robot

        Returns:
            flag (bool): true if robot should perform movement
        """
        return self.dist(cur, goal) > self.params["GOAL_DIST_TOL"]

    def shouldRotateToPath(self, angle_to_path: float) -> bool:
        """
        Whether to correct the tracking path with rotation operation

        Parameters:
            angle_to_path (float): the angle deviation

        Returns:
            flag (bool): true if robot should perform rotation
        """
        return angle_to_path > self.params["ROTATE_TOL"]

    def reachGoal(self, cur: tuple, goal: tuple) -> bool:
        """
        Whether the robot has reached the goal pose

        Parameters:
            cur (tuple): current pose of robot
            goal (tuple): goal pose of robot

        Returns:
            flag (bool): true if robot has reached the goal
        """
        e_theta = self.regularizeAngle(cur[2] - goal[2])
        # reached only when both position and orientation are within tolerance
        return not (self.shouldMoveToGoal((cur[0], cur[1]), (goal[0], goal[1]))
                    or self.shouldRotateToPath(abs(e_theta)))

    def isCollision(self, cur_pos: tuple):
        """
        Whether the robot is in collision with obstacles

        Parameters:
            cur_pos (tuple): current position of robot

        Returns:
            flag (bool): true if robot is in collision
        """
        # 0.5-cell Chebyshev check against every obstacle cell
        obstacles = self.obstacles
        for obs in obstacles:
            if abs(cur_pos[0] - obs[0]) < 0.5 and abs(cur_pos[1] - obs[1]) < 0.5:
                return True
        return False
|
local_planner/lqr.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
"""
|
|
2
|
+
@file: lqr.py
|
|
3
|
+
@brief: Linear Quadratic Regulator(LQR) motion planning
|
|
4
|
+
@author: Yang Haodong, Wu Maojia
|
|
5
|
+
@update: 2024.6.25
|
|
6
|
+
"""
|
|
7
|
+
import numpy as np
|
|
8
|
+
|
|
9
|
+
from .local_planner import LocalPlanner
|
|
10
|
+
from python_motion_planning.utils import Env
|
|
11
|
+
|
|
12
|
+
class LQR(LocalPlanner):
    """
    Class for Linear Quadratic Regulator(LQR) motion planning.

    Parameters:
        start (tuple): start point coordinate
        goal (tuple): goal point coordinate
        env (Env): environment
        heuristic_type (str): heuristic function type
        **params: other parameters can be found in the parent class LocalPlanner

    Examples:
        >>> from python_motion_planning.utils import Grid
        >>> from python_motion_planning.local_planner import LQR
        >>> start = (5, 5, 0)
        >>> goal = (45, 25, 0)
        >>> env = Grid(51, 31)
        >>> planner = LQR(start, goal, env)
        >>> planner.run()
    """
    def __init__(self, start: tuple, goal: tuple, env: Env, heuristic_type: str = "euclidean", **params) -> None:
        super().__init__(start, goal, env, heuristic_type, **params)
        # LQR parameters: state cost Q, control cost R, Riccati iteration limits
        self.Q = np.diag([1, 1, 1])
        self.R = np.diag([1, 1])
        self.lqr_iteration = 100
        self.eps_iter = 1e-1

        # global planner: A* supplies the reference path to track
        g_start = (start[0], start[1])
        g_goal = (goal[0], goal[1])
        self.g_planner = {"planner_name": "a_star", "start": g_start, "goal": g_goal, "env": env}
        # g_path is returned goal-to-start; reverse so it runs start-to-goal
        self.path = self.g_path[::-1]

    def __str__(self) -> str:
        return "Linear Quadratic Regulator (LQR)"

    def plan(self):
        """
        LQR motion plan function.

        Returns:
            flag (bool): planning successful if true else failed
            pose_list (list): history poses of robot
        """
        dt = self.params["TIME_STEP"]
        for _ in range(self.params["MAX_ITERATION"]):
            # break until goal reached
            if self.reachGoal(tuple(self.robot.state.squeeze(axis=1)[0:3]), self.goal):
                return True, self.robot.history_pose

            # get the particular point on the path at the lookahead distance
            lookahead_pt, theta_trj, kappa = self.getLookaheadPoint()

            # calculate velocity command
            e_theta = self.regularizeAngle(self.robot.theta - self.goal[2])
            if not self.shouldMoveToGoal(self.robot.position, self.goal):
                # at the goal position: stop, or rotate in place to the goal heading
                if not self.shouldRotateToPath(abs(e_theta)):
                    u = np.array([[0], [0]])
                else:
                    u = np.array([[0], [self.angularRegularization(e_theta / dt)]])
            else:
                e_theta = self.regularizeAngle(
                    self.angle(self.robot.position, lookahead_pt) - self.robot.theta
                )
                if self.shouldRotateToPath(abs(e_theta)):
                    # too far off the path heading: rotate in place first
                    u = np.array([[0], [self.angularRegularization(e_theta / dt)]])
                else:
                    s = (self.robot.px, self.robot.py, self.robot.theta)  # current state
                    s_d = (lookahead_pt[0], lookahead_pt[1], theta_trj)   # desired state
                    u_r = (self.robot.v, self.robot.v * kappa)            # reference input
                    u = self.lqrControl(s, s_d, u_r)

            # feed into robotic kinematic
            self.robot.kinematic(u, dt)

        return False, None

    def run(self):
        """
        Running both planning and animation.

        Raises:
            ValueError: if planning failed and no pose history is available
        """
        _, history_pose = self.plan()
        if not history_pose:
            raise ValueError("Path not found and planning failed!")

        path = np.array(history_pose)[:, 0:2]
        cost = np.sum(np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1, keepdims=True)))
        self.plot.plotPath(self.path, path_color="r", path_style="--")
        self.plot.animation(path, str(self), cost, history_pose=history_pose)

    def lqrControl(self, s: tuple, s_d: tuple, u_r: tuple) -> np.ndarray:
        """
        Execute LQR control process.

        Parameters:
            s (tuple): current state
            s_d (tuple): desired state
            u_r (tuple): reference control

        Returns:
            u (np.ndarray): control vector
        """
        dt = self.params["TIME_STEP"]

        # state equation on error, linearized about the desired state
        A = np.identity(3)
        A[0, 2] = -u_r[0] * np.sin(s_d[2]) * dt
        A[1, 2] = u_r[0] * np.cos(s_d[2]) * dt

        B = np.zeros((3, 2))
        B[0, 0] = np.cos(s_d[2]) * dt
        B[1, 0] = np.sin(s_d[2]) * dt
        B[2, 1] = dt

        # discrete iteration Riccati equation, seeded with Q
        P = self.Q
        P_ = self.Q

        # iteration
        for _ in range(self.lqr_iteration):
            P_ = self.Q + A.T @ P @ A - A.T @ P @ B @ np.linalg.inv(self.R + B.T @ P @ B) @ B.T @ P @ A
            # BUG FIX: converge on the magnitude of change; the signed
            # np.max(P - P_) can be negative while the matrices are still far
            # apart, terminating the iteration spuriously
            if np.max(np.abs(P - P_)) < self.eps_iter:
                break
            P = P_

        # feedback gain and error-state feedback
        K = -np.linalg.inv(self.R + B.T @ P_ @ B) @ B.T @ P_ @ A
        e = np.array([[s[0] - s_d[0]], [s[1] - s_d[1]], [self.regularizeAngle(s[2] - s_d[2])]])
        u = np.array([[u_r[0]], [u_r[1]]]) + K @ e

        # clamp the command to the robot's velocity/acceleration limits
        return np.array([
            [self.linearRegularization(float(u[0]))],
            [self.angularRegularization(float(u[1]))]
        ])
|