python-motion-planning 0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. python_motion_planning/__init__.py +4 -0
  2. python_motion_planning/curve_generation/__init__.py +9 -0
  3. python_motion_planning/curve_generation/bezier_curve.py +131 -0
  4. python_motion_planning/curve_generation/bspline_curve.py +271 -0
  5. python_motion_planning/curve_generation/cubic_spline.py +128 -0
  6. python_motion_planning/curve_generation/curve.py +64 -0
  7. python_motion_planning/curve_generation/dubins_curve.py +348 -0
  8. python_motion_planning/curve_generation/fem_pos_smooth.py +114 -0
  9. python_motion_planning/curve_generation/polynomial_curve.py +226 -0
  10. python_motion_planning/curve_generation/reeds_shepp.py +736 -0
  11. python_motion_planning/global_planner/__init__.py +3 -0
  12. python_motion_planning/global_planner/evolutionary_search/__init__.py +4 -0
  13. python_motion_planning/global_planner/evolutionary_search/aco.py +186 -0
  14. python_motion_planning/global_planner/evolutionary_search/evolutionary_search.py +87 -0
  15. python_motion_planning/global_planner/evolutionary_search/pso.py +356 -0
  16. python_motion_planning/global_planner/graph_search/__init__.py +28 -0
  17. python_motion_planning/global_planner/graph_search/a_star.py +124 -0
  18. python_motion_planning/global_planner/graph_search/d_star.py +291 -0
  19. python_motion_planning/global_planner/graph_search/d_star_lite.py +188 -0
  20. python_motion_planning/global_planner/graph_search/dijkstra.py +77 -0
  21. python_motion_planning/global_planner/graph_search/gbfs.py +78 -0
  22. python_motion_planning/global_planner/graph_search/graph_search.py +87 -0
  23. python_motion_planning/global_planner/graph_search/jps.py +165 -0
  24. python_motion_planning/global_planner/graph_search/lazy_theta_star.py +114 -0
  25. python_motion_planning/global_planner/graph_search/lpa_star.py +230 -0
  26. python_motion_planning/global_planner/graph_search/s_theta_star.py +133 -0
  27. python_motion_planning/global_planner/graph_search/theta_star.py +171 -0
  28. python_motion_planning/global_planner/graph_search/voronoi.py +200 -0
  29. python_motion_planning/global_planner/sample_search/__init__.py +6 -0
  30. python_motion_planning/global_planner/sample_search/informed_rrt.py +152 -0
  31. python_motion_planning/global_planner/sample_search/rrt.py +151 -0
  32. python_motion_planning/global_planner/sample_search/rrt_connect.py +147 -0
  33. python_motion_planning/global_planner/sample_search/rrt_star.py +77 -0
  34. python_motion_planning/global_planner/sample_search/sample_search.py +135 -0
  35. python_motion_planning/local_planner/__init__.py +19 -0
  36. python_motion_planning/local_planner/apf.py +144 -0
  37. python_motion_planning/local_planner/ddpg.py +630 -0
  38. python_motion_planning/local_planner/dqn.py +687 -0
  39. python_motion_planning/local_planner/dwa.py +212 -0
  40. python_motion_planning/local_planner/local_planner.py +262 -0
  41. python_motion_planning/local_planner/lqr.py +146 -0
  42. python_motion_planning/local_planner/mpc.py +214 -0
  43. python_motion_planning/local_planner/pid.py +158 -0
  44. python_motion_planning/local_planner/rpp.py +147 -0
  45. python_motion_planning/utils/__init__.py +19 -0
  46. python_motion_planning/utils/agent/__init__.py +0 -0
  47. python_motion_planning/utils/agent/agent.py +135 -0
  48. python_motion_planning/utils/environment/__init__.py +0 -0
  49. python_motion_planning/utils/environment/env.py +134 -0
  50. python_motion_planning/utils/environment/node.py +85 -0
  51. python_motion_planning/utils/environment/point2d.py +96 -0
  52. python_motion_planning/utils/environment/pose2d.py +91 -0
  53. python_motion_planning/utils/helper/__init__.py +3 -0
  54. python_motion_planning/utils/helper/math_helper.py +65 -0
  55. python_motion_planning/utils/planner/__init__.py +0 -0
  56. python_motion_planning/utils/planner/control_factory.py +31 -0
  57. python_motion_planning/utils/planner/curve_factory.py +29 -0
  58. python_motion_planning/utils/planner/planner.py +40 -0
  59. python_motion_planning/utils/planner/search_factory.py +51 -0
  60. python_motion_planning/utils/plot/__init__.py +0 -0
  61. python_motion_planning/utils/plot/plot.py +274 -0
  62. python_motion_planning-0.1.dist-info/LICENSE +674 -0
  63. python_motion_planning-0.1.dist-info/METADATA +873 -0
  64. python_motion_planning-0.1.dist-info/RECORD +66 -0
  65. python_motion_planning-0.1.dist-info/WHEEL +5 -0
  66. python_motion_planning-0.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,214 @@
1
+ """
2
+ @file: mpc.py
3
+ @breif: Model Predicted Control (MPC) motion planning
4
+ @author: Yang Haodong, Wu Maojia
5
+ @update: 2024.6.25
6
+ """
7
+ import osqp
8
+ import numpy as np
9
+ from scipy import sparse
10
+
11
+ from .local_planner import LocalPlanner
12
+ from python_motion_planning.utils import Env
13
+
14
class MPC(LocalPlanner):
    """
    Class for Model Predicted Control (MPC) motion planning.

    Tracks a global A* reference path by repeatedly solving a constrained
    quadratic program (via OSQP) over a finite prediction horizon.

    Parameters:
        start (tuple): start point coordinate
        goal (tuple): goal point coordinate
        env (Env): environment
        heuristic_type (str): heuristic function type
        **params: other parameters can be found in the parent class LocalPlanner

    Examples:
        >>> from python_motion_planning.utils import Grid
        >>> from python_motion_planning.local_planner import MPC
        >>> start = (5, 5, 0)
        >>> goal = (45, 25, 0)
        >>> env = Grid(51, 31)
        >>> planner = MPC(start, goal, env)
        >>> planner.run()
    """
    def __init__(self, start: tuple, goal: tuple, env: Env, heuristic_type: str = "euclidean", **params) -> None:
        super().__init__(start, goal, env, heuristic_type, **params)
        # MPC parameters
        self.p = 12                             # prediction horizon (number of predicted steps)
        self.m = 8                              # control horizon (number of optimized inputs)
        self.Q = np.diag([0.8, 0.8, 0.5])       # state tracking weight on (x, y, theta) error
        self.R = np.diag([2, 2])                # control effort weight on (v, w)
        # absolute velocity bounds, column vectors [[v], [w]]
        self.u_min = np.array([[self.params["MIN_V"]], [self.params["MIN_W"]]])
        self.u_max = np.array([[self.params["MAX_V"]], [self.params["MAX_W"]]])
        # per-step velocity increment bounds, column vectors [[dv], [dw]]
        self.du_min = np.array([[self.params["MIN_V_INC"]], [self.params["MIN_W_INC"]]])
        self.du_max = np.array([[self.params["MAX_V_INC"]], [self.params["MAX_W_INC"]]])

        # global planner: A* provides the reference path to track
        g_start = (start[0], start[1])
        g_goal = (goal[0], goal[1])
        self.g_planner = {"planner_name": "a_star", "start": g_start, "goal": g_goal, "env": env}
        self.path = self.g_path[::-1]  # reverse so the path runs start -> goal

    def __str__(self) -> str:
        return "Model Predicted Control (MPC)"

    def plan(self):
        """
        MPC motion plan function.

        Simulates the robot forward for at most MAX_ITERATION steps, issuing
        one MPC-optimized velocity command per step.

        Returns:
            flag (bool): planning successful if true else failed
            pose_list (list): history poses of robot (None on failure)
        """
        dt = self.params["TIME_STEP"]
        u_p = (0, 0)  # previous control error carried between MPC solves
        for _ in range(self.params["MAX_ITERATION"]):
            # break until goal reached
            if self.reachGoal(tuple(self.robot.state.squeeze(axis=1)[0:3]), self.goal):
                return True, self.robot.history_pose

            # get the particular point on the path at the lookahead distance
            lookahead_pt, theta_trj, kappa = self.getLookaheadPoint()

            # calculate velocity command
            e_theta = self.regularizeAngle(self.robot.theta - self.goal[2])
            if not self.shouldMoveToGoal(self.robot.position, self.goal):
                # close to the goal: only align the final heading
                if not self.shouldRotateToPath(abs(e_theta)):
                    u = np.array([[0], [0]])
                else:
                    u = np.array([[0], [self.angularRegularization(e_theta / dt)]])
            else:
                e_theta = self.regularizeAngle(
                    self.angle(self.robot.position, lookahead_pt) - self.robot.theta
                )
                if self.shouldRotateToPath(abs(e_theta)):
                    # heading error too large: rotate in place first
                    u = np.array([[0], [self.angularRegularization(e_theta / dt)]])
                else:
                    s = (self.robot.px, self.robot.py, self.robot.theta)  # current state
                    s_d = (lookahead_pt[0], lookahead_pt[1], theta_trj)   # desired state
                    u_r = (self.robot.v, self.robot.v * kappa)            # reference input
                    u, u_p = self.mpcControl(s, s_d, u_r, u_p)

            # feed into robotic kinematic
            self.robot.kinematic(u, dt)

        return False, None

    def run(self):
        """
        Running both planning and animation.

        Raises:
            ValueError: if planning fails before reaching the goal
        """
        _, history_pose = self.plan()
        if not history_pose:
            raise ValueError("Path not found and planning failed!")

        path = np.array(history_pose)[:, 0:2]
        # total traveled distance = sum of consecutive pose-to-pose segment lengths
        cost = np.sum(np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1, keepdims=True)))
        self.plot.plotPath(self.path, path_color="r", path_style="--")
        self.plot.animation(path, str(self), cost, history_pose=history_pose)

    def mpcControl(self, s: tuple, s_d: tuple, u_r: tuple, u_p: tuple) -> tuple:
        """
        Execute MPC control process.

        Linearizes the unicycle kinematics about the desired state, builds the
        condensed QP over the prediction horizon and solves it with OSQP.

        Parameters:
            s (tuple): current state (x, y, theta)
            s_d (tuple): desired state (x, y, theta)
            u_r (tuple): reference control (v, w)
            u_p (tuple): previous control error

        Returns:
            u (np.ndarray (2x1)): regularized control vector [v, w]
            u_p (tuple): updated control error for the next solve
        """
        dim_u, dim_x = 2, 3
        dt = self.params["TIME_STEP"]

        # state vector (5 x 1): tracking error augmented with the previous control error
        x = np.array([
            [s[0] - s_d[0]],
            [s[1] - s_d[1]],
            [s[2] - s_d[2]],
            [u_p[0]],
            [u_p[1]],
        ])

        # original state matrix (linearized unicycle dynamics, discretized with dt)
        A = np.identity(3)
        A[0, 2] = -u_r[0] * np.sin(s_d[2]) * dt
        A[1, 2] = u_r[0] * np.cos(s_d[2]) * dt

        # original control matrix
        B = np.zeros((3, 2))
        B[0, 0] = np.cos(s_d[2]) * dt
        B[1, 0] = np.sin(s_d[2]) * dt
        B[2, 1] = dt

        # augmented state matrix (5 x 5): stacks dynamics over [error, control error]
        A = np.concatenate((A, B), axis=1)
        temp = np.concatenate((np.zeros((dim_u, dim_x)), np.identity(dim_u)), axis=1)
        A = np.concatenate((A, temp), axis=0)

        # augmented control matrix (5 x 2)
        B = np.concatenate((B, np.identity(dim_u)), axis=0)

        # output matrix (3 x 5): selects the (x, y, theta) error from the augmented state
        C = np.concatenate((np.identity(dim_x), np.zeros((dim_x, dim_u))), axis=1)

        # mpc state matrix (3p x 5): stacked C @ A^k for k = 1..p
        S_x = C @ A
        for i in range(1, self.p):
            S_x = np.concatenate((S_x, C @ np.linalg.matrix_power(A, i + 1)), axis=0)

        # mpc control matrix (3p x 2m): lower-block-triangular impulse responses
        S_u_rows = []
        for i in range(self.p):
            S_u_row = C @ np.linalg.matrix_power(A, i) @ B
            for j in range(1, self.m):
                if j <= i:
                    S_u_row = np.concatenate((
                        S_u_row, C @ np.linalg.matrix_power(A, i - j) @ B
                    ), axis=1)
                else:
                    # future inputs cannot affect earlier outputs
                    S_u_row = np.concatenate((S_u_row, np.zeros((dim_x, dim_u))), axis=1)
            S_u_rows.append(S_u_row)
        S_u = np.vstack(S_u_rows)

        # optimization: quadratic cost 0.5 dU' H dU + g' dU
        Yr = np.zeros((3 * self.p, 1))                  # reference output (zero error) (3p x 1)
        Q = np.kron(np.identity(self.p), self.Q)        # (3p x 3p)
        R = np.kron(np.identity(self.m), self.R)        # (2m x 2m)
        H = S_u.T @ Q @ S_u + R                         # (2m x 2m)
        g = S_u.T @ Q @ (S_x @ x - Yr)                  # (2m x 1)

        # constraints: cumulative-sum matrix maps increments to absolute commands
        I = np.eye(2 * self.m)
        A_I = np.kron(np.tril(np.ones((self.m, self.m))), np.diag([1, 1]))
        U_min = np.kron(np.ones((self.m, 1)), self.u_min)
        U_max = np.kron(np.ones((self.m, 1)), self.u_max)
        U_k_1 = np.kron(np.ones((self.m, 1)), np.array([[u_p[0]], [u_p[1]]]))

        # boundary: per-step increment limits
        dU_min = np.kron(np.ones((self.m, 1)), self.du_min)
        dU_max = np.kron(np.ones((self.m, 1)), self.du_max)

        # solve the QP with OSQP
        solver = osqp.OSQP()
        H = sparse.csc_matrix(H)
        A = sparse.csc_matrix(np.vstack([A_I, I]))
        l = np.vstack([U_min - U_k_1, dU_min])
        u = np.vstack([U_max - U_k_1, dU_max])
        solver.setup(H, g, A, l, u, verbose=False)
        res = solver.solve()
        dU_opt = res.x[:, None]

        # first element: only the first optimized increment is applied (receding horizon)
        du = dU_opt[0:2]

        # real control = increment + previous error + reference input
        u = du + np.array([[u_p[0]], [u_p[1]]]) + np.array([[u_r[0]], [u_r[1]]])

        return np.array([
            [self.linearRegularization(float(u[0]))],
            [self.angularRegularization(float(u[1]))]
        ]), (float(u[0]) - u_r[0], float(u[1]) - u_r[1])
@@ -0,0 +1,158 @@
1
+ """
2
+ @file: pid.py
3
+ @breif: PID motion planning
4
+ @author: Yang Haodong, Wu Maojia
5
+ @update: 2024.6.25
6
+ """
7
+ import numpy as np
8
+ import math
9
+
10
+ from .local_planner import LocalPlanner
11
+ from python_motion_planning.utils import Env, MathHelper
12
+
13
+
14
class PID(LocalPlanner):
    """
    Class for PID motion planning.

    Tracks a global A* reference path using two independent PID loops:
    one for linear velocity and one for angular velocity.

    Parameters:
        start (tuple): start point coordinate
        goal (tuple): goal point coordinate
        env (Env): environment
        heuristic_type (str): heuristic function type
        k_v_p (float): proportional gain of the linear velocity controller
        k_v_i (float): integral gain of the linear velocity controller
        k_v_d (float): derivative gain of the linear velocity controller
        k_w_p (float): proportional gain of the angular velocity controller
        k_w_i (float): integral gain of the angular velocity controller
        k_w_d (float): derivative gain of the angular velocity controller
        k_theta (float): weight blending the lookahead angle with the trajectory angle
        **params: other parameters can be found in the parent class LocalPlanner

    Examples:
        >>> from python_motion_planning.utils import Grid
        >>> from python_motion_planning.local_planner import PID
        >>> start = (5, 5, 0)
        >>> goal = (45, 25, 0)
        >>> env = Grid(51, 31)
        >>> planner = PID(start, goal, env)
        >>> planner.run()
    """
    def __init__(self, start: tuple, goal: tuple, env: Env, heuristic_type: str = "euclidean",
                 k_v_p: float = 1.00, k_v_i: float = 0.10, k_v_d: float = 0.10,
                 k_w_p: float = 1.00, k_w_i: float = 0.10, k_w_d: float = 0.10,
                 k_theta: float = 0.75, **params) -> None:
        super().__init__(start, goal, env, heuristic_type, MIN_LOOKAHEAD_DIST=0.75, **params)
        # PID controller state: previous error (e_*) and accumulated integral (i_*)
        # for the angular (w) and linear (v) loops
        self.e_w, self.i_w = 0.0, 0.0
        self.e_v, self.i_v = 0.0, 0.0
        self.k_v_p, self.k_v_i, self.k_v_d = k_v_p, k_v_i, k_v_d
        self.k_w_p, self.k_w_i, self.k_w_d = k_w_p, k_w_i, k_w_d
        self.k_theta = k_theta

        # global planner: A* provides the reference path to track
        g_start = (start[0], start[1])
        g_goal = (goal[0], goal[1])
        self.g_planner = {"planner_name": "a_star", "start": g_start, "goal": g_goal, "env": env}
        self.path = self.g_path[::-1]  # reverse so the path runs start -> goal

    def __str__(self) -> str:
        return "PID Planner"

    def plan(self):
        """
        PID motion plan function.

        Simulates the robot forward for at most MAX_ITERATION steps, issuing
        one PID-regulated velocity command per step.

        Returns:
            flag (bool): planning successful if true else failed
            pose_list (list): history poses of robot (None on failure)
        """
        dt = self.params["TIME_STEP"]
        for _ in range(self.params["MAX_ITERATION"]):
            # break until goal reached
            if self.reachGoal(tuple(self.robot.state.squeeze(axis=1)[0:3]), self.goal):
                return True, self.robot.history_pose

            # find next tracking point
            lookahead_pt, theta_trj, _ = self.getLookaheadPoint()

            # desired angle: blend the bearing to the lookahead point with the
            # trajectory tangent, unwrapping so both angles lie on the same branch
            theta_err = self.angle(self.robot.position, lookahead_pt)
            if abs(theta_err - theta_trj) > np.pi:
                if theta_err > theta_trj:
                    theta_trj += 2 * np.pi
                else:
                    theta_err += 2 * np.pi
            theta_d = self.k_theta * theta_err + (1 - self.k_theta) * theta_trj

            # calculate velocity command
            e_theta = self.regularizeAngle(self.robot.theta - self.goal[2])
            if not self.shouldMoveToGoal(self.robot.position, self.goal):
                # close to the goal: only align the final heading
                if not self.shouldRotateToPath(abs(e_theta)):
                    u = np.array([[0], [0]])
                else:
                    u = np.array([[0], [self.angularRegularization(e_theta / dt)]])
            else:
                e_theta = self.regularizeAngle(theta_d - self.robot.theta)
                if self.shouldRotateToPath(abs(e_theta)):
                    # heading error too large: rotate in place first
                    u = np.array([[0], [self.angularRegularization(e_theta / dt)]])
                else:
                    # desired speed: cover the distance to the lookahead point in one step
                    v_d = self.dist(lookahead_pt, self.robot.position) / dt
                    u = np.array([[self.linearRegularization(v_d)], [self.angularRegularization(e_theta / dt)]])

            # feed into robotic kinematic
            self.robot.kinematic(u, dt)

        return False, None

    def run(self):
        """
        Running both planning and animation.

        Raises:
            ValueError: if planning fails before reaching the goal
        """
        _, history_pose = self.plan()
        if not history_pose:
            raise ValueError("Path not found and planning failed!")

        path = np.array(history_pose)[:, 0:2]
        # total traveled distance = sum of consecutive pose-to-pose segment lengths
        cost = np.sum(np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1, keepdims=True)))
        self.plot.plotPath(self.path, path_color="r", path_style="--")
        self.plot.animation(path, str(self), cost, history_pose=history_pose)

    def linearRegularization(self, v_d: float) -> float:
        """
        Linear velocity controller with pid.

        Parameters:
            v_d (float): reference velocity input

        Returns:
            v (float): control velocity output, clamped to [MIN_V, MAX_V]
        """
        e_v = v_d - self.robot.v
        self.i_v += e_v * self.params["TIME_STEP"]
        d_v = (e_v - self.e_v) / self.params["TIME_STEP"]
        self.e_v = e_v

        # PID output is the velocity increment, clamped to the allowed per-step change
        v_inc = self.k_v_p * e_v + self.k_v_i * self.i_v + self.k_v_d * d_v
        v_inc = MathHelper.clamp(v_inc, self.params["MIN_V_INC"], self.params["MAX_V_INC"])

        v = self.robot.v + v_inc
        v = MathHelper.clamp(v, self.params["MIN_V"], self.params["MAX_V"])

        return v

    def angularRegularization(self, w_d: float) -> float:
        """
        Angular velocity controller with pid.

        Parameters:
            w_d (float): reference angular velocity input

        Returns:
            w (float): control angular velocity output, clamped to [MIN_W, MAX_W]
        """
        e_w = w_d - self.robot.w
        self.i_w += e_w * self.params["TIME_STEP"]
        d_w = (e_w - self.e_w) / self.params["TIME_STEP"]
        self.e_w = e_w

        # PID output is the angular increment, clamped to the allowed per-step change
        w_inc = self.k_w_p * e_w + self.k_w_i * self.i_w + self.k_w_d * d_w
        w_inc = MathHelper.clamp(w_inc, self.params["MIN_W_INC"], self.params["MAX_W_INC"])

        w = self.robot.w + w_inc
        w = MathHelper.clamp(w, self.params["MIN_W"], self.params["MAX_W"])

        return w
@@ -0,0 +1,147 @@
1
+ """
2
+ @file: rpp.py
3
+ @breif: Regulated Pure Pursuit (RPP) motion planning
4
+ @author: Yang Haodong, Wu Maojia
5
+ @update: 2024.5.21
6
+ """
7
+ import math
8
+ import numpy as np
9
+ from scipy.spatial.distance import cdist
10
+
11
+ from .local_planner import LocalPlanner
12
+ from python_motion_planning.utils import Env
13
+
14
+
15
class RPP(LocalPlanner):
    """
    Class for Regulated Pure Pursuit (RPP) motion planning.

    Pure pursuit tracking of a global A* path with the linear velocity
    regulated by turning-radius and obstacle-proximity constraints.

    Parameters:
        start (tuple): start point coordinate
        goal (tuple): goal point coordinate
        env (Env): environment
        heuristic_type (str): heuristic function type
        **params: other parameters can be found in the parent class LocalPlanner

    Examples:
        >>> from python_motion_planning.utils import Grid
        >>> from python_motion_planning.local_planner import RPP
        >>> start = (5, 5, 0)
        >>> goal = (45, 25, 0)
        >>> env = Grid(51, 31)
        >>> planner = RPP(start, goal, env)
        >>> planner.run()
    """
    def __init__(self, start: tuple, goal: tuple, env: Env, heuristic_type: str = "euclidean", **params) -> None:
        super().__init__(start, goal, env, heuristic_type, **params)
        # RPP parameters
        self.regulated_radius_min = 0.9   # turning radius below which speed is scaled down
        self.scaling_dist = 0.6           # obstacle distance below which speed is scaled down
        self.scaling_gain = 1.0           # gain of the obstacle-proximity scaling

        # global planner: A* provides the reference path to track
        g_start = (start[0], start[1])
        g_goal = (goal[0], goal[1])
        self.g_planner = {"planner_name": "a_star", "start": g_start, "goal": g_goal, "env": env}
        self.path = self.g_path[::-1]  # reverse so the path runs start -> goal

    def __str__(self) -> str:
        return "Regulated Pure Pursuit (RPP)"

    def plan(self):
        """
        RPP motion plan function.

        Simulates the robot forward for at most MAX_ITERATION steps, issuing
        one regulated pure-pursuit velocity command per step.

        Returns:
            flag (bool): planning successful if true else failed
            pose_list (list): history poses of robot (None on failure)
            lookahead_pts (list): history lookahead points (None on failure)
        """
        lookahead_pts = []
        dt = self.params["TIME_STEP"]
        for _ in range(self.params["MAX_ITERATION"]):
            # break until goal reached
            if self.reachGoal(tuple(self.robot.state.squeeze(axis=1)[0:3]), self.goal):
                return True, self.robot.history_pose, lookahead_pts

            # get the particular point on the path at the lookahead distance
            lookahead_pt, _, _ = self.getLookaheadPoint()

            # pure-pursuit tracking curvature towards the lookahead point
            lookahead_k = 2 * math.sin(
                self.angle(self.robot.position, lookahead_pt) - self.robot.theta
            ) / self.lookahead_dist

            # calculate velocity command
            e_theta = self.regularizeAngle(self.robot.theta - self.goal[2])
            if not self.shouldMoveToGoal(self.robot.position, self.goal):
                # close to the goal: only align the final heading
                if not self.shouldRotateToPath(abs(e_theta)):
                    u = np.array([[0], [0]])
                else:
                    u = np.array([[0], [self.angularRegularization(e_theta / dt)]])
            else:
                e_theta = self.regularizeAngle(self.angle(self.robot.position, lookahead_pt) - self.robot.theta)
                if self.shouldRotateToPath(abs(e_theta)):
                    # heading error too large: rotate in place first
                    u = np.array([[0], [self.angularRegularization(e_theta / dt)]])
                else:
                    # apply curvature and obstacle constraints, take the tighter one
                    curv_vel = self.applyCurvatureConstraint(self.params["MAX_V"], lookahead_k)
                    cost_vel = self.applyObstacleConstraint(self.params["MAX_V"])
                    v_d = min(curv_vel, cost_vel)
                    u = np.array([[self.linearRegularization(v_d)], [self.angularRegularization(v_d * lookahead_k)]])

            # update lookahead points
            lookahead_pts.append(lookahead_pt)

            # feed into robotic kinematic
            self.robot.kinematic(u, dt)

        return False, None, None

    def run(self):
        """
        Running both planning and animation.

        Raises:
            ValueError: if planning fails before reaching the goal
        """
        _, history_pose, lookahead_pts = self.plan()
        if not history_pose:
            raise ValueError("Path not found and planning failed!")

        path = np.array(history_pose)[:, 0:2]
        # total traveled distance = sum of consecutive pose-to-pose segment lengths
        cost = np.sum(np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1, keepdims=True)))
        self.plot.plotPath(self.path, path_color="r", path_style="--")
        self.plot.animation(path, str(self), cost, history_pose=history_pose, lookahead_pts=lookahead_pts)

    def applyCurvatureConstraint(self, raw_linear_vel: float, curvature: float) -> float:
        """
        Applying curvature constraints to regularize the speed of robot turning.

        Parameters:
            raw_linear_vel (float): the raw linear velocity of robot
            curvature (float): the tracking curvature

        Returns:
            reg_vel (float): the regulated velocity
        """
        # BUGFIX: guard against (near-)zero curvature, i.e. straight-line tracking.
        # The previous code divided by `curvature` unconditionally and raised
        # ZeroDivisionError when the heading error was exactly zero; an infinite
        # turning radius needs no speed reduction.
        if abs(curvature) < 1e-9:
            return raw_linear_vel
        radius = abs(1.0 / curvature)
        if radius < self.regulated_radius_min:
            # scale speed linearly with how tight the turn is
            return raw_linear_vel * (radius / self.regulated_radius_min)
        else:
            return raw_linear_vel

    def applyObstacleConstraint(self, raw_linear_vel: float) -> float:
        """
        Applying obstacle constraints to regularize the speed of robot approaching obstacles.

        Parameters:
            raw_linear_vel (float): the raw linear velocity of robot

        Returns:
            reg_vel (float): the regulated velocity
        """
        obstacles = np.array(list(self.obstacles))
        # BUGFIX: cdist fails on an empty array; with no obstacles there is
        # nothing to slow down for.
        if obstacles.size == 0:
            return raw_linear_vel
        D = cdist(obstacles, np.array([[self.robot.px, self.robot.py]]))
        obs_dist = np.min(D)
        if obs_dist < self.scaling_dist:
            # scale speed linearly with proximity to the nearest obstacle
            return raw_linear_vel * self.scaling_gain * obs_dist / self.scaling_dist
        else:
            return raw_linear_vel
@@ -0,0 +1,19 @@
1
+ from .helper import MathHelper
2
+ from .agent.agent import Robot
3
+ from .environment.env import Env, Grid, Map
4
+ from .environment.node import Node
5
+ from .environment.point2d import Point2D
6
+ from .environment.pose2d import Pose2D
7
+ from .plot.plot import Plot
8
+ from .planner.planner import Planner
9
+ from .planner.search_factory import SearchFactory
10
+ from .planner.curve_factory import CurveFactory
11
+ from .planner.control_factory import ControlFactory
12
+
13
+ __all__ = [
14
+ "MathHelper",
15
+ "Env", "Grid", "Map", "Node", "Point2D", "Pose2D",
16
+ "Plot",
17
+ "Planner", "SearchFactory", "CurveFactory", "ControlFactory",
18
+ "Robot"
19
+ ]
File without changes
@@ -0,0 +1,135 @@
1
+ """
2
+ @file: agent.py
3
+ @breif: Class for agent
4
+ @author: Yang Haodong, Wu Maojia
5
+ @update: 2024.3.29
6
+ """
7
+ import math
8
+ import numpy as np
9
+ from abc import abstractmethod, ABC
10
+
11
class Agent(ABC):
    """
    Abstract base class for agents.

    Parameters:
        px (float): initial x-position
        py (float): initial y-position
        theta (float): initial pose angle
    """
    def __init__(self, px, py, theta) -> None:
        self.px, self.py, self.theta = px, py, theta
        # custom parameters, populated later via setParameters()
        self.parameters = None

    def setParameters(self, **parameters) -> None:
        """Store custom parameters and expose each one as an instance attribute."""
        self.parameters = parameters
        for name in parameters:
            setattr(self, name, parameters[name])

    @property
    def position(self):
        """Current (x, y) position as a tuple."""
        return (self.px, self.py)

    @abstractmethod
    def kinematic(self, u, dt):
        """Advance the agent by one control step (implemented by subclasses)."""
        pass

    @property
    @abstractmethod
    def state(self):
        """Full state of the agent (implemented by subclasses)."""
        pass
+
45
+
46
class Robot(Agent):
    """
    Class for a differential-drive robot agent.

    Parameters:
        px (float): initial x-position
        py (float): initial y-position
        theta (float): initial pose angle
        v (float): initial linear velocity
        w (float): initial angular velocity
    """
    def __init__(self, px, py, theta, v, w) -> None:
        super().__init__(px, py, theta)
        # velocity
        self.v = v
        self.w = w
        # history of (x, y, theta) poses visited so far
        self.history_pose = []

    def __str__(self) -> str:
        return "Robot"

    def kinematic(self, u: np.ndarray, dt: float, replace: bool = True):
        """
        Run robot kinematic once.

        Parameters:
            u (np.ndarray): control command with [v, w]
            dt (float): simulation time step
            replace (bool): update self if true else return a new Robot object

        Returns:
            robot (Robot): a new robot object (only when replace is False)
        """
        new_state = self.lookforward(self.state, u, dt).squeeze().tolist()
        if replace:
            self.history_pose.append((self.px, self.py, self.theta))
            self.px, self.py, self.theta = new_state[0], new_state[1], new_state[2]
            self.v, self.w = new_state[3], new_state[4]
        else:
            new_robot = Robot(new_state[0], new_state[1], new_state[2],
                              new_state[3], new_state[4])
            # BUGFIX: setParameters only accepts keyword arguments; the previous
            # positional call `setParameters(self.parameters)` raised TypeError,
            # and self.parameters may still be None before any setParameters call.
            if self.parameters:
                new_robot.setParameters(**self.parameters)
            return new_robot

    def lookforward(self, state: np.ndarray, u: np.ndarray, dt: float) -> np.ndarray:
        """
        Run robot kinematic once but do not update the robot itself.

        Parameters:
            state (np.ndarray (5x1)): robot state with [x, y, theta, v, w]
            u (np.ndarray): control command with [v, w]
            dt (float): simulation time step

        Returns:
            new_state (np.ndarray (5x1)): new robot state with [x, y, theta, v, w]
        """
        # extract a scalar heading: math.cos/sin on a 1-element array relies on a
        # deprecated NumPy array->scalar conversion
        theta = float(state[2])
        # F drops the old velocities; B injects the pose change and the new command
        F = np.array([[1, 0, 0, 0, 0],
                      [0, 1, 0, 0, 0],
                      [0, 0, 1, 0, 0],
                      [0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0]])
        B = np.array([[dt * math.cos(theta),  0],
                      [dt * math.sin(theta),  0],
                      [                   0, dt],
                      [                   1,  0],
                      [                   0,  1]])
        new_state = F @ state + B @ u

        return new_state

    def reset(self) -> None:
        """
        Reset velocities to zero and clear the pose history.
        """
        self.v = 0
        self.w = 0
        self.history_pose = []

    @property
    def state(self) -> np.ndarray:
        """
        Get the state.

        Returns:
            state (np.ndarray (5x1)): robot state with [x, y, theta, v, w]
        """
        state = np.array([[self.px], [self.py], [self.theta], [self.v], [self.w]])
        return state
File without changes