gr-libs 0.2.2__py3-none-any.whl → 0.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
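For readers who want to reproduce a comparison like this locally, the sketch below uses only the Python standard library to list the files inside each wheel and print a unified diff of one member. It is only an illustration, not the tooling behind this page: the wheels/ directory, the exact wheel filenames, and the gr_libs/_version.py member picked for the diff are assumptions, and both wheels are expected to have been fetched beforehand (for example with pip download gr_libs==0.2.2 --no-deps -d wheels/ and the same for 0.2.6).

# Minimal sketch (not the registry's tooling): compare two locally downloaded gr-libs wheels.
# Assumed paths and member name below are illustrative only.
import difflib
import zipfile

OLD_WHEEL = "wheels/gr_libs-0.2.2-py3-none-any.whl"
NEW_WHEEL = "wheels/gr_libs-0.2.6-py3-none-any.whl"


def wheel_members(path):
    # A wheel is a zip archive; its member list is what the "Files changed" table summarizes.
    with zipfile.ZipFile(path) as wheel:
        return set(wheel.namelist())


def diff_member(old_path, new_path, member):
    # Unified diff of a single file that exists in both wheels.
    with zipfile.ZipFile(old_path) as old, zipfile.ZipFile(new_path) as new:
        old_lines = old.read(member).decode("utf-8", errors="replace").splitlines(keepends=True)
        new_lines = new.read(member).decode("utf-8", errors="replace").splitlines(keepends=True)
    return "".join(
        difflib.unified_diff(
            old_lines, new_lines, fromfile="0.2.2/" + member, tofile="0.2.6/" + member
        )
    )


if __name__ == "__main__":
    old_members, new_members = wheel_members(OLD_WHEEL), wheel_members(NEW_WHEEL)
    print("added:", sorted(new_members - old_members))
    print("removed:", sorted(old_members - new_members))
    print(diff_member(OLD_WHEEL, NEW_WHEEL, "gr_libs/_version.py"))

Comparing the namelist() output of the two archives reproduces the added/removed portion of the file list below; diffing individual members reproduces the per-file hunks.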
Files changed (39)
  1. gr_libs/__init__.py +6 -1
  2. gr_libs/_evaluation/_generate_experiments_results.py +0 -141
  3. gr_libs/_version.py +2 -2
  4. gr_libs/all_experiments.py +73 -107
  5. gr_libs/environment/environment.py +126 -17
  6. gr_libs/evaluation/generate_experiments_results.py +100 -0
  7. gr_libs/ml/consts.py +1 -0
  8. gr_libs/ml/neural/deep_rl_learner.py +118 -34
  9. gr_libs/odgr_executor.py +27 -27
  10. gr_libs/problems/consts.py +568 -290
  11. gr_libs/recognizer/_utils/__init__.py +1 -0
  12. gr_libs/recognizer/_utils/format.py +7 -1
  13. gr_libs/recognizer/gr_as_rl/gr_as_rl_recognizer.py +158 -2
  14. gr_libs/recognizer/graml/graml_recognizer.py +18 -10
  15. gr_libs/recognizer/recognizer.py +4 -4
  16. gr_libs/tutorials/gcaura_panda_tutorial.py +168 -0
  17. gr_libs/tutorials/gcaura_parking_tutorial.py +167 -0
  18. gr_libs/tutorials/gcaura_point_maze_tutorial.py +169 -0
  19. gr_libs/tutorials/gcdraco_panda_tutorial.py +6 -2
  20. gr_libs/tutorials/gcdraco_parking_tutorial.py +3 -1
  21. gr_libs/tutorials/graml_minigrid_tutorial.py +16 -12
  22. gr_libs/tutorials/graml_panda_tutorial.py +6 -2
  23. gr_libs/tutorials/graml_parking_tutorial.py +3 -1
  24. gr_libs/tutorials/graml_point_maze_tutorial.py +15 -2
  25. {gr_libs-0.2.2.dist-info → gr_libs-0.2.6.dist-info}/METADATA +31 -15
  26. {gr_libs-0.2.2.dist-info → gr_libs-0.2.6.dist-info}/RECORD +35 -29
  27. {gr_libs-0.2.2.dist-info → gr_libs-0.2.6.dist-info}/WHEEL +1 -1
  28. tests/test_gcaura.py +15 -0
  29. tests/test_odgr_executor_expertbasedgraml.py +14 -0
  30. tests/test_odgr_executor_gcaura.py +14 -0
  31. tests/test_odgr_executor_gcdraco.py +14 -0
  32. tests/test_odgr_executor_gcgraml.py +14 -0
  33. tests/test_odgr_executor_graql.py +14 -0
  34. gr_libs/_evaluation/_analyze_results_cross_alg_cross_domain.py +0 -260
  35. gr_libs/_evaluation/_generate_task_specific_statistics_plots.py +0 -497
  36. gr_libs/_evaluation/_get_plans_images.py +0 -61
  37. gr_libs/_evaluation/_increasing_and_decreasing_.py +0 -106
  38. /gr_libs/{_evaluation → evaluation}/__init__.py +0 -0
  39. {gr_libs-0.2.2.dist-info → gr_libs-0.2.6.dist-info}/top_level.txt +0 -0
gr_libs/_evaluation/_analyze_results_cross_alg_cross_domain.py (deleted)
@@ -1,260 +0,0 @@
- import copy
- import os
-
- import dill
- import matplotlib.pyplot as plt
- import numpy as np
- from scipy.interpolate import make_interp_spline
- from scipy.ndimage import gaussian_filter1d
-
- from gr_libs.ml.utils.storage import get_experiment_results_path
-
-
- def smooth_line(x, y, num_points=300):
-     x_smooth = np.linspace(np.min(x), np.max(x), num_points)
-     spline = make_interp_spline(x, y, k=3)  # Cubic spline
-     y_smooth = spline(x_smooth)
-     return x_smooth, y_smooth
-
-
- if __name__ == "__main__":
-
-     fragmented_accuracies = {
-         "ExpertBasedGraml": {
-             "minigrid": {
-                 "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-                 "lava_crossing": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-             }
-         },
-         "Graql": {
-             "minigrid": {
-                 "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-                 "lava_crossing": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-             }
-         },
-     }
-
-     continuing_accuracies = copy.deepcopy(fragmented_accuracies)
-
-     # domains = ['panda', 'minigrid', 'point_maze', 'parking']
-     domains = ["minigrid"]
-     tasks = ["L1", "L2", "L3", "L4", "L5"]
-     percentages = ["0.3", "0.5", "1"]
-
-     for partial_obs_type, accuracies, is_same_learn in zip(
-         ["fragmented", "continuing"],
-         [fragmented_accuracies, continuing_accuracies],
-         [False, True],
-     ):
-         for domain in domains:
-             for env in accuracies["ExpertBasedGraml"][domain].keys():
-                 for task in tasks:
-                     graml_res_file_path = f"{get_experiment_results_path(domain, env, task, 'ExpertBasedGraml')}.pkl"
-                     graql_res_file_path = (
-                         f"{get_experiment_results_path(domain, env, task, 'Graql')}.pkl"
-                     )
-                     if os.path.exists(graml_res_file_path):
-                         with open(graml_res_file_path, "rb") as results_file:
-                             results = dill.load(results_file)
-                             for percentage in accuracies["expertbasedgraml"][domain][
-                                 env
-                             ].keys():
-                                 accuracies["expertbasedgraml"][domain][env][
-                                     percentage
-                                 ].append(results[percentage]["accuracy"])
-                     else:
-                         assert False, f"no file for {graml_res_file_path}"
-                     if os.path.exists(graql_res_file_path):
-                         with open(graql_res_file_path, "rb") as results_file:
-                             results = dill.load(results_file)
-                             for percentage in accuracies["expertbasedgraml"][domain][
-                                 env
-                             ].keys():
-                                 accuracies["Graql"][domain][env][percentage].append(
-                                     results[percentage]["accuracy"]
-                                 )
-                     else:
-                         assert False, f"no file for {graql_res_file_path}"
-
-     plot_styles = {
-         (
-             "expertbasedgraml",
-             "fragmented",
-             0.3,
-         ): "g--o",  # Green dashed line with circle markers
-         (
-             "expertbasedgraml",
-             "fragmented",
-             0.5,
-         ): "g--s",  # Green dashed line with square markers
-         (
-             "expertbasedgraml",
-             "fragmented",
-             0.7,
-         ): "g--^",  # Green dashed line with triangle-up markers
-         (
-             "expertbasedgraml",
-             "fragmented",
-             0.9,
-         ): "g--d",  # Green dashed line with diamond markers
-         (
-             "expertbasedgraml",
-             "fragmented",
-             1.0,
-         ): "g--*",  # Green dashed line with star markers
-         (
-             "expertbasedgraml",
-             "continuing",
-             0.3,
-         ): "g-o",  # Green solid line with circle markers
-         (
-             "expertbasedgraml",
-             "continuing",
-             0.5,
-         ): "g-s",  # Green solid line with square markers
-         (
-             "expertbasedgraml",
-             "continuing",
-             0.7,
-         ): "g-^",  # Green solid line with triangle-up markers
-         (
-             "expertbasedgraml",
-             "continuing",
-             0.9,
-         ): "g-d",  # Green solid line with diamond markers
-         (
-             "expertbasedgraml",
-             "continuing",
-             1.0,
-         ): "g-*",  # Green solid line with star markers
-         ("Graql", "fragmented", 0.3): "b--o",  # Blue dashed line with circle markers
-         ("Graql", "fragmented", 0.5): "b--s",  # Blue dashed line with square markers
-         (
-             "Graql",
-             "fragmented",
-             0.7,
-         ): "b--^",  # Blue dashed line with triangle-up markers
-         ("Graql", "fragmented", 0.9): "b--d",  # Blue dashed line with diamond markers
-         ("Graql", "fragmented", 1.0): "b--*",  # Blue dashed line with star markers
-         ("Graql", "continuing", 0.3): "b-o",  # Blue solid line with circle markers
-         ("Graql", "continuing", 0.5): "b-s",  # Blue solid line with square markers
-         ("Graql", "continuing", 0.7): "b-^",  # Blue solid line with triangle-up markers
-         ("Graql", "continuing", 0.9): "b-d",  # Blue solid line with diamond markers
-         ("Graql", "continuing", 1.0): "b-*",  # Blue solid line with star markers
-     }
-
-     def average_accuracies(accuracies, domain):
-         avg_acc = {
-             algo: {perc: [] for perc in percentages}
-             for algo in ["ExpertBasedGraml", "Graql"]
-         }
-
-         for algo in avg_acc.keys():
-             for perc in percentages:
-                 for env in accuracies[algo][domain].keys():
-                     env_acc = accuracies[algo][domain][env][
-                         perc
-                     ]  # list of 5, averages for L111 to L555.
-                     if env_acc:
-                         avg_acc[algo][perc].append(np.array(env_acc))
-
-         for algo in avg_acc.keys():
-             for perc in percentages:
-                 if avg_acc[algo][perc]:
-                     avg_acc[algo][perc] = np.mean(np.array(avg_acc[algo][perc]), axis=0)
-
-         return avg_acc
-
-     def plot_domain_accuracies(
-         ax,
-         fragmented_accuracies,
-         continuing_accuracies,
-         domain,
-         sigma=1,
-         line_width=1.5,
-     ):
-         fragmented_avg_acc = average_accuracies(fragmented_accuracies, domain)
-         continuing_avg_acc = average_accuracies(continuing_accuracies, domain)
-
-         x_vals = np.arange(1, 6)  # Number of goals
-
-         # Create "waves" (shaded regions) for each algorithm
-         for algo in ["ExpertBasedGraml", "Graql"]:
-             fragmented_y_vals_by_percentage = []
-             continuing_y_vals_by_percentage = []
-
-             for perc in percentages:
-                 fragmented_y_vals = np.array(fragmented_avg_acc[algo][perc])
-                 continuing_y_vals = np.array(continuing_avg_acc[algo][perc])
-
-                 # Smooth the trends using Gaussian filtering
-                 fragmented_y_smoothed = gaussian_filter1d(
-                     fragmented_y_vals, sigma=sigma
-                 )
-                 continuing_y_smoothed = gaussian_filter1d(
-                     continuing_y_vals, sigma=sigma
-                 )
-
-                 fragmented_y_vals_by_percentage.append(fragmented_y_smoothed)
-                 continuing_y_vals_by_percentage.append(continuing_y_smoothed)
-
-                 ax.plot(
-                     x_vals,
-                     fragmented_y_smoothed,
-                     plot_styles[(algo, "fragmented", float(perc))],
-                     label=f"{algo}, non-consecutive, {perc}",
-                     linewidth=0.5,  # Control line thickness here
-                 )
-                 ax.plot(
-                     x_vals,
-                     continuing_y_smoothed,
-                     plot_styles[(algo, "continuing", float(perc))],
-                     label=f"{algo}, consecutive, {perc}",
-                     linewidth=0.5,  # Control line thickness here
-                 )
-
-         ax.set_xticks(x_vals)
-         ax.set_yticks(np.linspace(0, 1, 6))
-         ax.set_ylim([0, 1])
-         ax.set_title(f"{domain.capitalize()} Domain", fontsize=16)
-         ax.grid(True)
-
-     fig, axes = plt.subplots(
-         1, 4, figsize=(24, 6)
-     )  # Increase the figure size for better spacing (width 24, height 6)
-
-     # Generate each plot in a subplot, including both fragmented and continuing accuracies
-     for i, domain in enumerate(domains):
-         plot_domain_accuracies(
-             axes[i], fragmented_accuracies, continuing_accuracies, domain
-         )
-
-     # Set a single x-axis and y-axis label for the entire figure
-     fig.text(
-         0.5, 0.04, "Number of Goals", ha="center", fontsize=20
-     )  # Centered x-axis label
-     fig.text(
-         0.04, 0.5, "Accuracy", va="center", rotation="vertical", fontsize=20
-     )  # Reduced spacing for y-axis label
-
-     # Adjust subplot layout to avoid overlap
-     plt.subplots_adjust(
-         left=0.09, right=0.91, top=0.79, bottom=0.21, wspace=0.3
-     )  # More space on top (top=0.82)
-
-     # Place the legend above the plots with more space between legend and plots
-     handles, labels = axes[0].get_legend_handles_labels()
-     fig.legend(
-         handles,
-         labels,
-         loc="upper center",
-         ncol=4,
-         bbox_to_anchor=(0.5, 1.05),
-         fontsize=12,
-     )  # Moved above with bbox_to_anchor
-
-     # Save the figure and show it
-     save_dir = os.path.join("figures", "all_domains_accuracy_plots")
-     if not os.path.exists(save_dir):
-         os.makedirs(save_dir)
-     plt.savefig(os.path.join(save_dir, "accuracy_plots_smooth.png"), dpi=300)