gr-libs 0.1.8__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. gr_libs/__init__.py +3 -1
  2. gr_libs/_version.py +2 -2
  3. gr_libs/all_experiments.py +260 -0
  4. gr_libs/environment/__init__.py +14 -1
  5. gr_libs/environment/_utils/__init__.py +0 -0
  6. gr_libs/environment/{utils → _utils}/utils.py +1 -1
  7. gr_libs/environment/environment.py +278 -23
  8. gr_libs/evaluation/__init__.py +1 -0
  9. gr_libs/evaluation/generate_experiments_results.py +100 -0
  10. gr_libs/metrics/__init__.py +2 -0
  11. gr_libs/metrics/metrics.py +166 -31
  12. gr_libs/ml/__init__.py +1 -6
  13. gr_libs/ml/base/__init__.py +3 -1
  14. gr_libs/ml/base/rl_agent.py +68 -3
  15. gr_libs/ml/neural/__init__.py +1 -3
  16. gr_libs/ml/neural/deep_rl_learner.py +241 -84
  17. gr_libs/ml/neural/utils/__init__.py +1 -2
  18. gr_libs/ml/planner/mcts/{utils → _utils}/tree.py +1 -1
  19. gr_libs/ml/planner/mcts/mcts_model.py +71 -34
  20. gr_libs/ml/sequential/__init__.py +0 -1
  21. gr_libs/ml/sequential/{lstm_model.py → _lstm_model.py} +11 -14
  22. gr_libs/ml/tabular/__init__.py +1 -3
  23. gr_libs/ml/tabular/tabular_q_learner.py +27 -9
  24. gr_libs/ml/tabular/tabular_rl_agent.py +22 -9
  25. gr_libs/ml/utils/__init__.py +2 -9
  26. gr_libs/ml/utils/format.py +13 -90
  27. gr_libs/ml/utils/math.py +3 -2
  28. gr_libs/ml/utils/other.py +2 -2
  29. gr_libs/ml/utils/storage.py +41 -94
  30. gr_libs/odgr_executor.py +263 -0
  31. gr_libs/problems/consts.py +570 -292
  32. gr_libs/recognizer/{utils → _utils}/format.py +2 -2
  33. gr_libs/recognizer/gr_as_rl/gr_as_rl_recognizer.py +127 -36
  34. gr_libs/recognizer/graml/{gr_dataset.py → _gr_dataset.py} +11 -11
  35. gr_libs/recognizer/graml/graml_recognizer.py +186 -35
  36. gr_libs/recognizer/recognizer.py +59 -10
  37. gr_libs/tutorials/draco_panda_tutorial.py +58 -0
  38. gr_libs/tutorials/draco_parking_tutorial.py +56 -0
  39. {tutorials → gr_libs/tutorials}/gcdraco_panda_tutorial.py +11 -11
  40. {tutorials → gr_libs/tutorials}/gcdraco_parking_tutorial.py +6 -8
  41. {tutorials → gr_libs/tutorials}/graml_minigrid_tutorial.py +18 -14
  42. {tutorials → gr_libs/tutorials}/graml_panda_tutorial.py +11 -12
  43. {tutorials → gr_libs/tutorials}/graml_parking_tutorial.py +8 -10
  44. {tutorials → gr_libs/tutorials}/graml_point_maze_tutorial.py +17 -3
  45. {tutorials → gr_libs/tutorials}/graql_minigrid_tutorial.py +2 -2
  46. {gr_libs-0.1.8.dist-info → gr_libs-0.2.5.dist-info}/METADATA +95 -29
  47. gr_libs-0.2.5.dist-info/RECORD +72 -0
  48. {gr_libs-0.1.8.dist-info → gr_libs-0.2.5.dist-info}/WHEEL +1 -1
  49. gr_libs-0.2.5.dist-info/top_level.txt +2 -0
  50. tests/test_draco.py +14 -0
  51. tests/test_gcdraco.py +2 -2
  52. tests/test_graml.py +4 -4
  53. tests/test_graql.py +1 -1
  54. tests/test_odgr_executor_expertbasedgraml.py +14 -0
  55. tests/test_odgr_executor_gcdraco.py +14 -0
  56. tests/test_odgr_executor_gcgraml.py +14 -0
  57. tests/test_odgr_executor_graql.py +14 -0
  58. evaluation/analyze_results_cross_alg_cross_domain.py +0 -267
  59. evaluation/create_minigrid_map_image.py +0 -38
  60. evaluation/file_system.py +0 -53
  61. evaluation/generate_experiments_results.py +0 -141
  62. evaluation/generate_experiments_results_new_ver1.py +0 -238
  63. evaluation/generate_experiments_results_new_ver2.py +0 -331
  64. evaluation/generate_task_specific_statistics_plots.py +0 -500
  65. evaluation/get_plans_images.py +0 -62
  66. evaluation/increasing_and_decreasing_.py +0 -104
  67. gr_libs/ml/neural/utils/penv.py +0 -60
  68. gr_libs-0.1.8.dist-info/RECORD +0 -70
  69. gr_libs-0.1.8.dist-info/top_level.txt +0 -4
  70. /gr_libs/{environment/utils/__init__.py → _evaluation/_generate_experiments_results.py} +0 -0
  71. /gr_libs/ml/planner/mcts/{utils → _utils}/__init__.py +0 -0
  72. /gr_libs/ml/planner/mcts/{utils → _utils}/node.py +0 -0
  73. /gr_libs/recognizer/{utils → _utils}/__init__.py +0 -0
@@ -1,141 +0,0 @@
1
- import copy
2
- import sys
3
- import matplotlib.pyplot as plt
4
- import numpy as np
5
- import os
6
- import dill
7
- from gr_libs.ml.utils.storage import (
8
- get_experiment_results_path,
9
- set_global_storage_configs,
10
- )
11
- from scripts.generate_task_specific_statistics_plots import get_figures_dir_path
12
-
13
-
14
- def gen_graph(
15
- graph_name,
16
- x_label_str,
17
- tasks,
18
- panda_env,
19
- minigrid_env,
20
- parking_env,
21
- maze_env,
22
- percentage,
23
- ):
24
-
25
- fragmented_accuracies = {
26
- "graml": {
27
- #'panda': [],
28
- #'minigrid': [],
29
- #'point_maze': [],
30
- "parking": []
31
- },
32
- "graql": {
33
- #'panda': [],
34
- #'minigrid': [],
35
- #'point_maze': [],
36
- "parking": []
37
- },
38
- }
39
-
40
- continuing_accuracies = copy.deepcopy(fragmented_accuracies)
41
-
42
- # domains_envs = [('minigrid', minigrid_env), ('point_maze', maze_env), ('parking', parking_env)]
43
- domains_envs = [("parking", parking_env)]
44
-
45
- for partial_obs_type, accuracies, is_same_learn in zip(
46
- ["fragmented", "continuing"],
47
- [fragmented_accuracies, continuing_accuracies],
48
- [False, True],
49
- ):
50
- for domain, env in domains_envs:
51
- for task in tasks:
52
- set_global_storage_configs(
53
- recognizer_str="graml",
54
- is_fragmented=partial_obs_type,
55
- is_inference_same_length_sequences=True,
56
- is_learn_same_length_sequences=is_same_learn,
57
- )
58
- graml_res_file_path = (
59
- f"{get_experiment_results_path(domain, env, task)}.pkl"
60
- )
61
- set_global_storage_configs(
62
- recognizer_str="graql", is_fragmented=partial_obs_type
63
- )
64
- graql_res_file_path = (
65
- f"{get_experiment_results_path(domain, env, task)}.pkl"
66
- )
67
- if os.path.exists(graml_res_file_path):
68
- with open(graml_res_file_path, "rb") as results_file:
69
- results = dill.load(results_file)
70
- accuracies["graml"][domain].append(
71
- results[percentage]["accuracy"]
72
- )
73
- else:
74
- assert (False, f"no file for {graml_res_file_path}")
75
- if os.path.exists(graql_res_file_path):
76
- with open(graql_res_file_path, "rb") as results_file:
77
- results = dill.load(results_file)
78
- accuracies["graql"][domain].append(
79
- results[percentage]["accuracy"]
80
- )
81
- else:
82
- assert (False, f"no file for {graql_res_file_path}")
83
-
84
- def plot_accuracies(accuracies, partial_obs_type):
85
- plt.figure(figsize=(10, 6))
86
- colors = plt.cm.get_cmap(
87
- "tab10", len(accuracies["graml"]) * len(accuracies["graml"]["parking"])
88
- )
89
-
90
- # Define different line styles for each algorithm
91
- line_styles = {"graml": "-", "graql": "--"}
92
- x_vals = np.arange(3, 8)
93
- plt.xticks(x_vals)
94
- plt.yticks(np.linspace(0, 1, 6))
95
- plt.ylim([0, 1])
96
- # Plot each domain-env pair's accuracies with different line styles for each algorithm
97
- for alg in ["graml", "graql"]:
98
- for idx, (domain, acc_values) in enumerate(accuracies[alg].items()):
99
- if acc_values and len(acc_values) > 0: # Only plot if there are values
100
- x_values = np.arange(3, len(acc_values) + 3)
101
- plt.plot(
102
- x_values,
103
- acc_values,
104
- marker="o",
105
- linestyle=line_styles[alg],
106
- color=colors(idx),
107
- label=f"{alg}-{domain}-{partial_obs_type}-{percentage}",
108
- )
109
-
110
- # Set labels, title, and grid
111
- plt.xlabel(x_label_str)
112
- plt.ylabel("Accuracy")
113
- plt.grid(True)
114
-
115
- # Add legend to differentiate between domain-env pairs
116
- plt.legend()
117
-
118
- # Save the figure
119
- fig_path = os.path.join(f"{graph_name}_{partial_obs_type}.png")
120
- plt.savefig(fig_path)
121
- print(f"Accuracies figure saved at: {fig_path}")
122
-
123
- print(f"fragmented_accuracies: {fragmented_accuracies}")
124
- plot_accuracies(fragmented_accuracies, "fragmented")
125
- print(f"continuing_accuracies: {continuing_accuracies}")
126
- plot_accuracies(continuing_accuracies, "continuing")
127
-
128
-
129
- if __name__ == "__main__":
130
- # gen_graph("increasing_base_goals", "Number of base goals", ['L1', 'L2', 'L3', 'L4', 'L5'], panda_env='gd_agent', minigrid_env='obstacles', parking_env='gd_agent', maze_env='obstacles')
131
- # gen_graph("increasing_dynamic_goals", "Number of dynamic goals", ['L1', 'L2', 'L3', 'L4', 'L5'], panda_env='gc_agent', minigrid_env='lava_crossing', parking_env='gc_agent', maze_env='four_rooms')
132
- gen_graph(
133
- "base_problems",
134
- "Number of goals",
135
- ["L111", "L222", "L333", "L444", "L555"],
136
- panda_env="gd_agent",
137
- minigrid_env="obstacles",
138
- parking_env="gc_agent",
139
- maze_env="obstacles",
140
- percentage="0.7",
141
- )
@@ -1,238 +0,0 @@
1
- import copy
2
- import sys
3
- import matplotlib.pyplot as plt
4
- import numpy as np
5
- import os
6
- import dill
7
-
8
- from gr_libs.ml.utils.storage import (
9
- get_experiment_results_path,
10
- set_global_storage_configs,
11
- )
12
- from scripts.generate_task_specific_statistics_plots import get_figures_dir_path
13
-
14
- if __name__ == "__main__":
15
-
16
- fragmented_accuracies = {
17
- "graml": {
18
- "panda": {
19
- "gd_agent": {
20
- "0.3": [], # every list here should have number of tasks accuracies in it, since we done experiments for L111-L555. remember each accuracy is an average of #goals different tasks.
21
- "0.5": [],
22
- "0.7": [],
23
- "0.9": [],
24
- "1": [],
25
- },
26
- "gc_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
27
- },
28
- "minigrid": {
29
- "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
30
- "lava_crossing": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
31
- },
32
- "point_maze": {
33
- "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
34
- "four_rooms": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
35
- },
36
- "parking": {
37
- "gd_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
38
- "gc_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
39
- },
40
- },
41
- "graql": {
42
- "panda": {
43
- "gd_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
44
- "gc_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
45
- },
46
- "minigrid": {
47
- "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
48
- "lava_crossing": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
49
- },
50
- "point_maze": {
51
- "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
52
- "four_rooms": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
53
- },
54
- "parking": {
55
- "gd_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
56
- "gc_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
57
- },
58
- },
59
- }
60
-
61
- continuing_accuracies = copy.deepcopy(fragmented_accuracies)
62
-
63
- # domains = ['panda', 'minigrid', 'point_maze', 'parking']
64
- domains = ["minigrid", "point_maze", "parking"]
65
- tasks = ["L111", "L222", "L333", "L444", "L555"]
66
- percentages = ["0.3", "0.5", "0.7", "0.9", "1"]
67
-
68
- for partial_obs_type, accuracies, is_same_learn in zip(
69
- ["fragmented", "continuing"],
70
- [fragmented_accuracies, continuing_accuracies],
71
- [False, True],
72
- ):
73
- for domain in domains:
74
- for env in accuracies["graml"][domain].keys():
75
- for task in tasks:
76
- set_global_storage_configs(
77
- recognizer_str="graml",
78
- is_fragmented=partial_obs_type,
79
- is_inference_same_length_sequences=True,
80
- is_learn_same_length_sequences=is_same_learn,
81
- )
82
- graml_res_file_path = (
83
- f"{get_experiment_results_path(domain, env, task)}.pkl"
84
- )
85
- set_global_storage_configs(
86
- recognizer_str="graql", is_fragmented=partial_obs_type
87
- )
88
- graql_res_file_path = (
89
- f"{get_experiment_results_path(domain, env, task)}.pkl"
90
- )
91
- if os.path.exists(graml_res_file_path):
92
- with open(graml_res_file_path, "rb") as results_file:
93
- results = dill.load(results_file)
94
- for percentage in accuracies["graml"][domain][env].keys():
95
- accuracies["graml"][domain][env][percentage].append(
96
- results[percentage]["accuracy"]
97
- )
98
- else:
99
- assert (False, f"no file for {graml_res_file_path}")
100
- if os.path.exists(graql_res_file_path):
101
- with open(graql_res_file_path, "rb") as results_file:
102
- results = dill.load(results_file)
103
- for percentage in accuracies["graml"][domain][env].keys():
104
- accuracies["graql"][domain][env][percentage].append(
105
- results[percentage]["accuracy"]
106
- )
107
- else:
108
- assert (False, f"no file for {graql_res_file_path}")
109
-
110
- plot_styles = {
111
- ("graml", "fragmented", 0.3): "g--o", # Green dashed line with circle markers
112
- ("graml", "fragmented", 0.5): "g--s", # Green dashed line with square markers
113
- (
114
- "graml",
115
- "fragmented",
116
- 0.7,
117
- ): "g--^", # Green dashed line with triangle-up markers
118
- ("graml", "fragmented", 0.9): "g--d", # Green dashed line with diamond markers
119
- ("graml", "fragmented", 1.0): "g--*", # Green dashed line with star markers
120
- ("graml", "continuing", 0.3): "g-o", # Green solid line with circle markers
121
- ("graml", "continuing", 0.5): "g-s", # Green solid line with square markers
122
- (
123
- "graml",
124
- "continuing",
125
- 0.7,
126
- ): "g-^", # Green solid line with triangle-up markers
127
- ("graml", "continuing", 0.9): "g-d", # Green solid line with diamond markers
128
- ("graml", "continuing", 1.0): "g-*", # Green solid line with star markers
129
- ("graql", "fragmented", 0.3): "b--o", # Blue dashed line with circle markers
130
- ("graql", "fragmented", 0.5): "b--s", # Blue dashed line with square markers
131
- (
132
- "graql",
133
- "fragmented",
134
- 0.7,
135
- ): "b--^", # Blue dashed line with triangle-up markers
136
- ("graql", "fragmented", 0.9): "b--d", # Blue dashed line with diamond markers
137
- ("graql", "fragmented", 1.0): "b--*", # Blue dashed line with star markers
138
- ("graql", "continuing", 0.3): "b-o", # Blue solid line with circle markers
139
- ("graql", "continuing", 0.5): "b-s", # Blue solid line with square markers
140
- ("graql", "continuing", 0.7): "b-^", # Blue solid line with triangle-up markers
141
- ("graql", "continuing", 0.9): "b-d", # Blue solid line with diamond markers
142
- ("graql", "continuing", 1.0): "b-*", # Blue solid line with star markers
143
- }
144
-
145
- def average_accuracies(accuracies, domain):
146
- avg_acc = {
147
- algo: {perc: [] for perc in percentages} for algo in ["graml", "graql"]
148
- }
149
-
150
- for algo in avg_acc.keys():
151
- for perc in percentages:
152
- for env in accuracies[algo][domain].keys():
153
- env_acc = accuracies[algo][domain][env][
154
- perc
155
- ] # list of 5, averages for L111 to L555.
156
- if env_acc:
157
- avg_acc[algo][perc].append(np.array(env_acc))
158
-
159
- for algo in avg_acc.keys():
160
- for perc in percentages:
161
- if avg_acc[algo][perc]:
162
- avg_acc[algo][perc] = np.mean(np.array(avg_acc[algo][perc]), axis=0)
163
-
164
- return avg_acc
165
-
166
- def plot_domain_accuracies(
167
- ax, fragmented_accuracies, continuing_accuracies, domain
168
- ):
169
- fragmented_avg_acc = average_accuracies(fragmented_accuracies, domain)
170
- continuing_avg_acc = average_accuracies(continuing_accuracies, domain)
171
-
172
- x_vals = np.arange(1, 6) # Number of goals
173
-
174
- # Create "waves" (shaded regions) for each algorithm
175
- for algo in ["graml", "graql"]:
176
- for perc in percentages:
177
- fragmented_y_vals = np.array(fragmented_avg_acc[algo][perc])
178
- continuing_y_vals = np.array(continuing_avg_acc[algo][perc])
179
-
180
- ax.plot(
181
- x_vals,
182
- fragmented_y_vals,
183
- plot_styles[
184
- (algo, "fragmented", float(perc))
185
- ], # Use the updated plot_styles dictionary with percentage
186
- label=f"{algo}, non-consecutive, {perc}",
187
- )
188
- ax.plot(
189
- x_vals,
190
- continuing_y_vals,
191
- plot_styles[
192
- (algo, "continuing", float(perc))
193
- ], # Use the updated plot_styles dictionary with percentage
194
- label=f"{algo}, consecutive, {perc}",
195
- )
196
-
197
- ax.set_xticks(x_vals)
198
- ax.set_yticks(np.linspace(0, 1, 6))
199
- ax.set_ylim([0, 1])
200
- ax.set_title(f"{domain.capitalize()} Domain", fontsize=16)
201
- ax.grid(True)
202
-
203
- fig, axes = plt.subplots(
204
- 1, 4, figsize=(24, 6)
205
- ) # Increase the figure size for better spacing (width 24, height 6)
206
-
207
- # Generate each plot in a subplot, including both fragmented and continuing accuracies
208
- for i, domain in enumerate(domains):
209
- plot_domain_accuracies(
210
- axes[i], fragmented_accuracies, continuing_accuracies, domain
211
- )
212
-
213
- # Set a single x-axis and y-axis label for the entire figure
214
- fig.text(
215
- 0.5, 0.04, "Number of Goals", ha="center", fontsize=20
216
- ) # Centered x-axis label
217
- fig.text(
218
- 0.04, 0.5, "Accuracy", va="center", rotation="vertical", fontsize=20
219
- ) # Reduced spacing for y-axis label
220
-
221
- # Adjust subplot layout to avoid overlap
222
- plt.subplots_adjust(
223
- left=0.09, right=0.91, top=0.76, bottom=0.24, wspace=0.3
224
- ) # More space on top (top=0.82)
225
-
226
- # Place the legend above the plots with more space between legend and plots
227
- handles, labels = axes[0].get_legend_handles_labels()
228
- fig.legend(
229
- handles,
230
- labels,
231
- loc="upper center",
232
- ncol=4,
233
- bbox_to_anchor=(0.5, 1.05),
234
- fontsize=12,
235
- ) # Moved above with bbox_to_anchor
236
-
237
- # Save the figure and show it
238
- plt.savefig("accuracy_plots.png", dpi=300)