gr-libs 0.1.8__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gr_libs/__init__.py +3 -1
- gr_libs/_evaluation/__init__.py +1 -0
- evaluation/analyze_results_cross_alg_cross_domain.py → gr_libs/_evaluation/_analyze_results_cross_alg_cross_domain.py +81 -88
- evaluation/generate_experiments_results.py → gr_libs/_evaluation/_generate_experiments_results.py +6 -6
- evaluation/generate_task_specific_statistics_plots.py → gr_libs/_evaluation/_generate_task_specific_statistics_plots.py +11 -14
- evaluation/get_plans_images.py → gr_libs/_evaluation/_get_plans_images.py +3 -4
- evaluation/increasing_and_decreasing_.py → gr_libs/_evaluation/_increasing_and_decreasing_.py +3 -1
- gr_libs/_version.py +2 -2
- gr_libs/all_experiments.py +294 -0
- gr_libs/environment/__init__.py +14 -1
- gr_libs/environment/{utils → _utils}/utils.py +1 -1
- gr_libs/environment/environment.py +257 -22
- gr_libs/metrics/__init__.py +2 -0
- gr_libs/metrics/metrics.py +166 -31
- gr_libs/ml/__init__.py +1 -6
- gr_libs/ml/base/__init__.py +3 -1
- gr_libs/ml/base/rl_agent.py +68 -3
- gr_libs/ml/neural/__init__.py +1 -3
- gr_libs/ml/neural/deep_rl_learner.py +227 -67
- gr_libs/ml/neural/utils/__init__.py +1 -2
- gr_libs/ml/planner/mcts/{utils → _utils}/tree.py +1 -1
- gr_libs/ml/planner/mcts/mcts_model.py +71 -34
- gr_libs/ml/sequential/__init__.py +0 -1
- gr_libs/ml/sequential/{lstm_model.py → _lstm_model.py} +11 -14
- gr_libs/ml/tabular/__init__.py +1 -3
- gr_libs/ml/tabular/tabular_q_learner.py +27 -9
- gr_libs/ml/tabular/tabular_rl_agent.py +22 -9
- gr_libs/ml/utils/__init__.py +2 -9
- gr_libs/ml/utils/format.py +13 -90
- gr_libs/ml/utils/math.py +3 -2
- gr_libs/ml/utils/other.py +2 -2
- gr_libs/ml/utils/storage.py +41 -94
- gr_libs/odgr_executor.py +268 -0
- gr_libs/problems/consts.py +2 -2
- gr_libs/recognizer/_utils/__init__.py +0 -0
- gr_libs/recognizer/{utils → _utils}/format.py +2 -2
- gr_libs/recognizer/gr_as_rl/gr_as_rl_recognizer.py +116 -36
- gr_libs/recognizer/graml/{gr_dataset.py → _gr_dataset.py} +11 -11
- gr_libs/recognizer/graml/graml_recognizer.py +172 -29
- gr_libs/recognizer/recognizer.py +59 -10
- gr_libs/tutorials/draco_panda_tutorial.py +58 -0
- gr_libs/tutorials/draco_parking_tutorial.py +56 -0
- {tutorials → gr_libs/tutorials}/gcdraco_panda_tutorial.py +5 -9
- {tutorials → gr_libs/tutorials}/gcdraco_parking_tutorial.py +3 -7
- {tutorials → gr_libs/tutorials}/graml_minigrid_tutorial.py +2 -2
- {tutorials → gr_libs/tutorials}/graml_panda_tutorial.py +5 -10
- {tutorials → gr_libs/tutorials}/graml_parking_tutorial.py +5 -9
- {tutorials → gr_libs/tutorials}/graml_point_maze_tutorial.py +2 -1
- {tutorials → gr_libs/tutorials}/graql_minigrid_tutorial.py +2 -2
- {gr_libs-0.1.8.dist-info → gr_libs-0.2.2.dist-info}/METADATA +84 -29
- gr_libs-0.2.2.dist-info/RECORD +71 -0
- {gr_libs-0.1.8.dist-info → gr_libs-0.2.2.dist-info}/WHEEL +1 -1
- gr_libs-0.2.2.dist-info/top_level.txt +2 -0
- tests/test_draco.py +14 -0
- tests/test_gcdraco.py +2 -2
- tests/test_graml.py +4 -4
- tests/test_graql.py +1 -1
- evaluation/create_minigrid_map_image.py +0 -38
- evaluation/file_system.py +0 -53
- evaluation/generate_experiments_results_new_ver1.py +0 -238
- evaluation/generate_experiments_results_new_ver2.py +0 -331
- gr_libs/ml/neural/utils/penv.py +0 -60
- gr_libs/recognizer/utils/__init__.py +0 -1
- gr_libs-0.1.8.dist-info/RECORD +0 -70
- gr_libs-0.1.8.dist-info/top_level.txt +0 -4
- /gr_libs/environment/{utils → _utils}/__init__.py +0 -0
- /gr_libs/ml/planner/mcts/{utils → _utils}/__init__.py +0 -0
- /gr_libs/ml/planner/mcts/{utils → _utils}/node.py +0 -0
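Two patterns run through the listing: the top-level `evaluation/` and `tutorials/` trees move inside the `gr_libs` package, and internal helper modules gain a leading underscore (`utils` → `_utils`, `gr_dataset.py` → `_gr_dataset.py`). A small sketch, assuming gr-libs is installed, of how to check which layout a given installation exposes:

```python
import importlib.util

# The 0.2.2 wheel privatizes helper packages: gr_libs.recognizer.utils is
# removed and gr_libs.recognizer._utils takes its place (see the renames above).
old_spec = importlib.util.find_spec("gr_libs.recognizer.utils")
new_spec = importlib.util.find_spec("gr_libs.recognizer._utils")
print(f"public utils present:   {old_spec is not None}")  # expected False on 0.2.2
print(f"private _utils present: {new_spec is not None}")  # expected True on 0.2.2
```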
gr_libs/__init__.py
CHANGED
```diff
@@ -1,5 +1,7 @@
+"""gr_libs: Baselines for goal recognition executions on gym environments."""
+
+from gr_libs.recognizer.gr_as_rl.gr_as_rl_recognizer import Draco, GCDraco, Graql
 from gr_libs.recognizer.graml.graml_recognizer import ExpertBasedGraml, GCGraml
-from gr_libs.recognizer.gr_as_rl.gr_as_rl_recognizer import Graql, Draco, GCDraco
 
 try:
     from ._version import version as __version__
```
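Because `gr_libs/__init__.py` now re-exports the recognizer classes, 0.2.2 code can import them from the package root instead of the deep module paths shown above. A minimal sketch:

```python
# All five recognizers are re-exported at the package root in 0.2.2.
from gr_libs import Draco, ExpertBasedGraml, GCDraco, GCGraml, Graql

import gr_libs

# __version__ is resolved from the generated gr_libs._version module.
print(gr_libs.__version__)
```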
gr_libs/_evaluation/__init__.py
ADDED
```diff
@@ -0,0 +1 @@
+""" This is a directory that includes scripts for analysis of GR results. """
```
evaluation/analyze_results_cross_alg_cross_domain.py → gr_libs/_evaluation/_analyze_results_cross_alg_cross_domain.py
RENAMED
```diff
@@ -1,16 +1,13 @@
 import copy
-import sys
-import matplotlib.pyplot as plt
-import numpy as np
 import os
+
 import dill
+import matplotlib.pyplot as plt
+import numpy as np
 from scipy.interpolate import make_interp_spline
 from scipy.ndimage import gaussian_filter1d
-from gr_libs.ml.utils.storage import (
-    get_experiment_results_path,
-    set_global_storage_configs,
-)
-from scripts.generate_task_specific_statistics_plots import get_figures_dir_path
+
+from gr_libs.ml.utils.storage import get_experiment_results_path
 
 
 def smooth_line(x, y, num_points=300):
@@ -23,55 +20,25 @@ def smooth_line(x, y, num_points=300):
 if __name__ == "__main__":
 
     fragmented_accuracies = {
-        "graml": {
-            "panda": {
-                "gd_agent": {
-                    "0.3": [],  # every list here should have number of tasks accuracies in it, since we done experiments for L111-L555. remember each accuracy is an average of #goals different tasks.
-                    "0.5": [],
-                    "0.7": [],
-                    "0.9": [],
-                    "1": [],
-                },
-                "gc_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-            },
+        "ExpertBasedGraml": {
             "minigrid": {
                 "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
                 "lava_crossing": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-            },
-            "point_maze": {
-                "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-                "four_rooms": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-            },
-            "parking": {
-                "gd_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-                "gc_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-            },
+            }
         },
-        "graql": {
-            "panda": {
-                "gd_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-                "gc_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-            },
+        "Graql": {
             "minigrid": {
                 "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
                 "lava_crossing": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-            },
-            "point_maze": {
-                "obstacles": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-                "four_rooms": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-            },
-            "parking": {
-                "gd_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-                "gc_agent": {"0.3": [], "0.5": [], "0.7": [], "0.9": [], "1": []},
-            },
+            }
         },
     }
 
     continuing_accuracies = copy.deepcopy(fragmented_accuracies)
 
     # domains = ['panda', 'minigrid', 'point_maze', 'parking']
-    domains = ["minigrid"
-    tasks = ["
+    domains = ["minigrid"]
+    tasks = ["L1", "L2", "L3", "L4", "L5"]
     percentages = ["0.3", "0.5", "1"]
 
     for partial_obs_type, accuracies, is_same_learn in zip(
@@ -80,80 +47,106 @@ if __name__ == "__main__":
         [False, True],
     ):
         for domain in domains:
-            for env in accuracies["graml"][domain].keys():
+            for env in accuracies["ExpertBasedGraml"][domain].keys():
                 for task in tasks:
-                    set_global_storage_configs(
-                        recognizer_str="graml",
-                        is_fragmented=partial_obs_type,
-                        is_inference_same_length_sequences=True,
-                        is_learn_same_length_sequences=is_same_learn,
-                    )
-                    graml_res_file_path = (
-                        f"{get_experiment_results_path(domain, env, task)}.pkl"
-                    )
-                    set_global_storage_configs(
-                        recognizer_str="graql", is_fragmented=partial_obs_type
-                    )
+                    graml_res_file_path = f"{get_experiment_results_path(domain, env, task, 'ExpertBasedGraml')}.pkl"
                     graql_res_file_path = (
-                        f"{get_experiment_results_path(domain, env, task)}.pkl"
+                        f"{get_experiment_results_path(domain, env, task, 'Graql')}.pkl"
                     )
                     if os.path.exists(graml_res_file_path):
                         with open(graml_res_file_path, "rb") as results_file:
                             results = dill.load(results_file)
-                            for percentage in accuracies["graml"][domain][env].keys():
-                                accuracies["graml"][domain][env][percentage].append(
-                                    results[percentage]["accuracy"]
-                                )
+                            for percentage in accuracies["expertbasedgraml"][domain][
+                                env
+                            ].keys():
+                                accuracies["expertbasedgraml"][domain][env][
+                                    percentage
+                                ].append(results[percentage]["accuracy"])
                     else:
-                        assert
+                        assert False, f"no file for {graml_res_file_path}"
                     if os.path.exists(graql_res_file_path):
                         with open(graql_res_file_path, "rb") as results_file:
                             results = dill.load(results_file)
-                            for percentage in accuracies["graql"][domain][env].keys():
-                                accuracies["graql"][domain][env][percentage].append(
+                            for percentage in accuracies["expertbasedgraml"][domain][
+                                env
+                            ].keys():
+                                accuracies["Graql"][domain][env][percentage].append(
                                     results[percentage]["accuracy"]
                                 )
                     else:
-                        assert
+                        assert False, f"no file for {graql_res_file_path}"
 
     plot_styles = {
-        ("graml", "fragmented", 0.3): "g--o",  # Green dashed line with circle markers
-        ("graml", "fragmented", 0.5): "g--s",  # Green dashed line with square markers
         (
-            "graml",
+            "expertbasedgraml",
+            "fragmented",
+            0.3,
+        ): "g--o",  # Green dashed line with circle markers
+        (
+            "expertbasedgraml",
+            "fragmented",
+            0.5,
+        ): "g--s",  # Green dashed line with square markers
+        (
+            "expertbasedgraml",
             "fragmented",
             0.7,
         ): "g--^",  # Green dashed line with triangle-up markers
-        ("graml", "fragmented", 0.9): "g--d",  # Green dashed line with diamond markers
-        ("graml", "fragmented", 1.0): "g--*",  # Green dashed line with star markers
-        ("graml", "continuing", 0.3): "g-o",  # Green solid line with circle markers
-        ("graml", "continuing", 0.5): "g-s",  # Green solid line with square markers
         (
-            "graml",
+            "expertbasedgraml",
+            "fragmented",
+            0.9,
+        ): "g--d",  # Green dashed line with diamond markers
+        (
+            "expertbasedgraml",
+            "fragmented",
+            1.0,
+        ): "g--*",  # Green dashed line with star markers
+        (
+            "expertbasedgraml",
+            "continuing",
+            0.3,
+        ): "g-o",  # Green solid line with circle markers
+        (
+            "expertbasedgraml",
+            "continuing",
+            0.5,
+        ): "g-s",  # Green solid line with square markers
+        (
+            "expertbasedgraml",
            "continuing",
            0.7,
        ): "g-^",  # Green solid line with triangle-up markers
-        ("graml", "continuing", 0.9): "g-d",  # Green solid line with diamond markers
-        ("graml", "continuing", 1.0): "g-*",  # Green solid line with star markers
-        ("graql", "fragmented", 0.3): "b--o",  # Blue dashed line with circle markers
-        ("graql", "fragmented", 0.5): "b--s",  # Blue dashed line with square markers
         (
-            "graql",
+            "expertbasedgraml",
+            "continuing",
+            0.9,
+        ): "g-d",  # Green solid line with diamond markers
+        (
+            "expertbasedgraml",
+            "continuing",
+            1.0,
+        ): "g-*",  # Green solid line with star markers
+        ("Graql", "fragmented", 0.3): "b--o",  # Blue dashed line with circle markers
+        ("Graql", "fragmented", 0.5): "b--s",  # Blue dashed line with square markers
+        (
+            "Graql",
             "fragmented",
             0.7,
         ): "b--^",  # Blue dashed line with triangle-up markers
-        ("graql", "fragmented", 0.9): "b--d",  # Blue dashed line with diamond markers
-        ("graql", "fragmented", 1.0): "b--*",  # Blue dashed line with star markers
-        ("graql", "continuing", 0.3): "b-o",  # Blue solid line with circle markers
-        ("graql", "continuing", 0.5): "b-s",  # Blue solid line with square markers
-        ("graql", "continuing", 0.7): "b-^",  # Blue solid line with triangle-up markers
-        ("graql", "continuing", 0.9): "b-d",  # Blue solid line with diamond markers
-        ("graql", "continuing", 1.0): "b-*",  # Blue solid line with star markers
+        ("Graql", "fragmented", 0.9): "b--d",  # Blue dashed line with diamond markers
+        ("Graql", "fragmented", 1.0): "b--*",  # Blue dashed line with star markers
+        ("Graql", "continuing", 0.3): "b-o",  # Blue solid line with circle markers
+        ("Graql", "continuing", 0.5): "b-s",  # Blue solid line with square markers
+        ("Graql", "continuing", 0.7): "b-^",  # Blue solid line with triangle-up markers
+        ("Graql", "continuing", 0.9): "b-d",  # Blue solid line with diamond markers
+        ("Graql", "continuing", 1.0): "b-*",  # Blue solid line with star markers
     }
 
     def average_accuracies(accuracies, domain):
         avg_acc = {
-            algo: {perc: [] for perc in percentages} for algo in ["graml", "graql"]
+            algo: {perc: [] for perc in percentages}
+            for algo in ["ExpertBasedGraml", "Graql"]
         }
 
         for algo in avg_acc.keys():
@@ -186,7 +179,7 @@ if __name__ == "__main__":
     x_vals = np.arange(1, 6)  # Number of goals
 
     # Create "waves" (shaded regions) for each algorithm
-    for algo in ["graml", "graql"]:
+    for algo in ["ExpertBasedGraml", "Graql"]:
         fragmented_y_vals_by_percentage = []
         continuing_y_vals_by_percentage = []
 
```
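Beyond the rename and the black-style reformatting, the substantive change above is that `get_experiment_results_path` now takes the recognizer name as a fourth argument, replacing the old `set_global_storage_configs` side channel. A condensed sketch of the new loading pattern (the helper function is illustrative, not part of the package):

```python
import os

import dill

from gr_libs.ml.utils.storage import get_experiment_results_path


def load_accuracies(domain, env, task, recognizer):
    """Illustrative helper mirroring the loop above: one results pickle per
    (domain, env, task, recognizer), keyed by observability percentage."""
    res_path = f"{get_experiment_results_path(domain, env, task, recognizer)}.pkl"
    assert os.path.exists(res_path), f"no file for {res_path}"
    with open(res_path, "rb") as results_file:
        results = dill.load(results_file)
    # Each per-percentage entry is assumed to carry an "accuracy" field,
    # as in the script above.
    return {perc: res["accuracy"] for perc, res in results.items()}
```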
evaluation/generate_experiments_results.py → gr_libs/_evaluation/_generate_experiments_results.py
RENAMED
```diff
@@ -1,14 +1,14 @@
 import copy
-import sys
-import matplotlib.pyplot as plt
-import numpy as np
 import os
+
 import dill
+import matplotlib.pyplot as plt
+import numpy as np
+
 from gr_libs.ml.utils.storage import (
     get_experiment_results_path,
     set_global_storage_configs,
 )
-from scripts.generate_task_specific_statistics_plots import get_figures_dir_path
 
 
 def gen_graph(
@@ -71,7 +71,7 @@ def gen_graph(
                            results[percentage]["accuracy"]
                        )
            else:
-                assert
+                assert False, f"no file for {graml_res_file_path}"
            if os.path.exists(graql_res_file_path):
                with open(graql_res_file_path, "rb") as results_file:
                    results = dill.load(results_file)
@@ -79,7 +79,7 @@ def gen_graph(
                            results[percentage]["accuracy"]
                        )
            else:
-                assert
+                assert False, f"no file for {graql_res_file_path}"
 
    def plot_accuracies(accuracies, partial_obs_type):
        plt.figure(figsize=(10, 6))
```
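Both hunks in the body apply the same repair: the bare `assert` shown on the 0.1.8 side becomes `assert False, message`, so a missing results pickle now aborts with the offending path in the error. In isolation (the path below is invented for the illustration):

```python
import os

# Invented example path; in the script it comes from get_experiment_results_path.
graml_res_file_path = "results/minigrid/obstacles/L1.pkl"

if os.path.exists(graml_res_file_path):
    pass  # load and aggregate the results as gen_graph does
else:
    # 0.2.2 behavior: the AssertionError message names the missing file.
    assert False, f"no file for {graml_res_file_path}"
```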
evaluation/generate_task_specific_statistics_plots.py → gr_libs/_evaluation/_generate_task_specific_statistics_plots.py
RENAMED
```diff
@@ -1,20 +1,17 @@
 import argparse
-import
+import os
+
+import dill
 import matplotlib.pyplot as plt
 import numpy as np
-import os
-import ast
-import inspect
 import torch
-import dill
 
+from gr_libs.metrics.metrics import measure_average_sequence_distance
 from gr_libs.ml.utils import get_embeddings_result_path
 from gr_libs.ml.utils.storage import (
-    get_experiment_results_path,
-    set_global_storage_configs,
     get_graql_experiment_confidence_path,
+    set_global_storage_configs,
 )
-from gr_libs.metrics.metrics import measure_average_sequence_distance
 
 
 def get_tasks_embeddings_dir_path(env_name):
@@ -132,11 +129,11 @@ def analyze_and_produce_plots(
 
     goals = list(goals_similarity_dict.keys())
     percentages = sorted(
-        set(
+        {
             percentage
             for similarities in goals_similarity_dict.values()
             for percentage in similarities.keys()
-        )
+        }
     )
     num_percentages = len(percentages)
     fig_string = f"{recognizer_type}_{domain_name}_{env_name}_{fragmented_status}_{inf_same_length_status}_{learn_same_length_status}"
@@ -171,11 +168,11 @@ def analyze_and_produce_plots(
 
     goals = list(goals_similarity_dict.keys())
     percentages = sorted(
-        set(
+        {
             percentage
             for similarities in goals_similarity_dict.values()
             for percentage in similarities.keys()
-        )
+        }
     )
     num_percentages = len(percentages)
     fig_string = f"{recognizer_type}_{domain_name}_{env_name}_{fragmented_status}"
@@ -472,7 +469,7 @@ if __name__ == "__main__":
         is_inference_same_length_sequences=args.inference_same_seq_len,
         is_learn_same_length_sequences=args.learn_same_seq_len,
     )
-    (env_name,) = [
+    (env_name,) = (
         x
         for x in [
             args.minigrid_env,
@@ -481,7 +478,7 @@ if __name__ == "__main__":
             args.franka_env,
         ]
         if isinstance(x, str)
-    ]
+    )
     if args.inference_same_seq_len:
         inference_same_seq_len = "inference_same_seq_len"
     else:
```
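The last two hunks turn the right-hand side of the single-element unpacking from a list comprehension into a generator expression; either way, `(env_name,) = ...` fails loudly unless exactly one of the `args.*_env` CLI arguments is a string. A standalone sketch with invented stand-in values:

```python
# Invented stand-ins for the args.*_env command-line arguments:
# exactly one of them is expected to be set.
candidate_envs = ["MiniGrid-SimpleCrossingS13N4-v0", None, None, None]

# 0.2.2 form: generator expression. Unpacking into a one-element tuple
# raises ValueError if zero or more than one candidate is a string.
(env_name,) = (x for x in candidate_envs if isinstance(x, str))
print(env_name)  # MiniGrid-SimpleCrossingS13N4-v0
```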
evaluation/get_plans_images.py → gr_libs/_evaluation/_get_plans_images.py
RENAMED
```diff
@@ -1,8 +1,7 @@
-import
+import inspect
 import os
 import pickle
-import
-
+import sys
 
 currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
 GRAML_itself = os.path.dirname(currentdir)
@@ -50,7 +49,7 @@ def analyze_and_produce_images(env_name):
 
 if __name__ == "__main__":
     # preventing circular imports. only needed for running this as main anyway.
-    from gr_libs.ml.utils.storage import
+    from gr_libs.ml.utils.storage import get_model_dir, get_models_dir
 
     # checks:
     assert (
```
gr_libs/_version.py
CHANGED