gr-libs 0.1.8-py3-none-any.whl → 0.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gr_libs/__init__.py +3 -1
- gr_libs/_version.py +2 -2
- gr_libs/all_experiments.py +260 -0
- gr_libs/environment/__init__.py +14 -1
- gr_libs/environment/_utils/__init__.py +0 -0
- gr_libs/environment/{utils → _utils}/utils.py +1 -1
- gr_libs/environment/environment.py +278 -23
- gr_libs/evaluation/__init__.py +1 -0
- gr_libs/evaluation/generate_experiments_results.py +100 -0
- gr_libs/metrics/__init__.py +2 -0
- gr_libs/metrics/metrics.py +166 -31
- gr_libs/ml/__init__.py +1 -6
- gr_libs/ml/base/__init__.py +3 -1
- gr_libs/ml/base/rl_agent.py +68 -3
- gr_libs/ml/neural/__init__.py +1 -3
- gr_libs/ml/neural/deep_rl_learner.py +241 -84
- gr_libs/ml/neural/utils/__init__.py +1 -2
- gr_libs/ml/planner/mcts/{utils → _utils}/tree.py +1 -1
- gr_libs/ml/planner/mcts/mcts_model.py +71 -34
- gr_libs/ml/sequential/__init__.py +0 -1
- gr_libs/ml/sequential/{lstm_model.py → _lstm_model.py} +11 -14
- gr_libs/ml/tabular/__init__.py +1 -3
- gr_libs/ml/tabular/tabular_q_learner.py +27 -9
- gr_libs/ml/tabular/tabular_rl_agent.py +22 -9
- gr_libs/ml/utils/__init__.py +2 -9
- gr_libs/ml/utils/format.py +13 -90
- gr_libs/ml/utils/math.py +3 -2
- gr_libs/ml/utils/other.py +2 -2
- gr_libs/ml/utils/storage.py +41 -94
- gr_libs/odgr_executor.py +263 -0
- gr_libs/problems/consts.py +570 -292
- gr_libs/recognizer/{utils → _utils}/format.py +2 -2
- gr_libs/recognizer/gr_as_rl/gr_as_rl_recognizer.py +127 -36
- gr_libs/recognizer/graml/{gr_dataset.py → _gr_dataset.py} +11 -11
- gr_libs/recognizer/graml/graml_recognizer.py +186 -35
- gr_libs/recognizer/recognizer.py +59 -10
- gr_libs/tutorials/draco_panda_tutorial.py +58 -0
- gr_libs/tutorials/draco_parking_tutorial.py +56 -0
- {tutorials → gr_libs/tutorials}/gcdraco_panda_tutorial.py +11 -11
- {tutorials → gr_libs/tutorials}/gcdraco_parking_tutorial.py +6 -8
- {tutorials → gr_libs/tutorials}/graml_minigrid_tutorial.py +18 -14
- {tutorials → gr_libs/tutorials}/graml_panda_tutorial.py +11 -12
- {tutorials → gr_libs/tutorials}/graml_parking_tutorial.py +8 -10
- {tutorials → gr_libs/tutorials}/graml_point_maze_tutorial.py +17 -3
- {tutorials → gr_libs/tutorials}/graql_minigrid_tutorial.py +2 -2
- {gr_libs-0.1.8.dist-info → gr_libs-0.2.5.dist-info}/METADATA +95 -29
- gr_libs-0.2.5.dist-info/RECORD +72 -0
- {gr_libs-0.1.8.dist-info → gr_libs-0.2.5.dist-info}/WHEEL +1 -1
- gr_libs-0.2.5.dist-info/top_level.txt +2 -0
- tests/test_draco.py +14 -0
- tests/test_gcdraco.py +2 -2
- tests/test_graml.py +4 -4
- tests/test_graql.py +1 -1
- tests/test_odgr_executor_expertbasedgraml.py +14 -0
- tests/test_odgr_executor_gcdraco.py +14 -0
- tests/test_odgr_executor_gcgraml.py +14 -0
- tests/test_odgr_executor_graql.py +14 -0
- evaluation/analyze_results_cross_alg_cross_domain.py +0 -267
- evaluation/create_minigrid_map_image.py +0 -38
- evaluation/file_system.py +0 -53
- evaluation/generate_experiments_results.py +0 -141
- evaluation/generate_experiments_results_new_ver1.py +0 -238
- evaluation/generate_experiments_results_new_ver2.py +0 -331
- evaluation/generate_task_specific_statistics_plots.py +0 -500
- evaluation/get_plans_images.py +0 -62
- evaluation/increasing_and_decreasing_.py +0 -104
- gr_libs/ml/neural/utils/penv.py +0 -60
- gr_libs-0.1.8.dist-info/RECORD +0 -70
- gr_libs-0.1.8.dist-info/top_level.txt +0 -4
- /gr_libs/{environment/utils/__init__.py → _evaluation/_generate_experiments_results.py} +0 -0
- /gr_libs/ml/planner/mcts/{utils → _utils}/__init__.py +0 -0
- /gr_libs/ml/planner/mcts/{utils → _utils}/node.py +0 -0
- /gr_libs/recognizer/{utils → _utils}/__init__.py +0 -0
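A pattern worth noting across this list: internal helpers are being made private. Every utils package becomes _utils, lstm_model.py becomes _lstm_model.py, gr_dataset.py becomes _gr_dataset.py, and the tutorials move from a top-level tutorials/ directory into the package as gr_libs/tutorials/. A minimal sketch of what this means for downstream code (old-layout paths appear only in comments; underscore-prefixed modules should be treated as private API that may change without notice):

# Sketch only: illustrates the 0.1.8 -> 0.2.5 module moves listed above.
import importlib.util

# Old layout (0.1.8): e.g. gr_libs.environment.utils.utils, top-level tutorials/.
# New layout (0.2.5): the same modules live under underscore-prefixed names.
for module in (
    "gr_libs.environment._utils.utils",
    "gr_libs.ml.planner.mcts._utils.tree",
    "gr_libs.tutorials.graml_minigrid_tutorial",  # tutorials now ship in the wheel
):
    print(module, "found" if importlib.util.find_spec(module) else "missing")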
gr_libs/__init__.py
CHANGED
@@ -1,5 +1,7 @@
+"""gr_libs: Baselines for goal recognition executions on gym environments."""
+
+from gr_libs.recognizer.gr_as_rl.gr_as_rl_recognizer import Draco, GCDraco, Graql
 from gr_libs.recognizer.graml.graml_recognizer import ExpertBasedGraml, GCGraml
-from gr_libs.recognizer.gr_as_rl.gr_as_rl_recognizer import Graql, Draco, GCDraco
 
 try:
     from ._version import version as __version__
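With this change, the recognizer classes are re-exported from the package root, so downstream code can import them in one line. A minimal usage sketch (constructor arguments are omitted because they are not part of this diff):

import gr_libs
from gr_libs import Draco, ExpertBasedGraml, GCDraco, GCGraml, Graql

# __version__ is set from the generated _version.py; the try/except above
# means it may be absent in source checkouts without build metadata.
print(getattr(gr_libs, "__version__", "unknown"))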
gr_libs/all_experiments.py
ADDED
@@ -0,0 +1,260 @@
+""" executes odgr_executor parallely on a set of problems defined in consts.py """
+
+import argparse
+import concurrent.futures
+import os
+import subprocess
+import sys
+
+import dill
+import numpy as np
+
+from gr_libs.ml.utils.storage import get_experiment_results_path
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--domains", nargs="+", required=True, help="List of domains")
+parser.add_argument(
+    "--envs",
+    nargs="+",
+    required=True,
+    help="List of environments (same order as domains)",
+)
+parser.add_argument(
+    "--tasks", nargs="+", required=True, help="List of tasks (e.g. L1 L2 L3 L4 L5)"
+)
+parser.add_argument(
+    "--recognizers", nargs="+", required=True, help="List of recognizers"
+)
+parser.add_argument(
+    "--n", type=int, default=5, help="Number of times to execute each task"
+)
+args = parser.parse_args()
+
+# Build configs dynamically
+configs = {}
+for domain, env in zip(args.domains, args.envs):
+    configs.setdefault(domain, {})
+    configs[domain][env] = args.tasks
+
+recognizers = args.recognizers
+n = args.n
+
+
+# Function to read results from the result file
+def read_results(res_file_path):
+    """
+    Read the results from a result file.
+
+    Args:
+        res_file_path (str): The path to the result file.
+
+    Returns:
+        The results read from the file.
+    """
+    with open(res_file_path, "rb") as f:
+        results = dill.load(f)
+    return results
+
+
+# Every thread worker executes this function.
+def run_experiment(domain, env, task, recognizer, i, generate_new=False):
+    """
+    Run an experiment.
+
+    Args:
+        domain (str): The domain of the experiment.
+        env (str): The environment of the experiment.
+        task (str): The task of the experiment.
+        recognizer (str): The recognizer used in the experiment.
+        i (int): The index of the experiment.
+        generate_new (bool, optional): Whether to generate new results.
+            Defaults to False.
+
+    Returns:
+        tuple: A tuple containing the experiment details and the results.
+    """
+    cmd = f"python gr_libs/odgr_executor.py --domain {domain} --recognizer {recognizer} --env_name {env} --task {task} --collect_stats --experiment_num {i}"
+    try:
+        res_file_path = get_experiment_results_path(domain, env, task, recognizer)
+        i_res_file_path_pkl = os.path.join(res_file_path, f"res_{i}.pkl")
+        i_res_file_path_txt = os.path.join(res_file_path, f"res_{i}.txt")
+        if generate_new or (
+            not os.path.exists(i_res_file_path_txt)
+            or not os.path.exists(i_res_file_path_pkl)
+        ):
+            process = subprocess.Popen(
+                cmd,
+                shell=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+            )
+            stdout, stderr = process.communicate()
+            if process.returncode != 0:
+                print(f"Execution failed: {cmd}\nSTDOUT:\n{stdout}\nSTDERR:\n{stderr}")
+                return None
+            else:
+                print(f"Finished execution successfully: {cmd}")
+        else:
+            print(
+                f"File {i_res_file_path_txt} already exists. Skipping execution of {cmd}"
+            )
+        return ((domain, env, task, recognizer), read_results(i_res_file_path_pkl))
+    except Exception as e:
+        print(f"Exception occurred while running experiment: {e}")
+        return None
+
+
+# Collect results
+results = {}
+
+# create an executor that manages a pool of threads.
+# Note that any failure in the threads will not stop the main thread
+# from continuing and vice versa, nor will the debugger view the
+# failure if in debug mode.
+# Use prints and if any thread's printing stops suspect failure.
+# If failure happened, use breakpoints before failure and use the
+# watch to see the failure by pasting the problematic piece of code.
+with concurrent.futures.ThreadPoolExecutor() as executor:
+    futures = []
+    for domain, envs in configs.items():
+        for env, tasks in envs.items():
+            for task in tasks:
+                for recognizer in recognizers:
+                    for i in range(n):
+                        futures.append(
+                            executor.submit(
+                                run_experiment,
+                                domain,
+                                env,
+                                task,
+                                recognizer,
+                                i,
+                                generate_new=(
+                                    True
+                                    if len(sys.argv) > 1
+                                    and sys.argv[1] == "--generate_new"
+                                    else False
+                                ),
+                            )
+                        )
+
+    for future in concurrent.futures.as_completed(futures):
+        if future.result() is None:
+            print(
+                f"for future {future}, future.result() is None. \
+                Continuing to next future."
+            )
+            continue
+        key, result = future.result()
+        print(f"main thread reading results from future {key}")
+        if key not in results:
+            results[key] = []
+        results[key].append(result)
+
+# Calculate average accuracy and standard deviation for each percentage
+detailed_summary = {}
+compiled_accuracies = {}
+for key, result_list in results.items():
+    domain, env, task, recognizer = key
+    percentages = result_list[0].keys()
+    detailed_summary[key] = {}
+    if (domain, recognizer) not in compiled_accuracies:
+        compiled_accuracies[(domain, recognizer)] = {}
+    for percentage in percentages:
+        if percentage == "total":
+            continue
+        if percentage not in compiled_accuracies[(domain, recognizer)].keys():
+            compiled_accuracies[(domain, recognizer)][percentage] = {}
+        if percentage not in detailed_summary[key].keys():
+            detailed_summary[key][percentage] = {}
+        consecutive_accuracies = [
+            result[percentage]["consecutive"]["accuracy"] for result in result_list
+        ]
+        non_consecutive_accuracies = [
+            result[percentage]["non_consecutive"]["accuracy"] for result in result_list
+        ]
+        if (
+            "consecutive"
+            in compiled_accuracies[(domain, recognizer)][percentage].keys()
+        ):
+            compiled_accuracies[(domain, recognizer)][percentage]["consecutive"].extend(
+                consecutive_accuracies
+            )
+        else:
+            compiled_accuracies[(domain, recognizer)][percentage][
+                "consecutive"
+            ] = consecutive_accuracies
+        if (
+            "non_consecutive"
+            in compiled_accuracies[(domain, recognizer)][percentage].keys()
+        ):
+            compiled_accuracies[(domain, recognizer)][percentage][
+                "non_consecutive"
+            ].extend(non_consecutive_accuracies)
+        else:
+            compiled_accuracies[(domain, recognizer)][percentage][
+                "non_consecutive"
+            ] = non_consecutive_accuracies
+        avg_consecutive_accuracy = np.mean(consecutive_accuracies)
+        consecutive_std_dev = np.std(consecutive_accuracies)
+        detailed_summary[key][percentage]["consecutive"] = (
+            avg_consecutive_accuracy,
+            consecutive_std_dev,
+        )
+        avg_non_consecutive_accuracy = np.mean(non_consecutive_accuracies)
+        non_consecutive_std_dev = np.std(non_consecutive_accuracies)
+        detailed_summary[key][percentage]["non_consecutive"] = (
+            avg_non_consecutive_accuracy,
+            non_consecutive_std_dev,
+        )
+
+compiled_summary = {}
+for key, percentage_dict in compiled_accuracies.items():
+    compiled_summary[key] = {}
+    for percentage, cons_accuracies in percentage_dict.items():
+        compiled_summary[key][percentage] = {}
+        for is_cons, accuracies in cons_accuracies.items():
+            avg_accuracy = np.mean(accuracies)
+            std_dev = np.std(accuracies)
+            compiled_summary[key][percentage][is_cons] = (avg_accuracy, std_dev)
+
+# Write different summary results to different files, one per recognizer
+if not os.path.exists(os.path.join("outputs", "summaries")):
+    os.makedirs(os.path.join("outputs", "summaries"))
+
+for recognizer in recognizers:
+    compiled_summary_file_path = os.path.join(
+        "outputs",
+        "summaries",
+        f"compiled_summary_{''.join(configs.keys())}_{recognizer}.txt",
+    )
+    with open(compiled_summary_file_path, "w") as f:
+        for key, percentage_dict in compiled_summary.items():
+            domain, recog = key
+            if recog != recognizer:
+                continue  # Only write results for this recognizer
+            for percentage, cons_info in percentage_dict.items():
+                for is_cons, (avg_accuracy, std_dev) in cons_info.items():
+                    f.write(
+                        f"{domain}\t{recog}\t{percentage}\t{is_cons}\t{avg_accuracy:.4f}\t{std_dev:.4f}\n"
+                    )
+    print(f"Compiled summary results written to {compiled_summary_file_path}")
+
+    detailed_summary_file_path = os.path.join(
+        "outputs",
+        "summaries",
+        f"detailed_summary_{''.join(configs.keys())}_{recognizer}.txt",
+    )
+    with open(detailed_summary_file_path, "w") as f:
+        for key, percentage_dict in detailed_summary.items():
+            domain, env, task, recog = key
+            if recog != recognizer:
+                continue  # Only write results for this recognizer
+            f.write(f"{domain}\t{env}\t{task}\t{recog}\n")
+            for percentage, cons_info in percentage_dict.items():
+                for is_cons, (avg_accuracy, std_dev) in cons_info.items():
+                    f.write(
+                        f"\t\t{percentage}\t{is_cons}\t{avg_accuracy:.4f}\t{std_dev:.4f}\n"
+                    )
+    print(f"Detailed summary results written to {detailed_summary_file_path}")
gr_libs/environment/__init__.py
CHANGED
@@ -1,9 +1,22 @@
+"""
+A module GR algorithms can store hard-coded parameters anf functionalities
+that are environment-related.
+"""
+
 import importlib.metadata
 import warnings
 
 
 def is_extra_installed(package: str, extra: str) -> bool:
-    """Check if an extra was installed for a given package."""
+    """Check if an extra was installed for a given package.
+
+    Args:
+        package (str): The name of the package.
+        extra (str): The name of the extra to check.
+
+    Returns:
+        bool: True if the extra is installed, False otherwise.
+    """
     try:
         # Get metadata for the installed package
         dist = importlib.metadata.metadata(package)
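The helper above reads installed-package metadata to decide whether an optional dependency group is present. A minimal usage sketch follows; the extra name "minigrid" is an assumption for illustration, since the actual extras are defined in the package metadata rather than in this hunk:

from gr_libs.environment import is_extra_installed

# "minigrid" is a hypothetical extra name, used only to illustrate the call.
if is_extra_installed("gr_libs", "minigrid"):
    print("Optional dependencies for this environment are installed.")
else:
    print("Missing extra; install with: pip install gr_libs[minigrid]")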
File without changes