gr-libs 0.1.7.post0__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gr_libs/__init__.py +4 -1
- gr_libs/_evaluation/__init__.py +1 -0
- gr_libs/_evaluation/_analyze_results_cross_alg_cross_domain.py +260 -0
- gr_libs/_evaluation/_generate_experiments_results.py +141 -0
- gr_libs/_evaluation/_generate_task_specific_statistics_plots.py +497 -0
- gr_libs/_evaluation/_get_plans_images.py +61 -0
- gr_libs/_evaluation/_increasing_and_decreasing_.py +106 -0
- gr_libs/_version.py +2 -2
- gr_libs/all_experiments.py +294 -0
- gr_libs/environment/__init__.py +30 -9
- gr_libs/environment/_utils/utils.py +27 -0
- gr_libs/environment/environment.py +417 -54
- gr_libs/metrics/__init__.py +7 -0
- gr_libs/metrics/metrics.py +231 -54
- gr_libs/ml/__init__.py +2 -5
- gr_libs/ml/agent.py +21 -6
- gr_libs/ml/base/__init__.py +3 -1
- gr_libs/ml/base/rl_agent.py +81 -13
- gr_libs/ml/consts.py +1 -1
- gr_libs/ml/neural/__init__.py +1 -3
- gr_libs/ml/neural/deep_rl_learner.py +619 -378
- gr_libs/ml/neural/utils/__init__.py +1 -2
- gr_libs/ml/neural/utils/dictlist.py +3 -3
- gr_libs/ml/planner/mcts/{utils → _utils}/__init__.py +1 -1
- gr_libs/ml/planner/mcts/{utils → _utils}/node.py +11 -7
- gr_libs/ml/planner/mcts/{utils → _utils}/tree.py +15 -11
- gr_libs/ml/planner/mcts/mcts_model.py +571 -312
- gr_libs/ml/sequential/__init__.py +0 -1
- gr_libs/ml/sequential/_lstm_model.py +270 -0
- gr_libs/ml/tabular/__init__.py +1 -3
- gr_libs/ml/tabular/state.py +7 -7
- gr_libs/ml/tabular/tabular_q_learner.py +150 -82
- gr_libs/ml/tabular/tabular_rl_agent.py +42 -28
- gr_libs/ml/utils/__init__.py +2 -3
- gr_libs/ml/utils/format.py +28 -97
- gr_libs/ml/utils/math.py +5 -3
- gr_libs/ml/utils/other.py +3 -3
- gr_libs/ml/utils/storage.py +88 -81
- gr_libs/odgr_executor.py +268 -0
- gr_libs/problems/consts.py +1549 -1227
- gr_libs/recognizer/_utils/__init__.py +0 -0
- gr_libs/recognizer/_utils/format.py +18 -0
- gr_libs/recognizer/gr_as_rl/gr_as_rl_recognizer.py +233 -88
- gr_libs/recognizer/graml/_gr_dataset.py +233 -0
- gr_libs/recognizer/graml/graml_recognizer.py +586 -252
- gr_libs/recognizer/recognizer.py +90 -30
- gr_libs/tutorials/draco_panda_tutorial.py +58 -0
- gr_libs/tutorials/draco_parking_tutorial.py +56 -0
- gr_libs/tutorials/gcdraco_panda_tutorial.py +62 -0
- gr_libs/tutorials/gcdraco_parking_tutorial.py +57 -0
- gr_libs/tutorials/graml_minigrid_tutorial.py +64 -0
- gr_libs/tutorials/graml_panda_tutorial.py +57 -0
- gr_libs/tutorials/graml_parking_tutorial.py +52 -0
- gr_libs/tutorials/graml_point_maze_tutorial.py +60 -0
- gr_libs/tutorials/graql_minigrid_tutorial.py +50 -0
- {gr_libs-0.1.7.post0.dist-info → gr_libs-0.2.2.dist-info}/METADATA +84 -29
- gr_libs-0.2.2.dist-info/RECORD +71 -0
- {gr_libs-0.1.7.post0.dist-info → gr_libs-0.2.2.dist-info}/WHEEL +1 -1
- gr_libs-0.2.2.dist-info/top_level.txt +2 -0
- tests/test_draco.py +14 -0
- tests/test_gcdraco.py +10 -0
- tests/test_graml.py +12 -8
- tests/test_graql.py +3 -2
- evaluation/analyze_results_cross_alg_cross_domain.py +0 -277
- evaluation/create_minigrid_map_image.py +0 -34
- evaluation/file_system.py +0 -42
- evaluation/generate_experiments_results.py +0 -92
- evaluation/generate_experiments_results_new_ver1.py +0 -254
- evaluation/generate_experiments_results_new_ver2.py +0 -331
- evaluation/generate_task_specific_statistics_plots.py +0 -272
- evaluation/get_plans_images.py +0 -47
- evaluation/increasing_and_decreasing_.py +0 -63
- gr_libs/environment/utils/utils.py +0 -17
- gr_libs/ml/neural/utils/penv.py +0 -57
- gr_libs/ml/sequential/lstm_model.py +0 -192
- gr_libs/recognizer/graml/gr_dataset.py +0 -134
- gr_libs/recognizer/utils/__init__.py +0 -1
- gr_libs/recognizer/utils/format.py +0 -13
- gr_libs-0.1.7.post0.dist-info/RECORD +0 -67
- gr_libs-0.1.7.post0.dist-info/top_level.txt +0 -4
- tutorials/graml_minigrid_tutorial.py +0 -34
- tutorials/graml_panda_tutorial.py +0 -41
- tutorials/graml_parking_tutorial.py +0 -39
- tutorials/graml_point_maze_tutorial.py +0 -39
- tutorials/graql_minigrid_tutorial.py +0 -34
- /gr_libs/environment/{utils → _utils}/__init__.py +0 -0
@@ -0,0 +1,294 @@
|
|
1
|
+
""" executes odgr_executor parallely on a set of problems defined in consts.py """
|
2
|
+
|
3
|
+
import concurrent.futures
|
4
|
+
import os
|
5
|
+
import subprocess
|
6
|
+
import sys
|
7
|
+
import threading
|
8
|
+
|
9
|
+
import dill
|
10
|
+
import numpy as np
|
11
|
+
|
12
|
+
from gr_libs.ml.utils.storage import get_experiment_results_path
|
13
|
+
|
14
|
+
# Define the lists
|
15
|
+
# domains = ['minigrid', 'point_maze', 'parking', 'panda']
|
16
|
+
# envs = {
|
17
|
+
# 'minigrid': ['obstacles', 'lava_crossing'],
|
18
|
+
# 'point_maze': ['four_rooms', 'lava_crossing'],
|
19
|
+
# 'parking': ['gc_agent', 'gd_agent'],
|
20
|
+
# 'panda': ['gc_agent', 'gd_agent']
|
21
|
+
# }
|
22
|
+
# tasks = {
|
23
|
+
# 'minigrid': ['L111', 'L222', 'L333', 'L444', 'L555'],
|
24
|
+
# 'point_maze': ['L111', 'L222', 'L333', 'L444', 'L555'],
|
25
|
+
# 'parking': ['L111', 'L222', 'L333', 'L444', 'L555'],
|
26
|
+
# 'panda': ['L111', 'L222', 'L333', 'L444', 'L555']
|
27
|
+
# }
|
28
|
+
configs = {
|
29
|
+
"minigrid": {
|
30
|
+
"MiniGrid-SimpleCrossingS13N4": ["L1", "L2", "L3", "L4", "L5"],
|
31
|
+
"MiniGrid-LavaCrossingS9N2": ["L1", "L2", "L3", "L4", "L5"],
|
32
|
+
}
|
33
|
+
# 'point_maze': {
|
34
|
+
# 'PointMaze-FourRoomsEnvDense-11x11': ['L1', 'L2', 'L3', 'L4', 'L5'],
|
35
|
+
# 'PointMaze-ObstaclesEnvDense-11x11': ['L1', 'L2', 'L3', 'L4', 'L5']
|
36
|
+
# }
|
37
|
+
# 'parking': {
|
38
|
+
# 'Parking-S-14-PC-': ['L1', 'L2', 'L3', 'L4', 'L5'],
|
39
|
+
# 'Parking-S-14-PC-': ['L1', 'L2', 'L3', 'L4', 'L5']
|
40
|
+
# }
|
41
|
+
# 'panda': {
|
42
|
+
# 'PandaMyReachDense': ['L1', 'L2', 'L3', 'L4', 'L5'],
|
43
|
+
# 'PandaMyReachDense': ['L1', 'L2', 'L3', 'L4', 'L5']
|
44
|
+
# }
|
45
|
+
}
|
46
|
+
# for minigrid:
|
47
|
+
# TODO assert these instead i the beggingning of the code before beginning
|
48
|
+
# with the actual threading
|
49
|
+
recognizers = ["ExpertBasedGraml", "Graql"]
|
50
|
+
# recognizers = ['Graql']
|
51
|
+
|
52
|
+
# for point_maze:
|
53
|
+
# recognizers = ['ExpertBasedGraml']
|
54
|
+
# recognizers = ['Draco']
|
55
|
+
|
56
|
+
# for parking:
|
57
|
+
# recognizers = ['GCGraml']
|
58
|
+
# recognizers = ['GCDraco']
|
59
|
+
|
60
|
+
# for panda:
|
61
|
+
# recognizers = ['GCGraml']
|
62
|
+
# recognizers = ['GCDraco']
|
63
|
+
|
64
|
+
n = 5 # Number of times to execute each task
|
65
|
+
|
66
|
+
|
67
|
+
# Function to read results from the result file
def read_results(res_file_path):
    """Load and return the pickled results stored at *res_file_path*.

    Args:
        res_file_path (str): The path to the dill-serialized result file.

    Returns:
        The deserialized results object.
    """
    with open(res_file_path, "rb") as result_file:
        return dill.load(result_file)
|
81
|
+
|
82
|
+
|
83
|
+
# Guards the renames of the shared res.txt/res.pkl files the subprocess
# produces. Must live at module level so ALL worker threads share one lock
# (the original created a fresh Lock per call, which synchronized nothing).
_rename_lock = threading.Lock()


# Every thread worker executes this function.
def run_experiment(domain, env, task, recognizer, i, generate_new=False):
    """
    Run a single odgr_executor experiment in a subprocess.

    Args:
        domain (str): The domain of the experiment.
        env (str): The environment of the experiment.
        task (str): The task of the experiment.
        recognizer (str): The recognizer used in the experiment.
        i (int): The repetition index of the experiment.
        generate_new (bool, optional): Whether to re-run even if results
            for this repetition already exist. Defaults to False.

    Returns:
        tuple | None: ((domain, env, task, recognizer), results) on success,
        or None if the subprocess failed or an exception occurred.
    """
    # Argument list with shell=False: avoids shell injection and the stray
    # whitespace the old line-continued shell string embedded in the command.
    cmd = [
        sys.executable,
        "gr_libs/odgr_executor.py",
        "--domain", domain,
        "--recognizer", recognizer,
        "--env_name", env,
        "--task", task,
        "--collect_stats",
    ]
    cmd_str = " ".join(cmd)
    print(f"Starting execution: {cmd_str}")
    try:
        res_file_path = get_experiment_results_path(domain, env, task, recognizer)
        res_file_path_txt = os.path.join(res_file_path, "res.txt")
        i_res_file_path_txt = os.path.join(res_file_path, f"res_{i}.txt")
        res_file_path_pkl = os.path.join(res_file_path, "res.pkl")
        i_res_file_path_pkl = os.path.join(res_file_path, f"res_{i}.pkl")
        if generate_new or (
            not os.path.exists(i_res_file_path_txt)
            or not os.path.exists(i_res_file_path_pkl)
        ):
            # Don't clobber results from a previous run: write regenerated
            # files under a "_new" suffix instead.
            if os.path.exists(i_res_file_path_txt) or os.path.exists(
                i_res_file_path_pkl
            ):
                i_res_file_path_txt = i_res_file_path_txt.replace(f"_{i}", f"_{i}_new")
                i_res_file_path_pkl = i_res_file_path_pkl.replace(f"_{i}", f"_{i}_new")
            process = subprocess.run(cmd)
            if process.returncode != 0:
                # BUG FIX: the original referenced an undefined name `result`
                # here, raising NameError instead of reporting the failure.
                print(f"Execution failed with code {process.returncode}: {cmd_str}")
                return None
            print(f"Finished execution successfully: {cmd_str}")
            # The subprocess always writes res.txt/res.pkl; serialize the
            # renames so concurrent workers on the same path don't race.
            with _rename_lock:
                os.rename(res_file_path_pkl, i_res_file_path_pkl)
                os.rename(res_file_path_txt, i_res_file_path_txt)
        else:
            print(
                f"File {i_res_file_path_txt} already exists. Skipping execution "
                f"of {cmd_str}"
            )
        return ((domain, env, task, recognizer), read_results(i_res_file_path_pkl))
    except Exception as e:
        print(f"Exception occurred while running experiment: {e}")
        return None
|
139
|
+
|
140
|
+
|
141
|
+
# Collect results: maps (domain, env, task, recognizer) -> list of per-run
# result dicts (one entry per repetition).
results = {}

# create an executor that manages a pool of threads.
# Note that any failure in the threads will not stop the main thread
# from continuing and vice versa, nor will the debugger view the
# failure if in debug mode.
# Use prints and if any thread's printing stops suspect failure.
# If failure happened, use breakpoints before failure and use the
# watch to see the failure by pasting the problematic piece of code.
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = []
    # Fan out: one future per (domain, env, task, recognizer, repetition).
    for domain, envs in configs.items():
        for env, tasks in envs.items():
            for task in tasks:
                for recognizer in recognizers:
                    for i in range(n):
                        futures.append(
                            executor.submit(
                                run_experiment,
                                domain,
                                env,
                                task,
                                recognizer,
                                i,
                                # Force regeneration when the script is invoked
                                # with the --generate_new flag.
                                generate_new=(
                                    True
                                    if len(sys.argv) > 1
                                    and sys.argv[1] == "--generate_new"
                                    else False
                                ),
                            )
                        )

    # Drain the futures as they complete; a None result marks a failed run
    # and is skipped so one failure doesn't abort the whole sweep.
    for future in concurrent.futures.as_completed(futures):
        if future.result() is None:
            print(
                f"for future {future}, future.result() is None. \
                Continuing to next future."
            )
            continue
        key, result = future.result()
        print(f"main thread reading results from future {key}")
        if key not in results:
            results[key] = []
        results[key].append(result)

# Calculate average accuracy and standard deviation for each percentage.
# detailed_summary keys on the full (domain, env, task, recognizer) tuple;
# compiled_accuracies pools raw accuracies across envs/tasks per
# (domain, recognizer) pair.
detailed_summary = {}
compiled_accuracies = {}
for key, result_list in results.items():
    domain, env, task, recognizer = key
    # assumes every repetition's result dict shares the same percentage keys,
    # so the first repetition's keys are representative — TODO confirm
    percentages = result_list[0].keys()
    detailed_summary[key] = {}
    if (domain, recognizer) not in compiled_accuracies:
        compiled_accuracies[(domain, recognizer)] = {}
    for percentage in percentages:
        # "total" is an aggregate entry, not an observation percentage.
        if percentage == "total":
            continue
        if percentage not in compiled_accuracies[(domain, recognizer)].keys():
            compiled_accuracies[(domain, recognizer)][percentage] = {}
        if percentage not in detailed_summary[key].keys():
            detailed_summary[key][percentage] = {}
        # Accuracies for consecutive vs. non-consecutive observation settings,
        # one value per repetition.
        consecutive_accuracies = [
            result[percentage]["consecutive"]["accuracy"] for result in result_list
        ]
        non_consecutive_accuracies = [
            result[percentage]["non_consecutive"]["accuracy"] for result in result_list
        ]
        if (
            "consecutive"
            in compiled_accuracies[(domain, recognizer)][percentage].keys()
        ):
            compiled_accuracies[(domain, recognizer)][percentage]["consecutive"].extend(
                consecutive_accuracies
            )
        else:
            compiled_accuracies[(domain, recognizer)][percentage][
                "consecutive"
            ] = consecutive_accuracies
        if (
            "non_consecutive"
            in compiled_accuracies[(domain, recognizer)][percentage].keys()
        ):
            compiled_accuracies[(domain, recognizer)][percentage][
                "non_consecutive"
            ].extend(non_consecutive_accuracies)
        else:
            compiled_accuracies[(domain, recognizer)][percentage][
                "non_consecutive"
            ] = non_consecutive_accuracies
        # Per-task mean/std over the n repetitions.
        avg_consecutive_accuracy = np.mean(consecutive_accuracies)
        consecutive_std_dev = np.std(consecutive_accuracies)
        detailed_summary[key][percentage]["consecutive"] = (
            avg_consecutive_accuracy,
            consecutive_std_dev,
        )
        avg_non_consecutive_accuracy = np.mean(non_consecutive_accuracies)
        non_consecutive_std_dev = np.std(non_consecutive_accuracies)
        detailed_summary[key][percentage]["non_consecutive"] = (
            avg_non_consecutive_accuracy,
            non_consecutive_std_dev,
        )

# Collapse the pooled accuracies into (mean, std) per
# (domain, recognizer) / percentage / consecutiveness.
compiled_summary = {}
for key, percentage_dict in compiled_accuracies.items():
    compiled_summary[key] = {}
    for percentage, cons_accuracies in percentage_dict.items():
        compiled_summary[key][percentage] = {}
        for is_cons, accuracies in cons_accuracies.items():
            avg_accuracy = np.mean(accuracies)
            std_dev = np.std(accuracies)
            compiled_summary[key][percentage][is_cons] = (avg_accuracy, std_dev)

# Write different summary results to different files
if not os.path.exists(os.path.join("outputs", "summaries")):
    os.makedirs(os.path.join("outputs", "summaries"))
detailed_summary_file_path = os.path.join(
    "outputs",
    "summaries",
    f"detailed_summary_{''.join(configs.keys())}_{recognizers[0]}.txt",
)
compiled_summary_file_path = os.path.join(
    "outputs",
    "summaries",
    f"compiled_summary_{''.join(configs.keys())}_{recognizers[0]}.txt",
)
with open(detailed_summary_file_path, "w") as f:
    for key, percentage_dict in detailed_summary.items():
        domain, env, task, recognizer = key
        f.write(f"{domain}\t{env}\t{task}\t{recognizer}\n")
        for percentage, cons_info in percentage_dict.items():
            for is_cons, (avg_accuracy, std_dev) in cons_info.items():
                f.write(
                    f"\t\t{percentage}\t{is_cons}\t{avg_accuracy:.4f}\t{std_dev:.4f}\n"
                )

with open(compiled_summary_file_path, "w") as f:
    for key, percentage_dict in compiled_summary.items():
        for percentage, cons_info in percentage_dict.items():
            for is_cons, (avg_accuracy, std_dev) in cons_info.items():
                f.write(
                    f"{key[0]}\t{key[1]}\t{percentage}\t{is_cons}\t{avg_accuracy:.4f}\t{std_dev:.4f}\n"
                )
        # NOTE(review): the same stats are written a second time below in a
        # header + indented format — presumably intentional dual formatting,
        # but verify it isn't leftover duplication.
        domain, recognizer = key
        f.write(f"{domain}\t{recognizer}\n")
        for percentage, cons_info in percentage_dict.items():
            for is_cons, (avg_accuracy, std_dev) in cons_info.items():
                f.write(
                    f"\t\t{percentage}\t{is_cons}\t{avg_accuracy:.4f}\t{std_dev:.4f}\n"
                )

print(f"Detailed summary results written to {detailed_summary_file_path}")
print(f"Compiled summary results written to {compiled_summary_file_path}")
|
gr_libs/environment/__init__.py
CHANGED
@@ -1,22 +1,43 @@
|
|
1
|
+
"""
|
2
|
+
A module GR algorithms can store hard-coded parameters anf functionalities
|
3
|
+
that are environment-related.
|
4
|
+
"""
|
5
|
+
|
1
6
|
import importlib.metadata
|
2
7
|
import warnings
|
3
8
|
|
9
|
+
|
4
10
|
def is_extra_installed(package: str, extra: str) -> bool:
    """Check whether *extra* appears among *package*'s declared dependencies.

    Args:
        package (str): The name of the package.
        extra (str): The name of the extra to check.

    Returns:
        bool: True if the extra is installed, False otherwise.
    """
    try:
        # Get metadata for the installed package.
        package_metadata = importlib.metadata.metadata(package)
    except importlib.metadata.PackageNotFoundError:
        # The package is not installed at all.
        return False
    # "Requires-Dist" holds the dependencies listed in the package metadata.
    declared_requirements = package_metadata.get_all("Requires-Dist", [])
    return any(extra in requirement for requirement in declared_requirements)
|
13
29
|
|
30
|
+
|
14
31
|
# Check if `gr_libs[minigrid]` (and the other optional env extras) was
# installed, and import the matching gr_envs environment-registration module
# for each extra that is present.
for env in ["minigrid", "panda", "highway", "maze"]:
    if is_extra_installed("gr_libs", f"gr_envs[{env}]"):
        try:
            importlib.import_module(f"gr_envs.{env}_scripts.envs")
        except ImportError:
            # The extra is declared but its backing gr_envs package is missing,
            # which indicates a broken installation.
            raise ImportError(
                f"gr_envs[{env}] was not installed, but gr_libs[{env}] requires it! if you messed with gr_envs installation, you can reinstall gr_libs."
            )
    else:
        # The extra was never requested: skip its imports with a warning
        # instead of failing.
        warnings.warn(
            f"gr_libs[{env}] was not installed, skipping {env} imports.", RuntimeWarning
        )
|
@@ -0,0 +1,27 @@
|
|
1
|
+
import logging
|
2
|
+
import sys
|
3
|
+
|
4
|
+
from gr_libs.environment.environment import (
|
5
|
+
MINIGRID,
|
6
|
+
PANDA,
|
7
|
+
PARKING,
|
8
|
+
POINT_MAZE,
|
9
|
+
MinigridProperty,
|
10
|
+
PandaProperty,
|
11
|
+
ParkingProperty,
|
12
|
+
PointMazeProperty,
|
13
|
+
)
|
14
|
+
|
15
|
+
|
16
|
+
def domain_to_env_property(domain_name: str):
    """Return the environment-property class for *domain_name*.

    Exits the process with status 1 (after logging an error) when the
    domain is not one of the supported ones.
    """
    supported_domains = {
        MINIGRID: MinigridProperty,
        PARKING: ParkingProperty,
        PANDA: PandaProperty,
        POINT_MAZE: PointMazeProperty,
    }
    property_cls = supported_domains.get(domain_name)
    if property_cls is None:
        logging.error(f"Domain {domain_name} is not supported.")
        sys.exit(1)
    return property_cls
|