gr-libs 0.2.2__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- gr_libs/_evaluation/_generate_experiments_results.py +0 -141
- gr_libs/_version.py +2 -2
- gr_libs/all_experiments.py +73 -107
- gr_libs/environment/environment.py +22 -2
- gr_libs/evaluation/generate_experiments_results.py +100 -0
- gr_libs/ml/neural/deep_rl_learner.py +17 -20
- gr_libs/odgr_executor.py +20 -25
- gr_libs/problems/consts.py +568 -290
- gr_libs/recognizer/_utils/__init__.py +1 -0
- gr_libs/recognizer/gr_as_rl/gr_as_rl_recognizer.py +12 -1
- gr_libs/recognizer/graml/graml_recognizer.py +16 -8
- gr_libs/tutorials/gcdraco_panda_tutorial.py +6 -2
- gr_libs/tutorials/gcdraco_parking_tutorial.py +3 -1
- gr_libs/tutorials/graml_minigrid_tutorial.py +16 -12
- gr_libs/tutorials/graml_panda_tutorial.py +6 -2
- gr_libs/tutorials/graml_parking_tutorial.py +3 -1
- gr_libs/tutorials/graml_point_maze_tutorial.py +15 -2
- {gr_libs-0.2.2.dist-info → gr_libs-0.2.5.dist-info}/METADATA +27 -16
- {gr_libs-0.2.2.dist-info → gr_libs-0.2.5.dist-info}/RECORD +26 -25
- {gr_libs-0.2.2.dist-info → gr_libs-0.2.5.dist-info}/WHEEL +1 -1
- tests/test_odgr_executor_expertbasedgraml.py +14 -0
- tests/test_odgr_executor_gcdraco.py +14 -0
- tests/test_odgr_executor_gcgraml.py +14 -0
- tests/test_odgr_executor_graql.py +14 -0
- gr_libs/_evaluation/_analyze_results_cross_alg_cross_domain.py +0 -260
- gr_libs/_evaluation/_generate_task_specific_statistics_plots.py +0 -497
- gr_libs/_evaluation/_get_plans_images.py +0 -61
- gr_libs/_evaluation/_increasing_and_decreasing_.py +0 -106
- /gr_libs/{_evaluation → evaluation}/__init__.py +0 -0
- {gr_libs-0.2.2.dist-info → gr_libs-0.2.5.dist-info}/top_level.txt +0 -0
gr_libs/odgr_executor.py
CHANGED
```diff
@@ -4,7 +4,7 @@ import time
 
 import dill
 
-from gr_libs.environment.
+from gr_libs.environment._utils.utils import domain_to_env_property
 from gr_libs.metrics.metrics import stochastic_amplified_selection
 from gr_libs.ml.neural.deep_rl_learner import DeepRLAgent
 from gr_libs.ml.utils.format import random_subset_with_order
```
```diff
@@ -14,10 +14,10 @@ from gr_libs.ml.utils.storage import (
     get_policy_sequences_result_path,
 )
 from gr_libs.problems.consts import PROBLEMS
+from gr_libs.recognizer._utils import recognizer_str_to_obj
 from gr_libs.recognizer.gr_as_rl.gr_as_rl_recognizer import Draco, GCDraco
 from gr_libs.recognizer.graml.graml_recognizer import Graml
 from gr_libs.recognizer.recognizer import GaAgentTrainerRecognizer, LearningRecognizer
-from gr_libs.recognizer.utils import recognizer_str_to_obj
 
 
 def validate(args, recognizer_type, task_inputs):
```
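With 0.2.5, `recognizer_str_to_obj` is imported from `gr_libs.recognizer._utils` instead of `gr_libs.recognizer.utils`. A hedged compatibility sketch for downstream code that must import it under either release (the try/except fallback is an illustration, not something the package itself provides):

```python
try:
    # gr_libs 0.2.5: the helper lives in the private _utils package
    from gr_libs.recognizer._utils import recognizer_str_to_obj
except ImportError:
    # gr_libs 0.2.2: the old module path
    from gr_libs.recognizer.utils import recognizer_str_to_obj
```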
```diff
@@ -52,9 +52,7 @@ def run_odgr_problem(args):
         dlp_time = 0
         if issubclass(recognizer_type, LearningRecognizer):
             start_dlp_time = time.time()
-            recognizer.domain_learning_phase(
-                base_goals=value["goals"], train_configs=value["train_configs"]
-            )
+            recognizer.domain_learning_phase(value)
             dlp_time = time.time() - start_dlp_time
         elif key.startswith("G_"):
             start_ga_time = time.time()
```
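`domain_learning_phase` is now called with the whole per-task dictionary rather than with separate `base_goals` and `train_configs` keyword arguments. A minimal, self-contained sketch of the calling-convention change; `DummyRecognizer` and the example `value` dict are illustrative stand-ins, not part of gr_libs:

```python
class DummyRecognizer:
    """Stand-in for a LearningRecognizer subclass (illustrative only)."""

    def domain_learning_phase(self, problem):
        # 0.2.5 style: receive the whole task dict and pull out the pieces.
        goals = problem["goals"]
        train_configs = problem["train_configs"]
        print(f"training on {len(goals)} goals with {len(train_configs)} configs")


# Illustrative task definition with the two keys the old call site used.
value = {"goals": ["goal_a", "goal_b"], "train_configs": [None, None]}

# 0.2.2: recognizer.domain_learning_phase(base_goals=value["goals"],
#                                         train_configs=value["train_configs"])
# 0.2.5:
DummyRecognizer().domain_learning_phase(value)
```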
```diff
@@ -184,10 +182,17 @@ def run_odgr_problem(args):
             recognizer=args.recognizer,
         )
     )
-
-
+    if args.experiment_num is not None:
+        res_txt = os.path.join(res_file_path, f"res_{args.experiment_num}.txt")
+        res_pkl = os.path.join(res_file_path, f"res_{args.experiment_num}.pkl")
+    else:
+        res_txt = os.path.join(res_file_path, "res.txt")
+        res_pkl = os.path.join(res_file_path, "res.pkl")
+
+    print(f"generating results into {res_txt} and {res_pkl}")
+    with open(res_pkl, "wb") as results_file:
         dill.dump(results, results_file)
-    with open(
+    with open(res_txt, "w") as results_file:
         results_file.write(str(results))
 
 
```
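Result files are now suffixed with the experiment number when `--experiment_num` is given, so parallel runs write `res_<N>.txt` / `res_<N>.pkl` instead of overwriting a shared `res.txt` / `res.pkl`. A small sketch for reading one of the pickled results back, since they are written with `dill`; the example path is hypothetical:

```python
import dill


def load_results(res_pkl_path):
    """Load a results pickle written by odgr_executor (res.pkl or res_<N>.pkl)."""
    with open(res_pkl_path, "rb") as f:
        return dill.load(f)


# Example (hypothetical output of a run started with --experiment_num 3):
# results = load_results("res_3.pkl")
```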
```diff
@@ -225,23 +230,7 @@ def parse_args():
     )
     required_group.add_argument(
         "--task",
-        choices=[
-            "L1",
-            "L2",
-            "L3",
-            "L4",
-            "L5",
-            "L11",
-            "L22",
-            "L33",
-            "L44",
-            "L55",
-            "L111",
-            "L222",
-            "L333",
-            "L444",
-            "L555",
-        ],
+        choices=["L1", "L2", "L3", "L4", "L5"],
         required=True,
         help="Task identifier (e.g., L1, L2,...,L5)",
     )
```
```diff
@@ -251,6 +240,12 @@ def parse_args():
     optional_group.add_argument(
         "--collect_stats", action="store_true", help="Whether to collect statistics"
     )
+    optional_group.add_argument(
+        "--experiment_num",
+        type=int,
+        default=None,
+        help="Experiment number for parallel runs",
+    )
     args = parser.parse_args()
 
     ### VALIDATE INPUTS ###
```
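The new optional `--experiment_num` flag is what drives the per-experiment result filenames above. A minimal argparse sketch mirroring just this option (the real parser in `odgr_executor.py` also defines required arguments such as `--recognizer` and `--task`):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--experiment_num",
    type=int,
    default=None,
    help="Experiment number for parallel runs",
)

args = parser.parse_args(["--experiment_num", "3"])
assert args.experiment_num == 3
# With the flag omitted, experiment_num stays None and odgr_executor falls
# back to the unsuffixed res.txt / res.pkl filenames.
```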