gr-libs 0.1.6.post1__py3-none-any.whl → 0.1.7.post0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gr_libs/__init__.py CHANGED
@@ -1,2 +1,6 @@
 from gr_libs.recognizer.graml.graml_recognizer import ExpertBasedGraml, GCGraml
-from gr_libs.recognizer.gr_as_rl.gr_as_rl_recognizer import Graql
+from gr_libs.recognizer.gr_as_rl.gr_as_rl_recognizer import Graql
+try:
+    from ._version import version as __version__
+except ImportError:
+    __version__ = "0.0.0" # fallback if file isn't present
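With this change the installed package exposes a top-level version attribute that degrades gracefully when the generated `_version.py` is missing. A minimal usage sketch (assuming the wheel is installed as `gr_libs`):

```python
import gr_libs

# "0.1.7.post0" for this release; "0.0.0" if _version.py was not generated
# (e.g., when importing from an unbuilt source checkout).
print(gr_libs.__version__)
```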
gr_libs/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.1.6.post1'
-__version_tuple__ = version_tuple = (0, 1, 6)
+__version__ = version = '0.1.7.post0'
+__version_tuple__ = version_tuple = (0, 1, 7, 'post0')
gr_libs/environment/__init__.py CHANGED
@@ -12,7 +12,7 @@ def is_extra_installed(package: str, extra: str) -> bool:
         return False # The package is not installed
 
 # Check if `gr_libs[minigrid]` was installed
-for env in ["minigrid", "panda", "highway", "point_maze"]:
+for env in ["minigrid", "panda", "highway", "maze"]:
     if is_extra_installed("gr_libs", f"gr_envs[{env}]"):
         try:
             importlib.import_module(f"gr_envs.{env}_scripts.envs")
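The only change in this hunk is the extras list: the `point_maze` entry is renamed to `maze`, matching the corresponding `gr_envs` extra. For orientation, a check of this kind can be written with the standard library alone; the sketch below is illustrative, not the package's exact implementation:

```python
from importlib.metadata import PackageNotFoundError, metadata

def is_extra_installed(package: str, extra: str) -> bool:
    """Return True if `extra` appears among the package's declared requirements."""
    try:
        requires = metadata(package).get_all("Requires-Dist") or []
        return any(extra in req for req in requires)
    except PackageNotFoundError:
        return False  # The package is not installed

# Mirrors the loop above: probe each optional environment family.
for env in ["minigrid", "panda", "highway", "maze"]:
    print(env, is_extra_installed("gr_libs", f"gr_envs[{env}]"))
```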
gr_libs/ml/tabular/tabular_q_learner.py CHANGED
@@ -351,7 +351,7 @@ class TabularQLearner(TabularRLAgent):
     def simplify_observation(self, observation):
         return [(obs['direction'], agent_pos_x, agent_pos_y, action) for ((obs, (agent_pos_x, agent_pos_y)), action) in observation] # list of tuples, each tuple the sample
 
-    def generate_observation(self, action_selection_method: MethodType, random_optimalism, save_fig = False, fig_path: str=None, env_prop=None):
+    def generate_observation(self, action_selection_method: MethodType, random_optimalism, save_fig=False, fig_path: str=None, env_prop=None):
         """
         Generate a single observation given a list of agents
 
gr_libs/ml/utils/storage.py CHANGED
@@ -15,6 +15,13 @@ def get_storage_framework_dir(recognizer: str):
     return os.path.join(get_storage_dir(),recognizer)
 
 def get_storage_dir():
+    # Prefer local directory if it exists (e.g., in GitHub workspace)
+    if os.path.exists("dataset"):
+        return "dataset"
+    # Fall back to pre-mounted directory (e.g., in Docker container)
+    if os.path.exists("/preloaded_data"):
+        return "/preloaded_data"
+    # Default to "dataset" even if it doesn't exist (e.g., will be created)
     return "dataset"
 
 def _get_models_directory_name():
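Storage resolution now prefers a local `dataset` directory, then the pre-mounted `/preloaded_data` path (as used by the CI image whose Dockerfile is deleted further down), and otherwise falls back to the name `dataset`. A minimal sketch of how a caller observes the new order, assuming the module is importable as `gr_libs.ml.utils.storage` (its path in RECORD); the directory creation is only for illustration:

```python
import os

from gr_libs.ml.utils.storage import get_storage_dir

os.makedirs("dataset", exist_ok=True)  # a local copy of the dataset takes precedence
print(get_storage_dir())               # -> "dataset"
# Without ./dataset, "/preloaded_data" is returned when it exists (e.g., inside the CI image);
# otherwise the default name "dataset" is returned and is expected to be created later.
```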
gr_libs/recognizer/graml/graml_recognizer.py CHANGED
@@ -82,14 +82,14 @@ class Graml(LearningRecognizer):
             dev_loader=DataLoader(dev_dataset, batch_size=self.env_prop.get_lstm_props().batch_size, shuffle=False, collate_fn=self.collate_func))
         save_weights(model=self.model, path=self.model_file_path)
 
-    def goals_adaptation_phase(self, dynamic_goals: List[EnvProperty]):
+    def goals_adaptation_phase(self, dynamic_goals: List[EnvProperty], save_fig=False):
         self.is_first_inf_since_new_goals = True
         self.current_goals = dynamic_goals
         # start by training each rl agent on the base goal set
         self.embeddings_dict = {} # relevant if the embedding of the plan occurs during the goals adaptation phase
         self.plans_dict = {} # relevant if the embedding of the plan occurs during the inference phase
         for goal in self.current_goals:
-            obss = self.generate_sequences_library(goal)
+            obss = self.generate_sequences_library(goal, save_fig=save_fig)
             self.plans_dict[str(goal)] = obss
 
     def get_goal_plan(self, goal):
@@ -150,7 +150,7 @@ class Graml(LearningRecognizer):
         return closest_goal
 
     @abstractmethod
-    def generate_sequences_library(self, goal: str) -> List[List[Tuple[np.ndarray, np.ndarray]]]:
+    def generate_sequences_library(self, goal: str, save_fig=False) -> List[List[Tuple[np.ndarray, np.ndarray]]]:
         pass
 
     # this function duplicates every sequence and creates a consecutive and non-consecutive version of it
@@ -192,10 +192,10 @@ class MCTSBasedGraml(BGGraml, GaAdaptingRecognizer):
         super().__init__(*args, **kwargs)
         if self.rl_agent_type==None: self.rl_agent_type = TabularQLearner
 
-    def generate_sequences_library(self, goal: str) -> List[List[Tuple[np.ndarray, np.ndarray]]]:
+    def generate_sequences_library(self, goal: str, save_fig=False) -> List[List[Tuple[np.ndarray, np.ndarray]]]:
         problem_name = self.env_prop.goal_to_problem_str(goal)
         img_path = os.path.join(get_policy_sequences_result_path(self.env_prop.domain_name, recognizer=self.__class__.__name__), problem_name + "_MCTS")
-        return mcts_model.plan(self.env_prop.name, problem_name, goal, save_fig=True, img_path=img_path, env_prop=self.env_prop)
+        return mcts_model.plan(self.env_prop.name, problem_name, goal, save_fig=save_fig, img_path=img_path, env_prop=self.env_prop)
 
 class ExpertBasedGraml(BGGraml, GaAgentTrainerRecognizer):
     def __init__(self, *args, **kwargs):
@@ -206,15 +206,23 @@ class ExpertBasedGraml(BGGraml, GaAgentTrainerRecognizer):
         else:
             self.rl_agent_type = DeepRLAgent
 
-    def generate_sequences_library(self, goal: str) -> List[List[Tuple[np.ndarray, np.ndarray]]]:
+    def generate_sequences_library(self, goal: str, save_fig=False) -> List[List[Tuple[np.ndarray, np.ndarray]]]:
         problem_name = self.env_prop.goal_to_problem_str(goal)
         kwargs = {"domain_name":self.domain_name, "problem_name":problem_name}
         if self.dynamic_train_configs_dict[problem_name][0] != None: kwargs["algorithm"] = self.dynamic_train_configs_dict[problem_name][0]
         if self.dynamic_train_configs_dict[problem_name][1] != None: kwargs["num_timesteps"] = self.dynamic_train_configs_dict[problem_name][1]
         agent = self.rl_agent_type(**kwargs)
         agent.learn()
-        fig_path = get_and_create(f"{os.path.abspath(os.path.join(get_policy_sequences_result_path(domain_name=self.env_prop.domain_name, env_name=self.env_prop.name, recognizer=self.__class__.__name__), problem_name))}_bg_sequence")
-        return [agent.generate_observation(action_selection_method=metrics.greedy_selection, random_optimalism=False, save_fig=True, fig_path=fig_path, env_prop=self.env_prop)]
+        agent_kwargs = {
+            "action_selection_method": metrics.greedy_selection,
+            "random_optimalism": False,
+            "save_fig": save_fig,
+            "env_prop": self.env_prop
+        }
+        if save_fig:
+            fig_path = get_and_create(f"{os.path.abspath(os.path.join(get_policy_sequences_result_path(domain_name=self.env_prop.domain_name, env_name=self.env_prop.name, recognizer=self.__class__.__name__), problem_name))}_bg_sequence")
+            agent_kwargs["fig_path"] = fig_path
+        return [agent.generate_observation(**agent_kwargs)]
 
     def goals_adaptation_phase(self, dynamic_goals: List[str], dynamic_train_configs):
         self.dynamic_goals_problems = [self.env_prop.goal_to_problem_str(g) for g in dynamic_goals]
@@ -244,20 +252,21 @@ class GCGraml(Graml, GaAdaptingRecognizer):
         gc_agent.learn()
         self.agents.append(ContextualAgent(problem_name=self.env_prop.name, problem_goal="general", agent=gc_agent))
 
-    def generate_sequences_library(self, goal: str) -> List[List[Tuple[np.ndarray, np.ndarray]]]:
+    def generate_sequences_library(self, goal: str, save_fig=False) -> List[List[Tuple[np.ndarray, np.ndarray]]]:
         problem_name = self.env_prop.goal_to_problem_str(goal)
         kwargs = {"domain_name":self.domain_name, "problem_name":self.env_prop.name} # problem name is env name in gc case
         if self.original_train_configs[0][0] != None: kwargs["algorithm"] = self.original_train_configs[0][0]
         if self.original_train_configs[0][1] != None: kwargs["num_timesteps"] = self.original_train_configs[0][1]
         agent = self.rl_agent_type(**kwargs)
         agent.learn()
-        fig_path = get_and_create(f"{os.path.abspath(os.path.join(get_policy_sequences_result_path(domain_name=self.env_prop.domain_name, env_name=self.env_prop.name, recognizer=self.__class__.__name__), problem_name))}_gc_sequence")
         agent_kwargs = {
             "action_selection_method": metrics.stochastic_amplified_selection,
             "random_optimalism": True,
-            "save_fig": True,
-            "fig_path": fig_path
+            "save_fig": save_fig
         }
+        if save_fig:
+            fig_path = get_and_create(f"{os.path.abspath(os.path.join(get_policy_sequences_result_path(domain_name=self.env_prop.domain_name, env_name=self.env_prop.name, recognizer=self.__class__.__name__), problem_name))}_gc_sequence")
+            agent_kwargs["fig_path"] = fig_path
         if self.env_prop.use_goal_directed_problem(): agent_kwargs["goal_directed_problem"] = problem_name
         else: agent_kwargs["goal_directed_goal"] = goal
         obss = []
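Taken together, the `graml_recognizer.py` hunks thread a `save_fig` flag from `goals_adaptation_phase` down through every `generate_sequences_library` override, so plan figures and their `fig_path` are only produced on request rather than unconditionally. A minimal calling sketch against the base-class signature shown above (`recognizer` and `goals` stand in for an already-constructed Graml-style recognizer and its dynamic goal list):

```python
from typing import List

def adapt_goals(recognizer, goals: List, with_figures: bool = False) -> None:
    """Illustrative only: exercise the new keyword on a Graml-style recognizer."""
    # save_fig defaults to False, so figure rendering (and fig_path creation via
    # get_and_create) now happens only when explicitly requested.
    recognizer.goals_adaptation_phase(dynamic_goals=goals, save_fig=with_figures)
```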
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gr_libs
-Version: 0.1.6.post1
+Version: 0.1.7.post0
 Summary: Package with goal recognition frameworks baselines
 Author: Ben Nageris
 Author-email: Matan Shamir <matan.shamir@live.biu.ac.il>, Osher Elhadad <osher.elhadad@live.biu.ac.il>
@@ -1,5 +1,3 @@
-CI/README.md,sha256=CbWNAWrXFFwYq3sWAORhoQIE5busoNyYh_rFWVH1enw,800
-CI/docker_build_context/Dockerfile,sha256=Rk7LYTxOW7VVJcmNa8csZ4BwkunMYIiHX4WVSuMam50,311
 evaluation/analyze_results_cross_alg_cross_domain.py,sha256=s_DDh4rNfRnvQ0PDa2d5411jYOa7CaI1YeB8Dpup7QU,9803
 evaluation/create_minigrid_map_image.py,sha256=jaSW3n3tY222iFUeAMqedBP9cvD88GCzPrQ6_XHv5oQ,1242
 evaluation/file_system.py,sha256=SSYnj8QGFkq-8V_0s7x2MWbD88aFaoFY4Ogc_Pt8m6U,1601
@@ -9,9 +7,9 @@ evaluation/generate_experiments_results_new_ver2.py,sha256=jeKj_wgdM50o2vi8WZI-s
 evaluation/generate_task_specific_statistics_plots.py,sha256=rBsqaMe2irP_Cfo-icwIg4_dsleFjEH6eiQCcUBj6WU,15286
 evaluation/get_plans_images.py,sha256=BT-bGWuOPUAYpZVDwk7YMRBLdgKaDbNOBjMrtcl1Vjk,2346
 evaluation/increasing_and_decreasing_.py,sha256=fu1hkEjhOQC3jEsjiS7emW_UPRpVFCaae0d0E2MGZqI,2991
-gr_libs/__init__.py,sha256=-uKsQiHIL7yojbDwlTR-I8sj1WX9XT52PoFbPjtUTKo,145
-gr_libs/_version.py,sha256=C8Me-BH17Mqlv65Ba3Tqc5gFEzabp8fxxyIA9C_XdDQ,517
-gr_libs/environment/__init__.py,sha256=HFVGBcufWf8-ahCo6h_s2pFEyvDy59cFg8z908RgdYo,1038
+gr_libs/__init__.py,sha256=WlSRpZIpz5GxLNk96nhympbk3Z5nsMiSOyiAWj17S88,280
+gr_libs/_version.py,sha256=Zy3HQFB_Viry2Rl81-7LPU4kL2FTQegnwLvl0VxTs3E,526
+gr_libs/environment/__init__.py,sha256=KlRp3qdgxEmej31zDoDsYPwbcAqyDglx6x0mH0KRmHU,1032
 gr_libs/environment/environment.py,sha256=d6ZbiAQ4H1aLrUFI8sm0BN9DVW3JtzpkodSi_70Z_PY,6780
 gr_libs/environment/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gr_libs/environment/utils/utils.py,sha256=4yM3s30KjyuEmWR8UuICE5rR03zsLi3tzqNDvBkdPcU,537
@@ -37,34 +35,33 @@ gr_libs/ml/sequential/__init__.py,sha256=rusN4ahTvAeAq1Saz6qS_9HEU7WuXDJu2zwhc9W
 gr_libs/ml/sequential/lstm_model.py,sha256=Vzm-C1URR84PGNEecj69GUtn3ZmOVyh1BAY6CUnfL1Q,8978
 gr_libs/ml/tabular/__init__.py,sha256=jAfjfTFZLLlVm1KUiJdxdnaNGFp1J2KBU89q_vvradM,177
 gr_libs/ml/tabular/state.py,sha256=8xroKF3y3nRX0LK1QX5fRT2PS2WmvcDPp0UvPFdSx2A,733
-gr_libs/ml/tabular/tabular_q_learner.py,sha256=q6Dz4RTX0GjBumUiS2mUFKvEiKUBecj0q1RpWvPvmmE,18972
+gr_libs/ml/tabular/tabular_q_learner.py,sha256=5QU9ZWC-Cu5jxv5K1TohoRjQrRDhCgTs1Mt18cqM_Sc,18970
 gr_libs/ml/tabular/tabular_rl_agent.py,sha256=7w8PYbKi8QgxHJyECWU_rURtT89spg0tHIMI1cDwYc8,3764
 gr_libs/ml/utils/__init__.py,sha256=qH3pcnem5Z6rkQ4RTZi47AXJRe1RkFEST_-DrBmfWcM,258
 gr_libs/ml/utils/env.py,sha256=AWVN0OXYmFU-J3FUiwvEAIY93Suf1oL6VNcxtyWJraM,171
 gr_libs/ml/utils/format.py,sha256=nu7RzVwn_raG_fqqmnqlJgUjtA0yzKztkB3a5QZnRYo,3071
 gr_libs/ml/utils/math.py,sha256=n62zssVOLHnUb4dPofAoFhoLOKl5n_xBzaKQOUQBoNc,440
 gr_libs/ml/utils/other.py,sha256=HKUfeLBbd4DgJxSTs3ya9KQ85Acx4TjycRrtGD9WQ3s,505
-gr_libs/ml/utils/storage.py,sha256=oCdvL_ypCglnSJsyyXzNyV_UJASTfioa3yJhFlFso64,4277
+gr_libs/ml/utils/storage.py,sha256=52wR2pgFmcCOhqbu5Km3tegjAmtI1Fb4HYAVUnUubZk,4626
 gr_libs/problems/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gr_libs/problems/consts.py,sha256=ON7yfKTAKETg7i3okDYuOzEU7KWvynyubl0m7TlU6Hs,38808
 gr_libs/recognizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gr_libs/recognizer/recognizer.py,sha256=ZrApJVdBQxKRYhhDiWLCNGmlxgi674nwgb30BgVggC8,1705
-gr_libs/recognizer/recognizer_doc.md,sha256=RnTvbZhl2opvU7-QT4pULCV5HCdJTw2dsu8WQOOiR3E,2521
 gr_libs/recognizer/gr_as_rl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gr_libs/recognizer/gr_as_rl/gr_as_rl_recognizer.py,sha256=84GdfohC2dZoNH_QEo7GpSt8nZWdfqSRKCTY99X_iME,5215
 gr_libs/recognizer/graml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gr_libs/recognizer/graml/gr_dataset.py,sha256=lG6m3ulxFELpH1oURnlcmNDWOrxyuzvlAR28ZTqB7L8,7224
-gr_libs/recognizer/graml/graml_recognizer.py,sha256=SGs7rtkA73lbCv9HISa6dfjVUJUhlH54QriVsoGVRss,15672
+gr_libs/recognizer/graml/graml_recognizer.py,sha256=1xLl1gHj3JxWhHtV9h3SvsW7oJdxsQQV0F-VLtlTmKQ,15911
 gr_libs/recognizer/utils/__init__.py,sha256=ewSroxL7aATvvm-Xzc1_-61mP2LU2U28YaOEqvVVDB0,41
 gr_libs/recognizer/utils/format.py,sha256=e0AnqtPeYoJsV9Z7cEBpgbzTM0hLNxFIjn07fQ3YbQw,492
 tests/test_graml.py,sha256=ZJB2jqtf4Q2-KZredkJq90teqmHBIvigCAQpvR5G110,559
 tests/test_graql.py,sha256=-onMi13e2wStOmB5bYv2f3Ita3QFFiw416XMBkby0OI,141
 tutorials/graml_minigrid_tutorial.py,sha256=ONvxFi79R7d8dcd6gy083Z_yy9A2flhGTDIDRxurdx8,1782
 tutorials/graml_panda_tutorial.py,sha256=wtv_lsw0vsU7j45GKeWecTfE7jzfh4iVGEVnQyaWthM,2063
-tutorials/graml_parking_tutorial.py,sha256=46-sfxmYA9jLRSpqIF9z69MLSfOSTJarfjlQ_Igq294,1769
+tutorials/graml_parking_tutorial.py,sha256=M6bt1WQOOgn8_CRyG2kjxF14PMeyXVAWRDq1ZRwGTXo,1808
 tutorials/graml_point_maze_tutorial.py,sha256=mYq3IxYbf9jidq-4VdT3MdStV80Q5lytFv6Xzzn22Ys,1835
 tutorials/graql_minigrid_tutorial.py,sha256=Jb0TCUhiZQkFeafJWUTPnCISd4FKfPrqP-xfHiqCGKE,1635
-gr_libs-0.1.6.post1.dist-info/METADATA,sha256=UPwlwVlbGTpTsUhYwWH5hYr-hSBpcWjrFIA7sWg0Kj4,9620
-gr_libs-0.1.6.post1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-gr_libs-0.1.6.post1.dist-info/top_level.txt,sha256=rL-bbK-KnLzVbLIUCdN1riH58lup3jG0NJ3LTt_qSwo,38
-gr_libs-0.1.6.post1.dist-info/RECORD,,
+gr_libs-0.1.7.post0.dist-info/METADATA,sha256=aS7y9Nl1JErXYdpAHstuQP_W1QMcKMbGet6IfxfJ_Do,9620
+gr_libs-0.1.7.post0.dist-info/WHEEL,sha256=wXxTzcEDnjrTwFYjLPcsW_7_XihufBwmpiBeiXNBGEA,91
+gr_libs-0.1.7.post0.dist-info/top_level.txt,sha256=fJQF8Q8Dfh_D3pA2mhNodazNjzW6b3oWfnx6Jdo-pBU,35
+gr_libs-0.1.7.post0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (78.1.0)
+Generator: setuptools (80.1.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
@@ -1,4 +1,3 @@
-CI
 evaluation
 gr_libs
 tests
tutorials/graml_parking_tutorial.py CHANGED
@@ -5,6 +5,7 @@ from gr_libs.metrics.metrics import stochastic_amplified_selection
 from gr_libs.ml.neural.deep_rl_learner import DeepRLAgent, GCDeepRLAgent
 from gr_libs.ml.utils.format import random_subset_with_order
 from gr_libs.recognizer.graml.graml_recognizer import ExpertBasedGraml, GCGraml
+import gr_libs.environment.environment
 
 def run_graml_parking_tutorial():
     recognizer = GCGraml(
CI/README.md DELETED
@@ -1,12 +0,0 @@
-## How to build a new docker image including new trained agents:
-1. Install docker
-2. Make sure you have a dataset.zip at your repo root
-3. Make sure you have a classic token in github: https://github.com/settings/tokens . If you don't, create one with package write, read and delete permissions and copy it somewhere safe.
-4. Authenticate to ghcr with docker by running:
-```sh
-echo ghp_REST_OF_TOKEN | docker login ghcr.io -u MatanShamir1 --password-stdin
-```
-3. docker build -t ghcr.io/<your-username>/gr_test_base:latest -f CI/Dockerfile .
-(the -f Dockerfile tells docker which Dockerfile to use and the '.' tells docker what's the build context, or where the dataset.zip should live)
-4. docker push ghcr.io/<your-username>/gr_test_base:latest
-docker push ghcr.io/MatanShamir1/gr_test_base:latest
CI/docker_build_context/Dockerfile DELETED
@@ -1,15 +0,0 @@
-FROM python:3.11-slim
-
-# Set workdir
-WORKDIR /app
-
-# Install unzip
-RUN apt-get update && apt-get install -y unzip && rm -rf /var/lib/apt/lists/*
-
-# Copy and unzip the dataset
-COPY dataset.zip .
-RUN unzip dataset.zip && rm dataset.zip
-RUN mv dataset_new dataset
-
-# Just start with bash by default
-CMD [ "bash" ]
gr_libs/recognizer/recognizer_doc.md DELETED
@@ -1,61 +0,0 @@
-# Recognizer Module Documentation
-
-This document provides an overview of the recognizer module, including its class hierarchy and instructions for adding a new class of recognizer.
-
-## Class Hierarchy
-
-The recognizer module consists of an abstract base class `Recognizer` and several derived classes, each implementing specific behaviors. The main classes are:
-
-1. **Recognizer (Abstract Base Class)**
-   - `inference_phase()` (abstract method)
-
-2. **LearningRecognizer (Extends Recognizer)**
-   - `domain_learning_phase()`
-
-3. **GaAgentTrainerRecognizer (Extends Recognizer)**
-   - `goals_adaptation_phase()` (abstract method)
-   - `domain_learning_phase()`
-
-4. **GaAdaptingRecognizer (Extends Recognizer)**
-   - `goals_adaptation_phase()` (abstract method)
-
-5. **GRAsRL (Extends Recognizer)**
-   - Implements `goals_adaptation_phase()`
-   - Implements `inference_phase()`
-
-6. **Specific Implementations:**
-   - `Graql (Extends GRAsRL, GaAgentTrainerRecognizer)`
-   - `Draco (Extends GRAsRL, GaAgentTrainerRecognizer)`
-   - `GCDraco (Extends GRAsRL, LearningRecognizer, GaAdaptingRecognizer)`
-   - `Graml (Extends LearningRecognizer)`
-
-## How to Add a New Recognizer Class
-
-To add a new class of recognizer, follow these steps:
-
-1. **Determine the Type of Recognizer:**
-   - Will it require learning? Extend `LearningRecognizer`.
-   - Will it adapt goals dynamically? Extend `GaAdaptingRecognizer`.
-   - Will it train agents for new goals? Extend `GaAgentTrainerRecognizer`.
-   - Will it involve RL-based recognition? Extend `GRAsRL`.
-
-2. **Define the Class:**
-   - Create a new class that extends the appropriate base class(es).
-   - Implement the required abstract methods (`inference_phase()`, `goals_adaptation_phase()`, etc.).
-
-3. **Initialize the Recognizer:**
-   - Ensure proper initialization by calling `super().__init__(*args, **kwargs)`.
-   - Set up any necessary agent storage or evaluation functions.
-
-4. **Implement Core Methods:**
-   - Define how the recognizer processes inference sequences.
-   - Implement learning or goal adaptation logic if applicable.
-
-5. **Register the Recognizer:**
-   - Ensure it integrates properly with the existing system by using the correct `domain_to_env_property()`.
-
-6. **Test the New Recognizer:**
-   - Run experiments to validate its behavior.
-   - Compare results against existing recognizers to ensure correctness.
-
-By following these steps, you can seamlessly integrate a new recognizer into the framework while maintaining compatibility with the existing structure.