mxbiflow 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93) hide show
  1. mxbiflow/__init__.py +3 -0
  2. mxbiflow/assets/__init__.py +5 -0
  3. mxbiflow/assets/clicker.wav +0 -0
  4. mxbiflow/config_store.py +68 -0
  5. mxbiflow/data_logger.py +114 -0
  6. mxbiflow/default/__init__.py +4 -0
  7. mxbiflow/default/idle/assets/apple_v1.png +0 -0
  8. mxbiflow/default/idle/idle.py +57 -0
  9. mxbiflow/detector_bridge.py +87 -0
  10. mxbiflow/game.py +84 -0
  11. mxbiflow/infra/eventbus.py +31 -0
  12. mxbiflow/main.py +106 -0
  13. mxbiflow/models/animal.py +130 -0
  14. mxbiflow/models/reward.py +7 -0
  15. mxbiflow/models/session.py +145 -0
  16. mxbiflow/mxbiflow.py +43 -0
  17. mxbiflow/path.py +41 -0
  18. mxbiflow/scene/__init__.py +8 -0
  19. mxbiflow/scene/scene_manager.py +64 -0
  20. mxbiflow/scene/scene_protocol.py +22 -0
  21. mxbiflow/scheduler.py +90 -0
  22. mxbiflow/tasks/GNGSiD/models.py +70 -0
  23. mxbiflow/tasks/GNGSiD/stages/detect_stage/config.json +116 -0
  24. mxbiflow/tasks/GNGSiD/stages/detect_stage/detect_stage.py +161 -0
  25. mxbiflow/tasks/GNGSiD/stages/detect_stage/detect_stage_models.py +65 -0
  26. mxbiflow/tasks/GNGSiD/stages/discriminate_stage/config.json +70 -0
  27. mxbiflow/tasks/GNGSiD/stages/discriminate_stage/discriminate_stage.py +173 -0
  28. mxbiflow/tasks/GNGSiD/stages/discriminate_stage/discriminate_stage_models.py +80 -0
  29. mxbiflow/tasks/GNGSiD/stages/size_reduction_stage/config.json +83 -0
  30. mxbiflow/tasks/GNGSiD/stages/size_reduction_stage/size_reduction_models.py +58 -0
  31. mxbiflow/tasks/GNGSiD/stages/size_reduction_stage/size_reduction_stage.py +149 -0
  32. mxbiflow/tasks/GNGSiD/tasks/artifacts.py +13 -0
  33. mxbiflow/tasks/GNGSiD/tasks/detect/models.py +21 -0
  34. mxbiflow/tasks/GNGSiD/tasks/detect/scene.py +271 -0
  35. mxbiflow/tasks/GNGSiD/tasks/discriminate/discriminate_models.py +31 -0
  36. mxbiflow/tasks/GNGSiD/tasks/discriminate/discriminate_scene.py +336 -0
  37. mxbiflow/tasks/GNGSiD/tasks/touch/touch_models.py +17 -0
  38. mxbiflow/tasks/GNGSiD/tasks/touch/touch_scene.py +256 -0
  39. mxbiflow/tasks/GNGSiD/tasks/utils/targets.py +57 -0
  40. mxbiflow/tasks/cross_modal/bundle_dir.py +553 -0
  41. mxbiflow/tasks/cross_modal/config.py +41 -0
  42. mxbiflow/tasks/cross_modal/media.py +61 -0
  43. mxbiflow/tasks/cross_modal/models.py +57 -0
  44. mxbiflow/tasks/cross_modal/scene.py +252 -0
  45. mxbiflow/tasks/cross_modal/stage.py +218 -0
  46. mxbiflow/tasks/cross_modal/trial_io.py +23 -0
  47. mxbiflow/tasks/cross_modal/trial_schema.py +113 -0
  48. mxbiflow/tasks/default/error_task/error_scene.py +53 -0
  49. mxbiflow/tasks/default/idle_task/assets/apple_v1.png +0 -0
  50. mxbiflow/tasks/default/idle_task/idle_scene.py +85 -0
  51. mxbiflow/tasks/default/initial_habituation_training/README.md +188 -0
  52. mxbiflow/tasks/default/initial_habituation_training/stages/config.csv +7 -0
  53. mxbiflow/tasks/default/initial_habituation_training/stages/config.json +67 -0
  54. mxbiflow/tasks/default/initial_habituation_training/stages/initial_habituation_training_stage.py +172 -0
  55. mxbiflow/tasks/default/initial_habituation_training/stages/models.py +56 -0
  56. mxbiflow/tasks/default/initial_habituation_training/tasks/stay_to_reward/stay_to_reward.py +244 -0
  57. mxbiflow/tasks/default/initial_habituation_training/tasks/stay_to_reward/stay_to_reward_models.py +50 -0
  58. mxbiflow/tasks/task_protocol.py +26 -0
  59. mxbiflow/tasks/task_table.py +29 -0
  60. mxbiflow/tasks/two_alternative_choice/assets/starter.py +27 -0
  61. mxbiflow/tasks/two_alternative_choice/models.py +68 -0
  62. mxbiflow/tasks/two_alternative_choice/stages/size_reduction_stage/config.json +118 -0
  63. mxbiflow/tasks/two_alternative_choice/stages/size_reduction_stage/size_reduction_models.py +41 -0
  64. mxbiflow/tasks/two_alternative_choice/stages/size_reduction_stage/size_reduction_stage.py +122 -0
  65. mxbiflow/tasks/two_alternative_choice/tasks/touch/touch_models.py +19 -0
  66. mxbiflow/tasks/two_alternative_choice/tasks/touch/touch_scene.py +249 -0
  67. mxbiflow/timer/__init__.py +3 -0
  68. mxbiflow/timer/frame_timer.py +47 -0
  69. mxbiflow/timer/realtime_timer.py +0 -0
  70. mxbiflow/tmp_email.py +13 -0
  71. mxbiflow/ui/components/animal.py +87 -0
  72. mxbiflow/ui/components/baseconfig.py +68 -0
  73. mxbiflow/ui/components/card.py +18 -0
  74. mxbiflow/ui/components/device_card/__init__.py +17 -0
  75. mxbiflow/ui/components/device_card/detector/beambreak_detector_card.py +29 -0
  76. mxbiflow/ui/components/device_card/detector/fusion_detector.py +45 -0
  77. mxbiflow/ui/components/device_card/detector/mock_detector_card.py +20 -0
  78. mxbiflow/ui/components/device_card/detector/rfid_detector.py +40 -0
  79. mxbiflow/ui/components/device_card/device_card.py +67 -0
  80. mxbiflow/ui/components/device_card/rewarder/mock_rewarder_card.py +20 -0
  81. mxbiflow/ui/components/device_card/rewarder/rpi_gpio_rewarder.py +33 -0
  82. mxbiflow/ui/components/devices.py +183 -0
  83. mxbiflow/ui/components/dialog/__init__.py +3 -0
  84. mxbiflow/ui/components/dialog/add_devices_dialog.py +64 -0
  85. mxbiflow/ui/components/experiment_groups.py +122 -0
  86. mxbiflow/ui/experiment_panel.py +91 -0
  87. mxbiflow/ui/mxbi_panel.py +152 -0
  88. mxbiflow/utils/logger.py +19 -0
  89. mxbiflow/utils/serial.py +10 -0
  90. mxbiflow-0.1.1.dist-info/METADATA +168 -0
  91. mxbiflow-0.1.1.dist-info/RECORD +93 -0
  92. mxbiflow-0.1.1.dist-info/WHEEL +4 -0
  93. mxbiflow-0.1.1.dist-info/entry_points.txt +4 -0
@@ -0,0 +1,53 @@
1
+ from tkinter import Canvas
2
+ from typing import TYPE_CHECKING
3
+
4
+ if TYPE_CHECKING:
5
+ from mxbi.models.animal import AnimalState, ScheduleCondition
6
+ from mxbi.models.session import SessionState
7
+ from mxbi.models.task import Feedback
8
+ from mxbi.theater import Theater
9
+
10
+
11
class ErrorScene:
    """Full-screen red scene shown when the session is in an error state.

    Implements the scene protocol: start/quit/on_idle/on_return/condition.
    """

    def __init__(
        self,
        theater: "Theater",
        session_state: "SessionState",
        animal_state: "AnimalState",
    ) -> None:
        self._theater = theater
        self._session_config = session_state
        self._screen_type = self._session_config.session_config.screen_type
        # Build the view immediately; start() only enters the Tk mainloop.
        self._on_trial_start()

    def _on_trial_start(self) -> None:
        # Create a full-screen red canvas centered on the root window.
        canvas = Canvas(
            self._theater.root,
            bg="red",
            width=self._screen_type.width,
            height=self._screen_type.height,
            highlightthickness=0,
        )
        canvas.place(relx=0.5, rely=0.5, anchor="center")
        self._background = canvas

    def start(self) -> "Feedback":
        """Block in the Tk mainloop until quit(); always reports True."""
        self._theater.root.mainloop()
        return True

    def quit(self) -> None:
        """Tear down the canvas and leave the mainloop."""
        self._background.destroy()
        self._theater.root.quit()

    def on_idle(self) -> None:
        self.quit()

    def on_return(self) -> None: ...

    @property
    def condition(self) -> "ScheduleCondition | None":
        # The error scene never participates in difficulty scheduling.
        return None
@@ -0,0 +1,85 @@
1
+ from pathlib import Path
2
+ from tkinter.ttk import Label
3
+ from typing import TYPE_CHECKING
4
+
5
+ from mxbi.utils.tkinter.components.canvas_with_border import CanvasWithInnerBorder
6
+ from PIL import Image, ImageTk
7
+
8
+ if TYPE_CHECKING:
9
+ from mxbi.models.animal import AnimalState, ScheduleCondition
10
+ from mxbi.models.session import SessionState
11
+ from mxbi.models.task import Feedback
12
+ from mxbi.theater import Theater
13
+
14
+ ASSETS_PATH = Path(__file__).parent / "assets"
15
+
16
+
17
class IDLEScene:
    """Idle scene shown between trials: black bordered screen with an apple image.

    Pressing <r> manually triggers the standard reward stimulus (for testing).
    Implements the scene protocol: start/quit/on_idle/on_return/condition.
    """

    def __init__(
        self,
        theater: "Theater",
        session_state: "SessionState",
        animal_state: "AnimalState",
    ) -> None:
        self._theater = theater
        self._session_config = session_state
        self._screen_type = self._session_config.session_config.screen_type
        # Pre-build a 1000 ms reward stimulus used by the manual <r> binding.
        self._standard_reward_stimulus = self._theater.new_standard_reward_stimulus(
            1000
        )

        self._on_trial_start()

    def start(self) -> "Feedback":
        """Block in the Tk mainloop until the scene is quit; always True."""
        self._theater.root.mainloop()

        return True

    def _on_trial_start(self) -> None:
        self._create_view()
        self._bind_events()

    def _on_trial_end(self) -> None:
        # Stop any playing audio before tearing down the view and mainloop.
        self._theater.aplayer.stop()
        self._background.destroy()
        self._theater.root.quit()

    def _create_view(self) -> None:
        self._background = CanvasWithInnerBorder(
            master=self._theater.root,
            bg="black",
            width=self._screen_type.width,
            height=self._screen_type.height,
            border_width=40,
        )
        self._background.place(relx=0.5, rely=0.5, anchor="center")

        # Offset the apple image to the right of the screen center; the -90
        # degree rotation matches the physical screen orientation — TODO confirm.
        xshift = 240
        xcenter = self._screen_type.width / 2 + xshift
        ycenter = self._screen_type.height / 2

        self._img = Image.open(ASSETS_PATH / "apple_v1.png")
        self._img = self._img.resize((400, 400)).rotate(-90, expand=True)
        # Keep the PhotoImage referenced on self: Tk does not hold a strong
        # reference, so a local would be garbage-collected and show nothing.
        self._img = ImageTk.PhotoImage(self._img)
        self.label_apple = Label(self._background, image=self._img)
        self.label_apple.place(x=xcenter, y=ycenter, anchor="center")

    def _bind_events(self) -> None:
        self._background.focus_set()
        self._background.bind("<r>", lambda e: self._give_stimulus(1000))

    def _give_stimulus(self, duration: int) -> None:
        """Play the standard reward stimulus for `duration` milliseconds.

        Bug fix: the previous implementation ignored `duration` and always
        played 1000 ms; the only current call site passes 1000, so behavior
        is unchanged there.
        """
        self._standard_reward_stimulus.play(duration)

    def quit(self) -> None:
        self._on_trial_end()

    def on_idle(self) -> None:
        self._on_trial_end()

    def on_return(self) -> None:
        self._on_trial_end()

    @property
    def condition(self) -> "ScheduleCondition | None":
        # The idle scene never participates in difficulty scheduling.
        return None
@@ -0,0 +1,188 @@
1
+ ## Automated Habituation Training (Strong Auditory Cue)
2
+
3
+ **🎯 Overview**
4
+
5
+ - Familiarize the animal with the MXBI apparatus and establish the fixed relationship “auditory stimulus → reward.”
6
+ - Present a consistent routine to gradually extend stay time while keeping the reward experience stable.
7
+ - The animal must remain in MXBI for a period to obtain a reward.
8
+ - An auditory stimulus is presented before each reward to strengthen the association.
9
+
10
+ **🔁 Procedure**
11
+ The experimental flow is:
12
+
13
+ ```mermaid
14
+ flowchart TD
15
+ mxbi -->|start| idle_state
16
+ idle_state -->|animal enter as trigger| habituation_state
17
+ habituation_state -->|animal exit| idle_state
18
+ habituation_state -->|animal stay some time| stimulus
19
+ stimulus -->|after stimulus| reward
20
+ reward -->|after reward| habituation_state
21
+ ```
22
+
23
+ **🧭 Levels**
24
+ This stage consists of a series of levels. The parameters that change as the level increases include:
25
+
26
+ ```json
27
+ "0": {
28
+ "level": 0,
29
+ "evaluation_interval": 20,
30
+ "reward_interval": 5,
31
+ "reward_duration": 1000,
32
+ "stimulus_duration": 1000,
33
+ "stimulus_density": 5
34
+ }
35
+ ```
36
+
37
+ At level 0:
38
+
39
+ - Stay in MXBI for 5 seconds to receive a reward.
40
+ - Reward duration: 1 second; auditory stimulus duration: 1 second.
41
+ - Stimulus density: 5.
42
+ - Complete at least 20 trials to advance.
43
+
44
+ As the level increases:
45
+
46
+ - Required stay time increases; the auditory stimulus becomes longer and louder.
47
+ - More trials are required to progress, but more rewards are also available.
48
+
49
+ Outcome goal:
50
+
51
+ - Single stay in MXBI exceeds 1 minute.
52
+ - Strong understanding of “auditory stimulus → reward”.
53
+
54
+ > **Note: We currently lack detection methods; because the animal must fully control the experiment on its own, traditional training approaches cannot be used**
55
+
56
+ **📊 Current Level Table**
57
+
58
+ | level | evaluation_interval | reward_interval_s | reward_duration_ms | stimulus_duration_ms | stimulus_density |
59
+ | :---: | :-----------------: | :---------------: | :----------------: | :------------------: | :--------------: |
60
+ | 0 | 20 | 5 | 1000 | 1000 | 5 |
61
+ | 1 | 20 | 5 | 1200 | 1500 | 7 |
62
+ | 2 | 40 | 10 | 1400 | 2000 | 9 |
63
+ | 3 | 50 | 15 | 1600 | 2500 | 11 |
64
+ | 4 | 50 | 20 | 1800 | 3000 | 13 |
65
+ | 5 | 100 | 25 | 2000 | 3000 | 15 |
66
+ | 6 | 100 | 30 | 2200 | 3000 | 17 |
67
+ | 7 | 100 | 35 | 2400 | 3000 | 19 |
68
+ | 8 | 100 | 40 | 2600 | 3000 | 21 |
69
+ | 9 | 100 | 45 | 2800 | 3000 | 23 |
70
+ | 10 | 100 | 50 | 3000 | 3000 | 25 |
71
+ | 11 | 100 | 55 | 3000 | 3000 | 25 |
72
+ | 12 | 100 | 60 | 3000 | 3000 | 25 |
73
+
74
+ **🧪 Next Step**
75
+ Currently, the time the animal must stay in MXBI to get a reward is controlled by `reward_interval`. As shown above, it is a fixed value. Consider changing it to a range and randomly choosing the actual `reward_interval` within that range.
76
+
77
+ The complete configuration file is as follows:
78
+
79
+ ```json
80
+ {
81
+ "default": {
82
+ "condition": {
83
+ "config": {
84
+ "difficulty_increase_threshold": 0.8,
85
+ "allow_decrease": false
86
+ }
87
+ },
88
+ "levels_table": {
89
+ "0": {
90
+ "level": 0,
91
+ "evaluation_interval": 20,
92
+ "reward_interval": 5,
93
+ "reward_duration": 1000,
94
+ "stimulus_duration": 1000,
95
+ "stimulus_density": 5
96
+ },
97
+ "1": {
98
+ "level": 1,
99
+ "evaluation_interval": 30,
100
+ "reward_interval": 10,
101
+ "reward_duration": 1200,
102
+ "stimulus_duration": 1500,
103
+ "stimulus_density": 7
104
+ },
105
+ "2 ": {
106
+ "level": 2,
107
+ "evaluation_interval": 40,
108
+ "reward_interval": 15,
109
+ "reward_duration": 1400,
110
+ "stimulus_duration": 2000,
111
+ "stimulus_density": 9
112
+ },
113
+ "3": {
114
+ "level": 3,
115
+ "evaluation_interval": 50,
116
+ "reward_interval": 20,
117
+ "reward_duration": 1600,
118
+ "stimulus_duration": 2500,
119
+ "stimulus_density": 11
120
+ },
121
+ "4": {
122
+ "level": 4,
123
+ "evaluation_interval": 50,
124
+ "reward_interval": 25,
125
+ "reward_duration": 1800,
126
+ "stimulus_duration": 3000,
127
+ "stimulus_density": 13
128
+ },
129
+ "5": {
130
+ "level": 5,
131
+ "evaluation_interval": 100,
132
+ "reward_interval": 30,
133
+ "reward_duration": 2000,
134
+ "stimulus_duration": 3000,
135
+ "stimulus_density": 15
136
+ },
137
+ "6": {
138
+ "level": 6,
139
+ "evaluation_interval": 100,
140
+ "reward_interval": 35,
141
+ "reward_duration": 2200,
142
+ "stimulus_duration": 3000,
143
+ "stimulus_density": 17
144
+ },
145
+ "7": {
146
+ "level": 7,
147
+ "evaluation_interval": 100,
148
+ "reward_interval": 40,
149
+ "reward_duration": 2400,
150
+ "stimulus_duration": 3000,
151
+ "stimulus_density": 19
152
+ },
153
+ "8": {
154
+ "level": 8,
155
+ "evaluation_interval": 100,
156
+ "reward_interval": 45,
157
+ "reward_duration": 2600,
158
+ "stimulus_duration": 3000,
159
+ "stimulus_density": 21
160
+ },
161
+ "9": {
162
+ "level": 9,
163
+ "evaluation_interval": 100,
164
+ "reward_interval": 50,
165
+ "reward_duration": 2800,
166
+ "stimulus_duration": 3000,
167
+ "stimulus_density": 23
168
+ },
169
+ "10": {
170
+ "level": 10,
171
+ "evaluation_interval": 100,
172
+ "reward_interval": 55,
173
+ "reward_duration": 3000,
174
+ "stimulus_duration": 3000,
175
+ "stimulus_density": 25
176
+ },
177
+ "11": {
178
+ "level": 11,
179
+ "evaluation_interval": 100,
180
+ "reward_interval": 60,
181
+ "reward_duration": 3000,
182
+ "stimulus_duration": 3000,
183
+ "stimulus_density": 25
184
+ }
185
+ }
186
+ }
187
+ }
188
+ ```
@@ -0,0 +1,7 @@
1
+ _key,level,entry_reward,evaluation_interval,min_stimulus_interval,max_stimulus_interval,target,stimulus_density
2
+ 0,0,1,10,2,2,5,5
3
+ 1,1,0.8,50,2,2,10,5
4
+ 2,2,0.6,100,3,5,15,10
5
+ 3,3,0.4,100,4,7,20,15
6
+ 4,4,0.2,100,5,9,25,20
7
+ 5,5,0,200,5,10,30,25
@@ -0,0 +1,67 @@
1
+ {
2
+ "default": {
3
+ "condition": {
4
+ "config": {
5
+ "difficulty_increase_threshold": 0,
6
+ "allow_decrease": false,
7
+ "present_level_trial_id": true
8
+ }
9
+ },
10
+ "levels_table": {
11
+ "0": {
12
+ "level": 0,
13
+ "entry_reward": 1,
14
+ "evaluation_interval": 10,
15
+ "min_stimulus_interval": 2,
16
+ "max_stimulus_interval": 2,
17
+ "target": 5,
18
+ "stimulus_density": 5
19
+ },
20
+ "1": {
21
+ "level": 1,
22
+ "entry_reward": 0.8,
23
+ "evaluation_interval": 50,
24
+ "min_stimulus_interval": 2,
25
+ "max_stimulus_interval": 2,
26
+ "target": 10,
27
+ "stimulus_density": 5
28
+ },
29
+ "2": {
30
+ "level": 2,
31
+ "entry_reward": 0.6,
32
+ "evaluation_interval": 100,
33
+ "min_stimulus_interval": 3,
34
+ "max_stimulus_interval": 5,
35
+ "target": 15,
36
+ "stimulus_density": 10
37
+ },
38
+ "3": {
39
+ "level": 3,
40
+ "entry_reward": 0.4,
41
+ "evaluation_interval": 100,
42
+ "min_stimulus_interval": 4,
43
+ "max_stimulus_interval": 7,
44
+ "target": 20,
45
+ "stimulus_density": 15
46
+ },
47
+ "4": {
48
+ "level": 4,
49
+ "entry_reward": 0.2,
50
+ "evaluation_interval": 100,
51
+ "min_stimulus_interval": 5,
52
+ "max_stimulus_interval": 9,
53
+ "target": 25,
54
+ "stimulus_density": 20
55
+ },
56
+ "5": {
57
+ "level": 5,
58
+ "entry_reward": 0,
59
+ "evaluation_interval": 200,
60
+ "min_stimulus_interval": 5,
61
+ "max_stimulus_interval": 10,
62
+ "target": 30,
63
+ "stimulus_density": 25
64
+ }
65
+ }
66
+ }
67
+ }
@@ -0,0 +1,172 @@
1
+ from datetime import datetime
2
+ from random import choices
3
+ from typing import TYPE_CHECKING, Final
4
+
5
+ from mxbi.data_logger import DataLogger, DataLoggerType
6
+ from mxbi.models.animal import ScheduleCondition
7
+ from mxbi.tasks.default.initial_habituation_training.stages.models import (
8
+ InitialHabituationTrainingStageConfig,
9
+ StageContext,
10
+ StageContexts,
11
+ config,
12
+ )
13
+ from mxbi.tasks.default.initial_habituation_training.tasks.stay_to_reward.stay_to_reward import (
14
+ DefaultStayToRewardScene,
15
+ )
16
+ from mxbi.tasks.default.initial_habituation_training.tasks.stay_to_reward.stay_to_reward_models import (
17
+ Result,
18
+ TrialConfig,
19
+ )
20
+ from mxbi.utils.logger import logger
21
+ from mxbi.utils.tkinter.components.canvas_with_border import CanvasWithInnerBorder
22
+
23
+ if TYPE_CHECKING:
24
+ from mxbi.models.animal import AnimalState
25
+ from mxbi.models.session import SessionConfig, SessionState
26
+ from mxbi.models.task import Feedback
27
+ from mxbi.theater import Theater
28
+
29
+
30
+ contexts = StageContexts()
31
+ background: CanvasWithInnerBorder | None = None
32
+
33
+
34
def _initialize_contexts(session_config: "SessionConfig") -> None:
    """Ensure every animal in the session owns a StageContext entry."""
    # `contexts` is only mutated, never rebound, so no `global` is needed.
    for animal_name in session_config.animals:
        contexts.root.setdefault(animal_name, StageContext())
39
+
40
+
41
def _initialize_background(theater: "Theater", session_config: "SessionConfig") -> None:
    """Create the shared full-screen bordered canvas (once per process)."""
    global background
    screen = session_config.screen_type
    background = CanvasWithInnerBorder(
        master=theater.root,
        bg="black",
        width=screen.width,
        height=screen.height,
        border_width=40,
    )
50
+
51
+
52
class InitialHabituationTrainingStage:
    """Stage that habituates an animal to the apparatus via stay-to-reward trials.

    Each instance prepares one trial: it resolves the per-animal level
    parameters, randomly decides whether an entry reward is given, and
    delegates the trial itself to DefaultStayToRewardScene.
    """

    STAGE_NAME: Final[str] = "DEFAULT_INITIAL_HABITUATION_TRAINING_STAGE"

    def __init__(
        self,
        theater: "Theater",
        session_state: "SessionState",
        animal_state: "AnimalState",
    ) -> None:
        self._theater = theater
        self._session_state = session_state
        self._animal_state = animal_state

        # Lazily create the module-level shared state on first use.
        if not contexts.root:
            _initialize_contexts(session_state.session_config)

        if background is None:
            _initialize_background(theater, session_state.session_config)

        context = contexts.root[animal_state.name]

        self._stage_config = self._load_stage_config(self._animal_state.name)

        _levels_config = self._stage_config.levels_table[animal_state.level]
        self._stage_config.condition.config.evaluation_interval = (
            _levels_config.evaluation_interval
        )

        # entry_reward is a probability in [0, 1]: draw True with that weight.
        entry_reward = choices(
            [True, False],
            weights=[_levels_config.entry_reward, 1 - _levels_config.entry_reward],
        )[0]

        _config = TrialConfig(
            level=_levels_config.level,
            entry_reward=entry_reward,
            min_stimulus_interval=_levels_config.min_stimulus_interval,
            max_stimulus_interval=_levels_config.max_stimulus_interval,
            target=_levels_config.target,
            stimulus_density=_levels_config.stimulus_density,
        )

        self._data_logger = DataLogger(
            self._session_state,
            self._animal_state.name,
            self.STAGE_NAME,
            DataLoggerType.JSONL,
        )

        # Record where this animal's data lands the first time around.
        if self._animal_state.data_path is None:
            self._animal_state.data_path = self._data_logger.path

        assert background is not None

        self._task = DefaultStayToRewardScene(
            theater=theater,
            animal_state=animal_state,
            screen_type=session_state.session_config.screen_type,
            trial_config=_config,
            context=context,
            background=background,
        )

    def start(self) -> "Feedback":
        """Run one trial, persist its data, and report schedule feedback."""
        trial_data = self._task.start()
        self._data_logger.save(trial_data.model_dump())

        feedback = self._handle_result(trial_data.result)
        logger.debug(
            f"{self.STAGE_NAME}: "
            f"session_id={self._session_state.session_id}, "
            f"animal_name={self._animal_state.name}, "
            f"animal_level={self._animal_state.level}, "
            f"state_name={self.STAGE_NAME}, "
            f"result={trial_data}, "
            f"feedback={feedback}"
        )

        self._maybe_increase_difficulty()

        return feedback

    def _maybe_increase_difficulty(self) -> None:
        """Force a difficulty increase after 30 s of continuous session time.

        A start time of 0.0 means no session is in progress, so nothing is done.
        """
        if self._animal_state.animal_session_start_time == 0.0:
            return
        now = datetime.now().timestamp()
        session_duration = now - self._animal_state.animal_session_start_time
        if session_duration < 30:
            return
        # Replaced leftover print() debugging with structured logging.
        logger.debug(
            f"{self.STAGE_NAME}: session_duration={session_duration}, "
            f"session_start_time={self._animal_state.animal_session_start_time}"
        )
        self._animal_state.animal_session_start_time = datetime.now().timestamp()
        # NOTE(review): reaches into scheduler internals (_scheduler,
        # _increase_difficulty); consider exposing a public API instead.
        self._theater._scheduler._increase_difficulty(self._animal_state)

    def _load_stage_config(self, monkey: str) -> InitialHabituationTrainingStageConfig:
        """Return the per-monkey config, falling back to the "default" entry."""
        stage_config = config.root.get(monkey) or config.root.get("default")
        if stage_config is None:
            raise ValueError("No default stage config found")
        return stage_config

    def _handle_result(self, result: "Result") -> "Feedback":
        # Only a CORRECT trial counts as positive feedback.
        return result == Result.CORRECT

    # NOTE(review): "cancle" is a typo in the task's public API; kept as-is
    # because DefaultStayToRewardScene defines it under that name.
    def quit(self) -> None:
        self._task.cancle()

    def on_idle(self) -> None:
        self._task.cancle()

    def on_return(self) -> None:
        self._task.cancle()

    @property
    def condition(self) -> "ScheduleCondition | None":
        return self._stage_config.condition
@@ -0,0 +1,56 @@
1
+ from pathlib import Path
2
+
3
+ from mxbi.config import Configure
4
+ from mxbi.models.animal import ScheduleCondition
5
+ from mxbi.tasks.GNGSiD.models import MonkeyName
6
+ from pydantic import BaseModel, ConfigDict, Field, RootModel
7
+
8
CONFIG_PATH = Path(__file__).parent / "config.json"


class InitialHabituationTraingStageLeveledParams(BaseModel):
    """Per-level parameters for the initial habituation training stage.

    NOTE(review): "Traing" is a typo in the class name, kept for backward
    compatibility with existing imports.
    """

    model_config = ConfigDict(frozen=True)

    # Index of this difficulty level.
    level: int
    # Probability [0, 1] of rewarding the animal on entry.
    entry_reward: float

    # Number of trials between difficulty evaluations.
    evaluation_interval: int

    # Bounds of the randomized stimulus interval — units presumably seconds
    # (matches README/config naming); TODO confirm.
    min_stimulus_interval: float
    max_stimulus_interval: float

    target: float

    stimulus_density: int


class InitialHabituationTrainingStageConfig(BaseModel):
    """Scheduling condition plus the level table for one animal."""

    model_config = ConfigDict(frozen=True)

    condition: ScheduleCondition
    levels_table: dict[int, InitialHabituationTraingStageLeveledParams]


class InitialHabituationTrainingStageConfigs(RootModel):
    """Mapping of monkey name (or "default") to its stage config."""

    model_config = ConfigDict(frozen=True)

    root: dict[MonkeyName, InitialHabituationTrainingStageConfig]


class StageContext(BaseModel):
    """Mutable per-animal counters accumulated across trials."""

    # Accumulated stay duration.
    duration: int = 0
    # Number of rewards delivered so far.
    rewards: int = 0


class StageContexts(RootModel):
    """Mapping of monkey name to its StageContext."""

    root: dict[MonkeyName, StageContext] = Field(default_factory=dict)


def load_config() -> InitialHabituationTrainingStageConfigs:
    """Load config.json and derive level_count for each animal's condition."""
    configs = Configure(CONFIG_PATH, InitialHabituationTrainingStageConfigs).value
    # Renamed from `config`: the loop variable shadowed the module-level
    # `config` binding created below, which obscured intent.
    for stage_config in configs.root.values():
        stage_config.condition.level_count = len(stage_config.levels_table)
    return configs


# Module-level singleton, loaded once at import time.
config = load_config()