@jspsych/plugin-audio-keyboard-response 1.1.2 → 2.0.0
This diff shows the content of publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
- package/README.md +8 -2
- package/dist/index.browser.js +266 -235
- package/dist/index.browser.js.map +1 -1
- package/dist/index.browser.min.js +2 -2
- package/dist/index.browser.min.js.map +1 -1
- package/dist/index.cjs +208 -229
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +198 -115
- package/dist/index.js +208 -229
- package/dist/index.js.map +1 -1
- package/package.json +3 -3
- package/src/index.spec.ts +123 -3
- package/src/index.ts +145 -133
package/src/index.spec.ts
CHANGED
@@ -1,10 +1,131 @@
-import { pressKey, simulateTimeline, startTimeline } from "@jspsych/test-utils";
+jest.mock("../../jspsych/src/modules/plugin-api/AudioPlayer");
+
+import { flushPromises, pressKey, simulateTimeline, startTimeline } from "@jspsych/test-utils";
 import { initJsPsych } from "jspsych";

+//@ts-expect-error mock
+import { mockStop } from "../../jspsych/src/modules/plugin-api/AudioPlayer";
 import audioKeyboardResponse from ".";

 jest.useFakeTimers();

+beforeEach(() => {
+  jest.clearAllMocks();
+});
+
+describe("audio-keyboard-response", () => {
+  // this relies on AudioContext, which we haven't mocked yet
+  it.skip("works with all defaults", async () => {
+    const { expectFinished, expectRunning } = await startTimeline([
+      {
+        type: audioKeyboardResponse,
+        stimulus: "foo.mp3",
+      },
+    ]);
+
+    expectRunning();
+
+    pressKey("a");
+
+    expectFinished();
+
+    await flushPromises();
+  });
+
+  it("works with use_webaudio:false", async () => {
+    const jsPsych = initJsPsych({ use_webaudio: false });
+
+    const { expectFinished, expectRunning } = await startTimeline(
+      [
+        {
+          type: audioKeyboardResponse,
+          stimulus: "foo.mp3",
+        },
+      ],
+      jsPsych
+    );
+
+    await expectRunning();
+    pressKey("a");
+    await expectFinished();
+  });
+
+  it("ends when trial_ends_after_audio is true and audio finishes", async () => {
+    const jsPsych = initJsPsych({ use_webaudio: false });
+
+    const { expectFinished, expectRunning } = await startTimeline(
+      [
+        {
+          type: audioKeyboardResponse,
+          stimulus: "foo.mp3",
+          trial_ends_after_audio: true,
+        },
+      ],
+      jsPsych
+    );
+
+    await expectRunning();
+
+    jest.runAllTimers();
+
+    await expectFinished();
+  });
+
+  it("prevents responses when response_allowed_while_playing is false", async () => {
+    const jsPsych = initJsPsych({ use_webaudio: false });
+
+    const { expectFinished, expectRunning } = await startTimeline(
+      [
+        {
+          type: audioKeyboardResponse,
+          stimulus: "foo.mp3",
+          response_allowed_while_playing: false,
+        },
+      ],
+      jsPsych
+    );
+
+    await expectRunning();
+
+    pressKey("a");
+
+    await expectRunning();
+
+    jest.runAllTimers();
+
+    await expectRunning();
+
+    pressKey("a");
+
+    await expectFinished();
+  });
+
+  it("ends when trial_duration is shorter than the audio duration, stopping the audio", async () => {
+    const jsPsych = initJsPsych({ use_webaudio: false });
+
+    const { expectFinished, expectRunning } = await startTimeline(
+      [
+        {
+          type: audioKeyboardResponse,
+          stimulus: "foo.mp3",
+          trial_duration: 500,
+        },
+      ],
+      jsPsych
+    );
+
+    await expectRunning();
+
+    expect(mockStop).not.toHaveBeenCalled();
+
+    jest.advanceTimersByTime(500);
+
+    expect(mockStop).toHaveBeenCalled();
+
+    await expectFinished();
+  });
+});
+
 describe("audio-keyboard-response simulation", () => {
   test("data mode works", async () => {
     const timeline = [
@@ -22,8 +143,7 @@ describe("audio-keyboard-response simulation", () => {
     expect(typeof getData().values()[0].response).toBe("string");
   });

-
-  test.skip("visual mode works", async () => {
+  test("visual mode works", async () => {
     const jsPsych = initJsPsych({ use_webaudio: false });

     const timeline = [
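For reference, the simulation tests whose tail appears in the second hunk follow the same pattern as the new tests above: build a timeline, run it through a helper from `@jspsych/test-utils`, and assert on the generated data. The sketch below is illustrative rather than copied from the package; it assumes that `simulateTimeline` (imported above) defaults to data-only simulation and resolves to helpers including `getData` and `expectFinished`, and it reuses the hypothetical `foo.mp3` stimulus used throughout these tests.

import { simulateTimeline } from "@jspsych/test-utils";

import audioKeyboardResponse from ".";

test("data-only simulation generates a keyboard response", async () => {
  // Run the trial in simulation mode; no real audio playback or key press is needed.
  const { getData, expectFinished } = await simulateTimeline([
    {
      type: audioKeyboardResponse,
      stimulus: "foo.mp3", // hypothetical stimulus path, as in the tests above
    },
  ]);

  await expectFinished();

  // The simulated trial data should contain the documented response and rt fields.
  const data = getData().values()[0];
  expect(typeof data.response).toBe("string");
  expect(typeof data.rt).toBe("number");
});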
package/src/index.ts
CHANGED
@@ -1,36 +1,53 @@
+import autoBind from "auto-bind";
 import { JsPsych, JsPsychPlugin, ParameterType, TrialType } from "jspsych";

+import { AudioPlayerInterface } from "../../jspsych/src/modules/plugin-api/AudioPlayer";
+import { version } from "../package.json";
+
 const info = <const>{
   name: "audio-keyboard-response",
+  version: version,
   parameters: {
     /** The audio file to be played. */
     stimulus: {
       type: ParameterType.AUDIO,
-      pretty_name: "Stimulus",
       default: undefined,
     },
-    /**
+    /** This array contains the key(s) that the participant is allowed to press in order to respond to the stimulus.
+     * Keys should be specified as characters (e.g., `'a'`, `'q'`, `' '`, `'Enter'`, `'ArrowDown'`) -
+     * see [this page](https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key/Key_Values)
+     * and [this page (event.key column)](https://www.freecodecamp.org/news/javascript-keycode-list-keypress-event-key-codes/)
+     * for more examples. Any key presses that are not listed in the array will be ignored. The default value of `"ALL_KEYS"`
+     * means that all keys will be accepted as valid responses. Specifying `"NO_KEYS"` will mean that no responses are allowed.
+     */
     choices: {
       type: ParameterType.KEYS,
-      pretty_name: "Choices",
       default: "ALL_KEYS",
     },
-    /** Any content here will be displayed below the stimulus.
+    /** This string can contain HTML markup. Any content here will be displayed below the stimulus. The intention is that
+     * it can be used to provide a reminder about the action the participant is supposed to take (e.g., which key to press).
+     */
     prompt: {
       type: ParameterType.HTML_STRING,
       pretty_name: "Prompt",
       default: null,
     },
-    /**
+    /** How long to wait for the participant to make a response before ending the trial in milliseconds. If the
+     * participant fails to make a response before this timer is reached, the participant's response will be
+     * recorded as null for the trial and the trial will end. If the value of this parameter is null, then the
+     * trial will wait for a response indefinitely.
+     */
     trial_duration: {
       type: ParameterType.INT,
-      pretty_name: "Trial duration",
       default: null,
     },
-    /** If true, the trial will end
+    /** If true, then the trial will end whenever the participant makes a response (assuming they make their
+     * response before the cutoff specified by the `trial_duration` parameter). If false, then the trial will
+     * continue until the value for `trial_duration` is reached. You can set this parameter to `false` to
+     * force the participant to listen to the stimulus for a fixed amount of time, even if they respond before the time is complete
+     */
     response_ends_trial: {
       type: ParameterType.BOOL,
-      pretty_name: "Response ends trial",
       default: true,
     },
     /** If true, then the trial will end as soon as the audio file finishes playing. */
@@ -39,72 +56,77 @@ const info = <const>{
       pretty_name: "Trial ends after audio",
       default: false,
     },
-    /** If true, then responses are allowed while the audio is playing. If false, then the audio must finish
+    /** If true, then responses are allowed while the audio is playing. If false, then the audio must finish
+     * playing before a keyboard response is accepted. Once the audio has played all the way through, a valid
+     * keyboard response is allowed (including while the audio is being re-played via on-screen playback controls).
+     */
     response_allowed_while_playing: {
       type: ParameterType.BOOL,
-      pretty_name: "Response allowed while playing",
       default: true,
     },
   },
+  data: {
+    /** Indicates which key the participant pressed. If no key was pressed before the trial ended, then the value will be `null`. */
+    response: {
+      type: ParameterType.STRING,
+    },
+    /** The response time in milliseconds for the participant to make a response. The time is measured from when the stimulus
+     * first began playing until the participant made a key response. If no key was pressed before the trial ended, then the
+     * value will be `null`.
+     */
+    rt: {
+      type: ParameterType.INT,
+    },
+    /** Path to the audio file that played during the trial. */
+    stimulus: {
+      type: ParameterType.STRING,
+    },
+  },
 };

 type Info = typeof info;

 /**
- *
+ * This plugin plays audio files and records responses generated with the keyboard.
+ *
+ * If the browser supports it, audio files are played using the WebAudio API. This allows for reasonably precise timing of the
+ * playback. The timing of responses generated is measured against the WebAudio specific clock, improving the measurement of
+ * response times. If the browser does not support the WebAudio API, then the audio file is played with HTML5 audio.
+ *
+ * Audio files can be automatically preloaded by jsPsych using the [`preload` plugin](preload.md). However, if you are using
+ * timeline variables or another dynamic method to specify the audio stimulus, then you will need to [manually preload](../overview/media-preloading.md#manual-preloading) the audio.
  *
- *
+ * The trial can end when the participant responds, when the audio file has finished playing, or if the participant has
+ * failed to respond within a fixed length of time. You can also prevent a keyboard response from being recorded before
+ * the audio has finished playing.
  *
  * @author Josh de Leeuw
- * @see {@link https://www.jspsych.org/plugins/
+ * @see {@link https://www.jspsych.org/latest/plugins/audio-keyboard-response/ audio-keyboard-response plugin documentation on jspsych.org}
  */
 class AudioKeyboardResponsePlugin implements JsPsychPlugin<Info> {
   static info = info;
-  private audio;
-
-  constructor(private jsPsych: JsPsych) {}
+  private audio: AudioPlayerInterface;
+  private params: TrialType<Info>;
+  private display: HTMLElement;
+  private response: { rt: number; key: string } = { rt: null, key: null };
+  private startTime: number;
+  private finish: ({}: { rt: number; response: string; stimulus: string }) => void;
+
+  constructor(private jsPsych: JsPsych) {
+    autoBind(this);
+  }

   trial(display_element: HTMLElement, trial: TrialType<Info>, on_load: () => void) {
-
-
+    return new Promise(async (resolve) => {
+      this.finish = resolve;
+      this.params = trial;
+      this.display = display_element;
+      // load audio file
+      this.audio = await this.jsPsych.pluginAPI.getAudioPlayer(trial.stimulus);

-    // setup stimulus
-    var context = this.jsPsych.pluginAPI.audioContext();
-
-    // store response
-    var response = {
-      rt: null,
-      key: null,
-    };
-
-    // record webaudio context start time
-    var startTime;
-
-    // load audio file
-    this.jsPsych.pluginAPI
-      .getAudioBuffer(trial.stimulus)
-      .then((buffer) => {
-        if (context !== null) {
-          this.audio = context.createBufferSource();
-          this.audio.buffer = buffer;
-          this.audio.connect(context.destination);
-        } else {
-          this.audio = buffer;
-          this.audio.currentTime = 0;
-        }
-        setupTrial();
-      })
-      .catch((err) => {
-        console.error(
-          `Failed to load audio file "${trial.stimulus}". Try checking the file path. We recommend using the preload plugin to load audio files.`
-        );
-        console.error(err);
-      });
-
-    const setupTrial = () => {
       // set up end event if trial needs it
       if (trial.trial_ends_after_audio) {
-        this.audio.addEventListener("ended", end_trial);
+        this.audio.addEventListener("ended", this.end_trial);
       }

       // show prompt if there is one
@@ -112,107 +134,91 @@ class AudioKeyboardResponsePlugin implements JsPsychPlugin<Info> {
         display_element.innerHTML = trial.prompt;
       }

-      // start audio
-
-
-
-      } else {
-        this.audio.play();
-      }
+      // start playing audio here to record time
+      // use this for offsetting RT measurement in
+      // setup_keyboard_listener
+      this.startTime = this.jsPsych.pluginAPI.audioContext()?.currentTime;

       // start keyboard listener when trial starts or sound ends
       if (trial.response_allowed_while_playing) {
-        setup_keyboard_listener();
+        this.setup_keyboard_listener();
       } else if (!trial.trial_ends_after_audio) {
-        this.audio.addEventListener("ended", setup_keyboard_listener);
+        this.audio.addEventListener("ended", this.setup_keyboard_listener);
       }

       // end trial if time limit is set
       if (trial.trial_duration !== null) {
         this.jsPsych.pluginAPI.setTimeout(() => {
-          end_trial();
+          this.end_trial();
         }, trial.trial_duration);
       }

+      // call trial on_load method because we are done with all loading setup
       on_load();
-    };
-
-    // function to end trial when it is time
-    const end_trial = () => {
-      // kill any remaining setTimeout handlers
-      this.jsPsych.pluginAPI.clearAllTimeouts();

-
-
-
-        this.audio.stop();
-      } else {
-        this.audio.pause();
-      }
-
-      this.audio.removeEventListener("ended", end_trial);
-      this.audio.removeEventListener("ended", setup_keyboard_listener);
+      this.audio.play();
+    });
+  }

-
-
+  private end_trial() {
+    // kill any remaining setTimeout handlers
+    this.jsPsych.pluginAPI.clearAllTimeouts();

-
-
-      rt: response.rt,
-      stimulus: trial.stimulus,
-      response: response.key,
-    };
+    // stop the audio file if it is playing
+    this.audio.stop();

-
-
+    // remove end event listeners if they exist
+    this.audio.removeEventListener("ended", this.end_trial);
+    this.audio.removeEventListener("ended", this.setup_keyboard_listener);

-
-
+    // kill keyboard listeners
+    this.jsPsych.pluginAPI.cancelAllKeyboardResponses();

-
+    // gather the data to store for the trial
+    var trial_data = {
+      rt: this.response.rt,
+      response: this.response.key,
+      stimulus: this.params.stimulus,
     };

-      //
-
-      // only record the first response
-      if (response.key == null) {
-        response = info;
-      }
+    // clear the display
+    this.display.innerHTML = "";

-
-
-
-      }
+    // move on to the next trial
+    this.finish(trial_data);
+  }

-
-
-
-
-
-
-        rt_method: "audio",
-        persist: false,
-        allow_held_key: false,
-        audio_context: context,
-        audio_context_start_time: startTime,
-      });
-    } else {
-      this.jsPsych.pluginAPI.getKeyboardResponse({
-        callback_function: after_response,
-        valid_responses: trial.choices,
-        rt_method: "performance",
-        persist: false,
-        allow_held_key: false,
-      });
-    }
-  };
+  private after_response(info: { key: string; rt: number }) {
+    this.response = info;
+    if (this.params.response_ends_trial) {
+      this.end_trial();
+    }
+  }

-
-
-
+  private setup_keyboard_listener() {
+    // start the response listener
+    if (this.jsPsych.pluginAPI.useWebaudio) {
+      this.jsPsych.pluginAPI.getKeyboardResponse({
+        callback_function: this.after_response,
+        valid_responses: this.params.choices,
+        rt_method: "audio",
+        persist: false,
+        allow_held_key: false,
+        audio_context: this.jsPsych.pluginAPI.audioContext(),
+        audio_context_start_time: this.startTime,
+      });
+    } else {
+      this.jsPsych.pluginAPI.getKeyboardResponse({
+        callback_function: this.after_response,
+        valid_responses: this.params.choices,
+        rt_method: "performance",
+        persist: false,
+        allow_held_key: false,
+      });
+    }
   }

-  simulate(
+  async simulate(
     trial: TrialType<Info>,
     simulation_mode,
     simulation_options: any,
@@ -220,20 +226,24 @@ class AudioKeyboardResponsePlugin implements JsPsychPlugin<Info> {
   ) {
     if (simulation_mode == "data-only") {
       load_callback();
-      this.simulate_data_only(trial, simulation_options);
+      return this.simulate_data_only(trial, simulation_options);
     }
     if (simulation_mode == "visual") {
-      this.simulate_visual(trial, simulation_options, load_callback);
+      return this.simulate_visual(trial, simulation_options, load_callback);
     }
   }

   private simulate_data_only(trial: TrialType<Info>, simulation_options) {
     const data = this.create_simulation_data(trial, simulation_options);

-
+    return data;
   }

-  private simulate_visual(
+  private async simulate_visual(
+    trial: TrialType<Info>,
+    simulation_options,
+    load_callback: () => void
+  ) {
     const data = this.create_simulation_data(trial, simulation_options);

     const display_element = this.jsPsych.getDisplayElement();
@@ -244,7 +254,7 @@ class AudioKeyboardResponsePlugin implements JsPsychPlugin<Info> {
       }
     };

-    this.trial(display_element, trial, () => {
+    const result = await this.trial(display_element, trial, () => {
       load_callback();
       if (!trial.response_allowed_while_playing) {
         this.audio.addEventListener("ended", respond);
@@ -252,6 +262,8 @@ class AudioKeyboardResponsePlugin implements JsPsychPlugin<Info> {
         respond();
       }
     });
+
+    return result;
   }

   private create_simulation_data(trial: TrialType<Info>, simulation_options) {