np-workflows 1.6.89 (np_workflows-1.6.89-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- np_workflows/__init__.py +7 -0
- np_workflows/assets/images/logo_np_hab.png +0 -0
- np_workflows/assets/images/logo_np_vis.png +0 -0
- np_workflows/experiments/__init__.py +1 -0
- np_workflows/experiments/dynamic_routing/__init__.py +2 -0
- np_workflows/experiments/dynamic_routing/main.py +117 -0
- np_workflows/experiments/dynamic_routing/widgets.py +82 -0
- np_workflows/experiments/openscope_P3/P3_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_P3/__init__.py +2 -0
- np_workflows/experiments/openscope_P3/main_P3_pilot.py +217 -0
- np_workflows/experiments/openscope_barcode/__init__.py +2 -0
- np_workflows/experiments/openscope_barcode/barcode_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_barcode/camstim_scripts/barcode_mapping_script.py +138 -0
- np_workflows/experiments/openscope_barcode/camstim_scripts/barcode_opto_script.py +219 -0
- np_workflows/experiments/openscope_barcode/main_barcode_pilot.py +217 -0
- np_workflows/experiments/openscope_loop/__init__.py +2 -0
- np_workflows/experiments/openscope_loop/camstim_scripts/barcode_mapping_script.py +138 -0
- np_workflows/experiments/openscope_loop/camstim_scripts/barcode_opto_script.py +219 -0
- np_workflows/experiments/openscope_loop/loop_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_loop/main_loop_pilot.py +217 -0
- np_workflows/experiments/openscope_psycode/__init__.py +2 -0
- np_workflows/experiments/openscope_psycode/main_psycode_pilot.py +217 -0
- np_workflows/experiments/openscope_psycode/psycode_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_v2/__init__.py +2 -0
- np_workflows/experiments/openscope_v2/main_v2_pilot.py +217 -0
- np_workflows/experiments/openscope_v2/v2_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_vippo/__init__.py +2 -0
- np_workflows/experiments/openscope_vippo/main_vippo_pilot.py +217 -0
- np_workflows/experiments/openscope_vippo/vippo_workflow_widget.py +83 -0
- np_workflows/experiments/task_trained_network/__init__.py +2 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/make_tt_stims.py +23 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/oct22_tt_stim_script.py +69 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_00.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_01.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_02.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_03.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_04.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_05.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_06.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_07.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_08.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_09.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_10.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_11.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_12.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_13.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_14.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_15.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_16.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_17.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_18.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/flash_250ms.stim +20 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/gabor_20_deg_250ms.stim +30 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/old_stim.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/shuffle_reversed.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/shuffle_reversed_1st.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/shuffle_reversed_2nd.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/ttn_main_script.py +130 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/ttn_mapping_script.py +138 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/ttn_opto_script.py +219 -0
- np_workflows/experiments/task_trained_network/main_ttn_pilot.py +263 -0
- np_workflows/experiments/task_trained_network/ttn_session_widget.py +83 -0
- np_workflows/experiments/task_trained_network/ttn_stim_config.py +213 -0
- np_workflows/experiments/templeton/__init__.py +2 -0
- np_workflows/experiments/templeton/main.py +105 -0
- np_workflows/experiments/templeton/widgets.py +82 -0
- np_workflows/shared/__init__.py +3 -0
- np_workflows/shared/base_experiments.py +826 -0
- np_workflows/shared/camstim_scripts/flash_250ms.stim +20 -0
- np_workflows/shared/camstim_scripts/gabor_20_deg_250ms.stim +30 -0
- np_workflows/shared/npxc.py +187 -0
- np_workflows/shared/widgets.py +705 -0
- np_workflows-1.6.89.dist-info/METADATA +85 -0
- np_workflows-1.6.89.dist-info/RECORD +76 -0
- np_workflows-1.6.89.dist-info/WHEEL +4 -0
- np_workflows-1.6.89.dist-info/entry_points.txt +4 -0

@@ -0,0 +1,130 @@
+"""
+Oct'22 task-trained ephys stimuli
+"""
+
+import argparse
+import json
+import logging
+import os
+import time
+
+import numpy as np
+from psychopy import visual
+from camstim import Foraging
+from camstim import Stimulus_v2
+from camstim import SweepStim_v2, MovieStim
+from camstim import Warp, Window
+from camstim.misc import wecanpicklethat
+
+
+# get params ------------------------------------------------------------------
+# stored in json file -
+# path to json supplied by camstim via command line arg when this script is called
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    "params_path",
+    nargs="?",
+    type=str,
+    default="",
+)
+args, _ = parser.parse_known_args()
+
+with open(args.params_path, "r") as f:
+    json_params = json.load(f)
+
+# Create display window
+# ----------------------------------------------------------------------------
+window = Window(
+    fullscr=True,
+    monitor=json_params["monitor"],
+    screen=0,
+    warp=Warp.Spherical,
+)
+
+# monkey-patch MovieStim to serialize without large redundant arrays
+# ----------------------------------------------------------------------------
+
+def package(self):
+    """
+    Package for serializing - minus large arrays of frame timing/order.
+    """
+    if not self.save_sweep_table:
+        self.sweep_table = None
+        self.sweep_params = self.sweep_params.keys()
+    self_dict = self.__dict__
+    del self_dict['sweep_frames']
+    del self_dict['sweep_order']
+    del self_dict['frame_list']
+    self_dict['stim'] = str(self_dict['stim'])
+    return wecanpicklethat(self_dict)
+
+MovieStim.package = package
+
+# setup main stim
+# -----------------------------------------------------------------------
+# build the stimulus array with parameterized repeats & durations
+
+main_stimuli = []
+
+old_repeats, reversed_repeats, annotated_repeats = (
+    json_params["stim_repeats"][key] for key in ("old", "reversed", "annotated")
+)
+old_sec, reversed_sec, annotated_sec = (
+    json_params["stim_lengths_sec"][key] for key in ("old", "reversed", "annotated")
+)
+
+segment_stim_secs = (
+    [("old_stim.stim", old_sec)] * old_repeats
+    + [
+        ("shuffle_reversed.stim", reversed_sec),
+        ("shuffle_reversed_1st.stim", reversed_sec),
+        ("shuffle_reversed_2nd.stim", reversed_sec),
+    ] * reversed_repeats
+    + [("densely_annotated_%02d.stim" % i, annotated_sec) for i in range(19)]
+    * annotated_repeats
+    + [("old_stim.stim", old_sec)] * old_repeats
+    + [
+        ("shuffle_reversed.stim", reversed_sec),
+        ("shuffle_reversed_1st.stim", reversed_sec),
+        ("shuffle_reversed_2nd.stim", reversed_sec),
+    ] * reversed_repeats
+)
+
+# setup stim list and timing
+cumulative_duration_sec = (
+    main_sequence_start_sec
+) = 0  # if stims are daisy-chained within one script, this should be the end of the prev stim
+for stim_file, duration_sec in segment_stim_secs:
+    segment = Stimulus_v2.from_file(stim_file, window)  # stim file actually instantiates MovieStim
+    segment_ds = [(cumulative_duration_sec, cumulative_duration_sec + duration_sec)]
+    segment.set_display_sequence(segment_ds)
+
+    cumulative_duration_sec += duration_sec
+    main_stimuli.append(segment)
+
+main_sequence_end_sec = cumulative_duration_sec  # if daisy-chained, the next stim in this script should start at this time
+
+# create SweepStim_v2 instance for main stimulus
+ss = SweepStim_v2(
+    window,
+    stimuli=main_stimuli,
+    pre_blank_sec=json_params["pre_blank_screen_sec"],
+    post_blank_sec=json_params["post_blank_screen_sec"],
+    params=json_params["sweepstim"],
+)
+
+# add in foraging so we can track wheel, potentially give rewards, etc
+f = Foraging(
+    window=window,
+    auto_update=False,
+    params=json_params["sweepstim"],
+    nidaq_tasks={
+        "digital_input": ss.di,
+        "digital_output": ss.do,
+    },
+)  # share di and do with SS
+
+ss.add_item(f, "foraging")
+
+ss.run()
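
The main stimulus script above reads only a handful of keys from the params JSON whose path camstim passes on the command line, as noted in the script's own comment. A minimal sketch of such a file follows; the key names are taken from the json_params lookups in the script, while the values and the output filename are illustrative assumptions only.

import json

# Placeholder params for the main stimulus script; values are not from the package.
params = {
    "monitor": "testMonitor",  # assumption: any PsychoPy monitor config name
    "stim_repeats": {"old": 1, "reversed": 1, "annotated": 1},
    "stim_lengths_sec": {"old": 300, "reversed": 600, "annotated": 30},
    "pre_blank_screen_sec": 10,
    "post_blank_screen_sec": 10,
    "sweepstim": {},  # passed through to SweepStim_v2 and Foraging
}

with open("main_params.json", "w") as f:  # hypothetical filename
    json.dump(params, f, indent=2)
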
@@ -0,0 +1,138 @@
+"""
+Oct'22 task-trained ephys stimuli
+"""
+
+import argparse
+import json
+import logging
+import os
+import time
+
+import numpy as np
+from psychopy import visual
+from camstim import Foraging
+from camstim import Stimulus_v2
+from camstim import SweepStim_v2
+from camstim import Warp, Window
+
+
+# get params ------------------------------------------------------------------
+# stored in json file -
+# path to json supplied by camstim via command line arg when this script is called
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    "params_path",
+    nargs="?",
+    type=str,
+    default="",
+)
+args, _ = parser.parse_known_args()
+
+with open(args.params_path, "r") as f:
+    json_params = json.load(f)
+
+# Create display window
+# ----------------------------------------------------------------------------
+window = Window(
+    fullscr=True,
+    monitor=json_params["monitor"],
+    screen=0,
+    warp=Warp.Spherical,
+)
+
+# patch the Stimulus_v2 class to allow for serializing without large arrays
+# ----------------------------------------------------------------------------
+class Stimulus_v2_MinusFrameArrays(Stimulus_v2):
+
+    def __init__(self, *args, **kwargs):
+        super(Stimulus_v2_MinusFrameArrays, self).__init__(*args, **kwargs)
+
+    def package(self):
+        """
+        Package for serializing - minus large arrays of frame timing/order.
+        """
+        if not self.save_sweep_table:
+            self.sweep_table = None
+            self.sweep_params = self.sweep_params.keys()
+        self_dict = self.__dict__
+        del self_dict['sweep_frames']
+        del self_dict['sweep_order']
+        self_dict['stim'] = str(self_dict['stim'])
+        return wecanpicklethat(self_dict)
+
+# ----------------------------------------------------------------------------
+# setup mapping stim
+"""from mapping_script_v2.py"""
+
+mapping_stimuli = []
+
+# load common stimuli
+gabor_path = json_params["gabor_path"]
+flash_path = json_params["flash_path"]
+gabor = Stimulus_v2_MinusFrameArrays.from_file(gabor_path, window)
+flash = Stimulus_v2_MinusFrameArrays.from_file(flash_path, window)
+
+gabor_duration_sec = json_params["default_gabor_duration_seconds"]
+flash_duration_sec = json_params["default_flash_duration_seconds"]
+
+original_duration_sec = gabor_duration_sec + flash_duration_sec
+
+# if max total duration is set, and less than original movie length, cut down display sequence:
+max_mapping_duation_minutes = json_params[
+    "max_total_duration_minutes"
+]  # can be zero, in which case we use the full movie length
+max_mapping_duration_sec = max_mapping_duation_minutes * 60
+if 0 < max_mapping_duration_sec < original_duration_sec:
+    logging.info("Mapping duration capped at %s minutes", max_mapping_duation_minutes)
+
+    logging.info("original gabor duration: %s sec", gabor_duration_sec)
+    logging.info("original flash duration: %s sec", flash_duration_sec)
+    logging.info("max mapping duration: %s sec", max_mapping_duration_sec)
+
+    gabor_duration_sec = (
+        max_mapping_duration_sec * gabor_duration_sec
+    ) / original_duration_sec
+    flash_duration_sec = (
+        max_mapping_duration_sec * flash_duration_sec
+    ) / original_duration_sec
+
+    logging.info("modified gabor duration: %s sec", gabor_duration_sec)
+    logging.info("modified flash duration: %s sec", flash_duration_sec)
+
+# setup timing
+mapping_sequence_start_sec = 0  # if stims are daisy-chained within one script, this should be the end of the prev stim
+gabor.set_display_sequence([(mapping_sequence_start_sec, gabor_duration_sec)])
+flash.set_display_sequence(
+    [(gabor_duration_sec, gabor_duration_sec + flash_duration_sec)]
+)
+
+mapping_stimuli = [gabor, flash]
+
+mapping_sequence_end_sec = (
+    gabor_duration_sec + flash_duration_sec
+)  # if daisy-chained, the next stim in this script should start at this time
+
+# create SweepStim_v2 instance for main stimulus
+ss = SweepStim_v2(
+    window,
+    stimuli=mapping_stimuli,
+    pre_blank_sec=json_params["pre_blank_screen_sec"],
+    post_blank_sec=json_params["post_blank_screen_sec"],
+    params=json_params["sweepstim"],
+)
+
+# add in foraging so we can track wheel, potentially give rewards, etc
+f = Foraging(
+    window=window,
+    auto_update=False,
+    params=json_params["sweepstim"],
+    nidaq_tasks={
+        "digital_input": ss.di,
+        "digital_output": ss.do,
+    },
+)  # share di and do with SS
+
+ss.add_item(f, "foraging")
+
+ss.run()
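
The mapping script above caps its total runtime by rescaling the gabor and flash segments in proportion to their default durations. A quick worked example of that arithmetic, using assumed numbers rather than anything shipped in the package:

# Assumed values, only to illustrate the proportional capping above:
gabor_sec, flash_sec = 1200.0, 300.0  # default_*_duration_seconds
max_minutes = 10                      # max_total_duration_minutes
original_sec = gabor_sec + flash_sec  # 1500 s
max_sec = max_minutes * 60            # 600 s
if 0 < max_sec < original_sec:
    gabor_sec = max_sec * gabor_sec / original_sec  # -> 480 s
    flash_sec = max_sec * flash_sec / original_sec  # -> 120 s
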
@@ -0,0 +1,219 @@
+# -*- coding: utf-8 -*-
+"""
+optotagging.py
+
+runs optotagging code for ecephys pipeline experiments
+
+by joshs@alleninstitute.org
+
+(c) 2018 Allen Institute for Brain Science
+
+"""
+import camstim  # ensures "magic" gets setup properly by importing first
+import logging  # must occur after camstim import for "magic"
+from camstim.zro import agent
+
+import numpy as np
+from toolbox.IO.nidaq import AnalogOutput
+from toolbox.IO.nidaq import DigitalOutput
+
+import datetime
+import numpy as np
+import time
+import pickle as pkl
+
+
+# %%
+
+
+def run_optotagging(levels, conditions, waveforms, isis, sampleRate=10000.):
+
+    from toolbox.IO.nidaq import AnalogOutput
+    from toolbox.IO.nidaq import DigitalOutput
+
+    sweep_on = np.array([0, 0, 1, 0, 0, 0, 0, 0], dtype=np.uint8)
+    stim_on = np.array([0, 0, 1, 1, 0, 0, 0, 0], dtype=np.uint8)
+    stim_off = np.array([0, 0, 1, 0, 0, 0, 0, 0], dtype=np.uint8)
+    sweep_off = np.array([0, 0, 0, 0, 0, 0, 0, 0], dtype=np.uint8)
+
+    ao = AnalogOutput('Dev1', channels=[1])
+    ao.cfg_sample_clock(sampleRate)
+
+    do = DigitalOutput('Dev1', 2)
+
+    do.start()
+    ao.start()
+
+    do.write(sweep_on)
+    time.sleep(5)
+
+    for i, level in enumerate(levels):
+
+        print(level)
+
+        data = waveforms[conditions[i]]
+
+        do.write(stim_on)
+        ao.write(data * level)
+        do.write(stim_off)
+        time.sleep(isis[i])
+
+    do.write(sweep_off)
+    do.clear()
+    ao.clear()
+
+# %%
+
+
+def generatePulseTrain(pulseWidth, pulseInterval, numRepeats, riseTime, sampleRate=10000.):
+
+    data = np.zeros((int(sampleRate),), dtype=np.float64)
+    # rise_samples =
+
+    rise_and_fall = (
+        ((1 - np.cos(np.arange(sampleRate*riseTime/1000., dtype=np.float64)*2*np.pi/10))+1)-1)/2
+    half_length = rise_and_fall.size / 2
+    rise = rise_and_fall[:half_length]
+    fall = rise_and_fall[half_length:]
+
+    peak_samples = int(sampleRate*(pulseWidth-riseTime*2)/1000)
+    peak = np.ones((peak_samples,))
+
+    pulse = np.concatenate((rise,
+                            peak,
+                            fall))
+
+    interval = int(pulseInterval*sampleRate/1000.)
+
+    for i in range(0, numRepeats):
+        data[i*interval:i*interval+pulse.size] = pulse
+
+    return data
+
+
+# %% create waveforms
+
+def optotagging(mouseID, operation_mode='experiment', level_list=[1.15, 1.28, 1.345], genotype=None):
+
+    sampleRate = 10000
+
+    # 1 s cosine ramp:
+    data_cosine = (((1 - np.cos(np.arange(sampleRate, dtype=np.float64)
+                                * 2*np.pi/sampleRate)) + 1) - 1)/2  # create raised cosine waveform
+
+    # 1 ms cosine ramp:
+    rise_and_fall = (
+        ((1 - np.cos(np.arange(sampleRate*0.001, dtype=np.float64)*2*np.pi/10))+1)-1)/2
+    half_length = rise_and_fall.size / 2
+
+    # pulses with cosine ramp:
+    pulse_2ms = np.concatenate((rise_and_fall[:half_length], np.ones(
+        (int(sampleRate*0.001),)), rise_and_fall[half_length:]))
+    pulse_5ms = np.concatenate((rise_and_fall[:half_length], np.ones(
+        (int(sampleRate*0.004),)), rise_and_fall[half_length:]))
+    pulse_10ms = np.concatenate((rise_and_fall[:half_length], np.ones(
+        (int(sampleRate*0.009),)), rise_and_fall[half_length:]))
+
+    data_2ms_10Hz = np.zeros((sampleRate,), dtype=np.float64)
+
+    for i in range(0, 10):
+        interval = sampleRate / 10
+        data_2ms_10Hz[i*interval:i*interval+pulse_2ms.size] = pulse_2ms
+
+    data_5ms = np.zeros((sampleRate,), dtype=np.float64)
+    data_5ms[:pulse_5ms.size] = pulse_5ms
+
+    data_10ms = np.zeros((sampleRate,), dtype=np.float64)
+    data_10ms[:pulse_10ms.size] = pulse_10ms
+
+    data_10s = np.zeros((sampleRate*10,), dtype=np.float64)
+    data_10s[:-2] = 1
+
+    # %% for experiment
+
+    isi = 1.5
+    isi_rand = 0.5
+    numRepeats = 50
+
+    condition_list = [2, 3]
+    waveforms = [data_2ms_10Hz, data_5ms, data_10ms, data_cosine]
+
+    opto_levels = np.array(level_list*numRepeats*len(condition_list))  # BLUE
+    opto_conditions = condition_list*numRepeats*len(level_list)
+    opto_conditions = np.sort(opto_conditions)
+    opto_isis = np.random.random(opto_levels.shape) * isi_rand + isi
+
+    p = np.random.permutation(len(opto_levels))
+
+    # implement shuffle?
+    opto_levels = opto_levels[p]
+    opto_conditions = opto_conditions[p]
+
+    # %% for testing
+
+    if operation_mode == 'test_levels':
+        isi = 2.0
+        isi_rand = 0.0
+
+        numRepeats = 2
+
+        condition_list = [0]
+        waveforms = [data_10s, data_10s]
+
+        opto_levels = np.array(level_list*numRepeats *
+                               len(condition_list))  # BLUE
+        opto_conditions = condition_list*numRepeats*len(level_list)
+        opto_conditions = np.sort(opto_conditions)
+        opto_isis = np.random.random(opto_levels.shape) * isi_rand + isi
+
+    elif operation_mode == 'pretest':
+        numRepeats = 1
+
+        condition_list = [0]
+        data_2s = data_10s[-sampleRate*2:]
+        waveforms = [data_2s]
+
+        opto_levels = np.array(level_list*numRepeats *
+                               len(condition_list))  # BLUE
+        opto_conditions = condition_list*numRepeats*len(level_list)
+        opto_conditions = np.sort(opto_conditions)
+        opto_isis = [1]*len(opto_conditions)
+    # %%
+
+    outputDirectory = agent.OUTPUT_DIR
+    fileDate = str(datetime.datetime.now()).replace(':', '').replace(
+        '.', '').replace('-', '').replace(' ', '')[2:14]
+    fileName = outputDirectory + "/" + fileDate + '_'+mouseID + '.opto.pkl'
+
+    print('saving info to: ' + fileName)
+    fl = open(fileName, 'wb')
+    output = {}
+
+    output['opto_levels'] = opto_levels
+    output['opto_conditions'] = opto_conditions
+    output['opto_ISIs'] = opto_isis
+    output['opto_waveforms'] = waveforms
+
+    pkl.dump(output, fl)
+    fl.close()
+    print('saved.')
+
+    # %%
+    run_optotagging(opto_levels, opto_conditions,
+                    waveforms, opto_isis, float(sampleRate))
+
+
+# %%
+if __name__ == "__main__":
+    import json
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('json_params', type=str, )
+    args, _ = parser.parse_known_args()
+
+    with open(args.json_params, 'r', ) as f:
+        json_params = json.load(f)
+
+    logging.info('Optotagging with params: %s' % json_params)
+    optotagging(**json_params)
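
The optotagging script is launched the same way, with a JSON file whose keys are unpacked straight into optotagging(mouseID, operation_mode, level_list, genotype). A minimal sketch of such a file; the key names come from the function signature, the level_list matches its default, and the remaining values plus the filename are placeholder assumptions.

import json

# Placeholder opto params; values are illustrative, not from the package.
opto_params = {
    "mouseID": "000000",
    "operation_mode": "experiment",  # or "test_levels" / "pretest"
    "level_list": [1.15, 1.28, 1.345],
    "genotype": None,
}

with open("opto_params.json", "w") as f:  # hypothetical filename
    json.dump(opto_params, f, indent=2)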