np_workflows-1.6.89-py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It reflects changes between package versions as they appear in their respective public registries and is provided for informational purposes only.
- np_workflows/__init__.py +7 -0
- np_workflows/assets/images/logo_np_hab.png +0 -0
- np_workflows/assets/images/logo_np_vis.png +0 -0
- np_workflows/experiments/__init__.py +1 -0
- np_workflows/experiments/dynamic_routing/__init__.py +2 -0
- np_workflows/experiments/dynamic_routing/main.py +117 -0
- np_workflows/experiments/dynamic_routing/widgets.py +82 -0
- np_workflows/experiments/openscope_P3/P3_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_P3/__init__.py +2 -0
- np_workflows/experiments/openscope_P3/main_P3_pilot.py +217 -0
- np_workflows/experiments/openscope_barcode/__init__.py +2 -0
- np_workflows/experiments/openscope_barcode/barcode_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_barcode/camstim_scripts/barcode_mapping_script.py +138 -0
- np_workflows/experiments/openscope_barcode/camstim_scripts/barcode_opto_script.py +219 -0
- np_workflows/experiments/openscope_barcode/main_barcode_pilot.py +217 -0
- np_workflows/experiments/openscope_loop/__init__.py +2 -0
- np_workflows/experiments/openscope_loop/camstim_scripts/barcode_mapping_script.py +138 -0
- np_workflows/experiments/openscope_loop/camstim_scripts/barcode_opto_script.py +219 -0
- np_workflows/experiments/openscope_loop/loop_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_loop/main_loop_pilot.py +217 -0
- np_workflows/experiments/openscope_psycode/__init__.py +2 -0
- np_workflows/experiments/openscope_psycode/main_psycode_pilot.py +217 -0
- np_workflows/experiments/openscope_psycode/psycode_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_v2/__init__.py +2 -0
- np_workflows/experiments/openscope_v2/main_v2_pilot.py +217 -0
- np_workflows/experiments/openscope_v2/v2_workflow_widget.py +83 -0
- np_workflows/experiments/openscope_vippo/__init__.py +2 -0
- np_workflows/experiments/openscope_vippo/main_vippo_pilot.py +217 -0
- np_workflows/experiments/openscope_vippo/vippo_workflow_widget.py +83 -0
- np_workflows/experiments/task_trained_network/__init__.py +2 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/make_tt_stims.py +23 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/oct22_tt_stim_script.py +69 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_00.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_01.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_02.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_03.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_04.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_05.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_06.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_07.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_08.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_09.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_10.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_11.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_12.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_13.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_14.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_15.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_16.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_17.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/densely_annotated_18.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/flash_250ms.stim +20 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/gabor_20_deg_250ms.stim +30 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/old_stim.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/shuffle_reversed.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/shuffle_reversed_1st.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/stims/shuffle_reversed_2nd.stim +5 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/ttn_main_script.py +130 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/ttn_mapping_script.py +138 -0
- np_workflows/experiments/task_trained_network/camstim_scripts/ttn_opto_script.py +219 -0
- np_workflows/experiments/task_trained_network/main_ttn_pilot.py +263 -0
- np_workflows/experiments/task_trained_network/ttn_session_widget.py +83 -0
- np_workflows/experiments/task_trained_network/ttn_stim_config.py +213 -0
- np_workflows/experiments/templeton/__init__.py +2 -0
- np_workflows/experiments/templeton/main.py +105 -0
- np_workflows/experiments/templeton/widgets.py +82 -0
- np_workflows/shared/__init__.py +3 -0
- np_workflows/shared/base_experiments.py +826 -0
- np_workflows/shared/camstim_scripts/flash_250ms.stim +20 -0
- np_workflows/shared/camstim_scripts/gabor_20_deg_250ms.stim +30 -0
- np_workflows/shared/npxc.py +187 -0
- np_workflows/shared/widgets.py +705 -0
- np_workflows-1.6.89.dist-info/METADATA +85 -0
- np_workflows-1.6.89.dist-info/RECORD +76 -0
- np_workflows-1.6.89.dist-info/WHEEL +4 -0
- np_workflows-1.6.89.dist-info/entry_points.txt +4 -0
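
A wheel is a standard zip archive, so the file list above can be reproduced locally. A minimal sketch, assuming the wheel has been downloaded to the working directory under its canonical filename:

```python
# List the files packaged in the wheel (local filename assumed from the version above).
import zipfile

with zipfile.ZipFile("np_workflows-1.6.89-py3-none-any.whl") as whl:
    for name in whl.namelist():
        print(name)  # e.g. np_workflows/shared/npxc.py, np_workflows-1.6.89.dist-info/RECORD
```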
@@ -0,0 +1,138 @@
+"""
+May '23 OpenScope: Barcode stimuli
+"""
+
+import argparse
+import json
+import logging
+import os
+import time
+
+import numpy as np
+from psychopy import visual
+from camstim import Foraging
+from camstim import Stimulus_v2
+from camstim import SweepStim_v2
+from camstim import Warp, Window
+
+
+# get params ------------------------------------------------------------------
+# stored in json file -
+# path to json supplied by camstim via command line arg when this script is called
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    "params_path",
+    nargs="?",
+    type=str,
+    default="",
+)
+args, _ = parser.parse_known_args()
+
+with open(args.params_path, "r") as f:
+    json_params = json.load(f)
+
+# Create display window
+# ----------------------------------------------------------------------------
+window = Window(
+    fullscr=True,
+    monitor=json_params["monitor"],
+    screen=0,
+    warp=Warp.Spherical,
+)
+
+# patch the Stimulus_v2 class to allow for serializing without large arrays
+# ----------------------------------------------------------------------------
+class Stimulus_v2_MinusFrameArrays(Stimulus_v2):
+
+    def __init__(self, *args, **kwargs):
+        super(Stimulus_v2_MinusFrameArrays, self).__init__(*args, **kwargs)
+
+    def package(self):
+        """
+        Package for serializing - minus large arrays of frame timing/order.
+        """
+        if not self.save_sweep_table:
+            self.sweep_table = None
+            self.sweep_params = self.sweep_params.keys()
+        self_dict = self.__dict__
+        del self_dict['sweep_frames']
+        del self_dict['sweep_order']
+        self_dict['stim'] = str(self_dict['stim'])
+        return wecanpicklethat(self_dict)
+
+# ----------------------------------------------------------------------------
+# setup mapping stim
+"""from mapping_script_v2.py"""
+
+mapping_stimuli = []
+
+# load common stimuli
+gabor_path = json_params["gabor_path"]
+flash_path = json_params["flash_path"]
+gabor = Stimulus_v2_MinusFrameArrays.from_file(gabor_path, window)
+flash = Stimulus_v2_MinusFrameArrays.from_file(flash_path, window)
+
+gabor_duration_sec = json_params["default_gabor_duration_seconds"]
+flash_duration_sec = json_params["default_flash_duration_seconds"]
+
+original_duration_sec = gabor_duration_sec + flash_duration_sec
+
+# if max total duration is set, and less than original movie length, cut down display sequence:
+max_mapping_duation_minutes = json_params[
+    "max_total_duration_minutes"
+]  # can be zero, in which case we use the full movie length
+max_mapping_duration_sec = max_mapping_duation_minutes * 60
+if 0 < max_mapping_duration_sec < original_duration_sec:
+    logging.info("Mapping duration capped at %s minutes", max_mapping_duation_minutes)
+
+    logging.info("original gabor duration: %s sec", gabor_duration_sec)
+    logging.info("original flash duration: %s sec", flash_duration_sec)
+    logging.info("max mapping duration: %s sec", max_mapping_duration_sec)
+
+    gabor_duration_sec = (
+        max_mapping_duration_sec * gabor_duration_sec
+    ) / original_duration_sec
+    flash_duration_sec = (
+        max_mapping_duration_sec * flash_duration_sec
+    ) / original_duration_sec
+
+    logging.info("modified gabor duration: %s sec", gabor_duration_sec)
+    logging.info("modified flash duration: %s sec", flash_duration_sec)
+
+# setup timing
+mapping_sequence_start_sec = 0  # if stims are daisy-chained within one script, this should be the end of the prev stim
+gabor.set_display_sequence([(mapping_sequence_start_sec, gabor_duration_sec)])
+flash.set_display_sequence(
+    [(gabor_duration_sec, gabor_duration_sec + flash_duration_sec)]
+)
+
+mapping_stimuli = [gabor, flash]
+
+mapping_sequence_end_sec = (
+    gabor_duration_sec + flash_duration_sec
+)  # if daisy-chained, the next stim in this script should start at this time
+
+# create SweepStim_v2 instance for main stimulus
+ss = SweepStim_v2(
+    window,
+    stimuli=mapping_stimuli,
+    pre_blank_sec=json_params["pre_blank_screen_sec"],
+    post_blank_sec=json_params["post_blank_screen_sec"],
+    params=json_params["sweepstim"],
+)
+
+# add in foraging so we can track wheel, potentially give rewards, etc
+f = Foraging(
+    window=window,
+    auto_update=False,
+    params=json_params["sweepstim"],
+    nidaq_tasks={
+        "digital_input": ss.di,
+        "digital_output": ss.do,
+    },
+)  # share di and do with SS
+
+ss.add_item(f, "foraging")
+
+ss.run()
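
The mapping script above takes one positional argument: the path to a JSON params file written by camstim. Below is a hedged sketch of such a file, restricted to the keys the script actually reads; the monitor name, durations, .stim paths, and output filename are illustrative placeholders, not production settings.

```python
# Illustrative params for the mapping script above; keys match what the script
# reads via json_params[...], values are placeholders only.
import json

params = {
    "monitor": "testMonitor",                  # any PsychoPy monitor config name
    "gabor_path": "gabor_20_deg_250ms.stim",   # .stim files like those bundled in shared/camstim_scripts
    "flash_path": "flash_250ms.stim",
    "default_gabor_duration_seconds": 1200,
    "default_flash_duration_seconds": 150,
    "max_total_duration_minutes": 10,          # 0 means use the full default durations
    "pre_blank_screen_sec": 10,
    "post_blank_screen_sec": 10,
    "sweepstim": {},                           # extra params forwarded to SweepStim_v2 and Foraging
}

with open("mapping_params.json", "w") as f:
    json.dump(params, f, indent=2)

# camstim then invokes the script roughly as:
#   python <mapping_script>.py mapping_params.json
```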
@@ -0,0 +1,219 @@
+# -*- coding: utf-8 -*-
+"""
+optotagging.py
+
+runs optotagging code for ecephys pipeline experiments
+
+by joshs@alleninstitute.org
+
+(c) 2018 Allen Institute for Brain Science
+
+"""
+import camstim  # ensures "magic" gets setup properly by importing first
+import logging  # must occur after camstim import for "magic"
+from camstim.zro import agent
+
+import numpy as np
+from toolbox.IO.nidaq import AnalogOutput
+from toolbox.IO.nidaq import DigitalOutput
+
+import datetime
+import numpy as np
+import time
+import pickle as pkl
+
+
+# %%
+
+
+def run_optotagging(levels, conditions, waveforms, isis, sampleRate=10000.):
+
+    from toolbox.IO.nidaq import AnalogOutput
+    from toolbox.IO.nidaq import DigitalOutput
+
+    sweep_on = np.array([0, 0, 1, 0, 0, 0, 0, 0], dtype=np.uint8)
+    stim_on = np.array([0, 0, 1, 1, 0, 0, 0, 0], dtype=np.uint8)
+    stim_off = np.array([0, 0, 1, 0, 0, 0, 0, 0], dtype=np.uint8)
+    sweep_off = np.array([0, 0, 0, 0, 0, 0, 0, 0], dtype=np.uint8)
+
+    ao = AnalogOutput('Dev1', channels=[1])
+    ao.cfg_sample_clock(sampleRate)
+
+    do = DigitalOutput('Dev1', 2)
+
+    do.start()
+    ao.start()
+
+    do.write(sweep_on)
+    time.sleep(5)
+
+    for i, level in enumerate(levels):
+
+        print(level)
+
+        data = waveforms[conditions[i]]
+
+        do.write(stim_on)
+        ao.write(data * level)
+        do.write(stim_off)
+        time.sleep(isis[i])
+
+    do.write(sweep_off)
+    do.clear()
+    ao.clear()
+
+# %%
+
+
+def generatePulseTrain(pulseWidth, pulseInterval, numRepeats, riseTime, sampleRate=10000.):
+
+    data = np.zeros((int(sampleRate),), dtype=np.float64)
+    # rise_samples =
+
+    rise_and_fall = (
+        ((1 - np.cos(np.arange(sampleRate*riseTime/1000., dtype=np.float64)*2*np.pi/10))+1)-1)/2
+    half_length = rise_and_fall.size / 2
+    rise = rise_and_fall[:half_length]
+    fall = rise_and_fall[half_length:]
+
+    peak_samples = int(sampleRate*(pulseWidth-riseTime*2)/1000)
+    peak = np.ones((peak_samples,))
+
+    pulse = np.concatenate((rise,
+                            peak,
+                            fall))
+
+    interval = int(pulseInterval*sampleRate/1000.)
+
+    for i in range(0, numRepeats):
+        data[i*interval:i*interval+pulse.size] = pulse
+
+    return data
+
+
+# %% create waveforms
+
+def optotagging(mouseID, operation_mode='experiment', level_list=[1.15, 1.28, 1.345], genotype=None):
+
+    sampleRate = 10000
+
+    # 1 s cosine ramp:
+    data_cosine = (((1 - np.cos(np.arange(sampleRate, dtype=np.float64)
+                                * 2*np.pi/sampleRate)) + 1) - 1)/2  # create raised cosine waveform
+
+    # 1 ms cosine ramp:
+    rise_and_fall = (
+        ((1 - np.cos(np.arange(sampleRate*0.001, dtype=np.float64)*2*np.pi/10))+1)-1)/2
+    half_length = rise_and_fall.size / 2
+
+    # pulses with cosine ramp:
+    pulse_2ms = np.concatenate((rise_and_fall[:half_length], np.ones(
+        (int(sampleRate*0.001),)), rise_and_fall[half_length:]))
+    pulse_5ms = np.concatenate((rise_and_fall[:half_length], np.ones(
+        (int(sampleRate*0.004),)), rise_and_fall[half_length:]))
+    pulse_10ms = np.concatenate((rise_and_fall[:half_length], np.ones(
+        (int(sampleRate*0.009),)), rise_and_fall[half_length:]))
+
+    data_2ms_10Hz = np.zeros((sampleRate,), dtype=np.float64)
+
+    for i in range(0, 10):
+        interval = sampleRate / 10
+        data_2ms_10Hz[i*interval:i*interval+pulse_2ms.size] = pulse_2ms
+
+    data_5ms = np.zeros((sampleRate,), dtype=np.float64)
+    data_5ms[:pulse_5ms.size] = pulse_5ms
+
+    data_10ms = np.zeros((sampleRate,), dtype=np.float64)
+    data_10ms[:pulse_10ms.size] = pulse_10ms
+
+    data_10s = np.zeros((sampleRate*10,), dtype=np.float64)
+    data_10s[:-2] = 1
+
+    # %% for experiment
+
+    isi = 1.5
+    isi_rand = 0.5
+    numRepeats = 50
+
+    condition_list = [2, 3]
+    waveforms = [data_2ms_10Hz, data_5ms, data_10ms, data_cosine]
+
+    opto_levels = np.array(level_list*numRepeats*len(condition_list))  # BLUE
+    opto_conditions = condition_list*numRepeats*len(level_list)
+    opto_conditions = np.sort(opto_conditions)
+    opto_isis = np.random.random(opto_levels.shape) * isi_rand + isi
+
+    p = np.random.permutation(len(opto_levels))
+
+    # implement shuffle?
+    opto_levels = opto_levels[p]
+    opto_conditions = opto_conditions[p]
+
+    # %% for testing
+
+    if operation_mode == 'test_levels':
+        isi = 2.0
+        isi_rand = 0.0
+
+        numRepeats = 2
+
+        condition_list = [0]
+        waveforms = [data_10s, data_10s]
+
+        opto_levels = np.array(level_list*numRepeats *
+                               len(condition_list))  # BLUE
+        opto_conditions = condition_list*numRepeats*len(level_list)
+        opto_conditions = np.sort(opto_conditions)
+        opto_isis = np.random.random(opto_levels.shape) * isi_rand + isi
+
+    elif operation_mode == 'pretest':
+        numRepeats = 1
+
+        condition_list = [0]
+        data_2s = data_10s[-sampleRate*2:]
+        waveforms = [data_2s]
+
+        opto_levels = np.array(level_list*numRepeats *
+                               len(condition_list))  # BLUE
+        opto_conditions = condition_list*numRepeats*len(level_list)
+        opto_conditions = np.sort(opto_conditions)
+        opto_isis = [1]*len(opto_conditions)
+    # %%
+
+    outputDirectory = agent.OUTPUT_DIR
+    fileDate = str(datetime.datetime.now()).replace(':', '').replace(
+        '.', '').replace('-', '').replace(' ', '')[2:14]
+    fileName = outputDirectory + "/" + fileDate + '_'+mouseID + '.opto.pkl'
+
+    print('saving info to: ' + fileName)
+    fl = open(fileName, 'wb')
+    output = {}
+
+    output['opto_levels'] = opto_levels
+    output['opto_conditions'] = opto_conditions
+    output['opto_ISIs'] = opto_isis
+    output['opto_waveforms'] = waveforms
+
+    pkl.dump(output, fl)
+    fl.close()
+    print('saved.')
+
+    # %%
+    run_optotagging(opto_levels, opto_conditions,
+                    waveforms, opto_isis, float(sampleRate))
+
+
+# %%
+if __name__ == "__main__":
+    import json
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('json_params', type=str, )
+    args, _ = parser.parse_known_args()
+
+    with open(args.json_params, 'r', ) as f:
+        json_params = json.load(f)
+
+    logging.info('Optotagging with params: %s' % json_params)
+    optotagging(**json_params)
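
When run directly, the optotagging script also expects a single JSON file; its keys are unpacked into `optotagging(**json_params)`, so they must match that function's signature. A minimal sketch, with all values illustrative only:

```python
# Illustrative params for the optotagging script above; keys mirror the
# optotagging() signature, values are placeholders.
import json

params = {
    "mouseID": "366122",                # used as a string when building the output .pkl filename
    "operation_mode": "pretest",        # 'experiment' (default), 'test_levels', or 'pretest'
    "level_list": [1.15, 1.28, 1.345],  # analog output levels scaled into each waveform
    "genotype": None,                   # accepted by the signature but unused in the body shown
}

with open("opto_params.json", "w") as f:
    json.dump(params, f, indent=2)

# Invoked roughly as:
#   python <opto_script>.py opto_params.json
```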
@@ -0,0 +1,83 @@
+import configparser
+import contextlib
+import copy
+import enum
+import functools
+from typing import ClassVar, Literal, NamedTuple, NoReturn, Optional, TypedDict
+
+import IPython.display
+import ipywidgets as ipw
+import np_config
+import np_logging
+import np_session
+import np_workflows
+from pyparsing import Any
+
+from np_workflows.experiments.openscope_loop.main_loop_pilot import LoopSession
+
+global_state = {}
+"""Global variable for persisting widget states."""
+
+# for widget, before creating a experiment --------------------------------------------- #
+
+class SelectedSession:
+    def __init__(self, session: str | LoopSession, mouse: str | int | np_session.Mouse):
+        if isinstance(session, str):
+            session = LoopSession(session)
+        self.session = session
+        self.mouse = str(mouse)
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({self.session}, {self.mouse})"
+
+
+def loop_workflow_widget(
+    mouse: str | int | np_session.Mouse,
+) -> SelectedSession:
+    """Select a stimulus session (hab, pretest, ephys) to run.
+
+    An object with mutable attributes is returned, so the selected session can be
+    updated along with the GUI selection. (Preference would be to return an enum
+    directly, and change it's value, but that doesn't seem possible.)
+
+    """
+
+    selection = SelectedSession(LoopSession.PRETEST, mouse)
+
+    session_dropdown = ipw.Select(
+        options=tuple(_.value for _ in LoopSession),
+        description="Session",
+    )
+
+    def update_selection():
+        selection.__init__(str(session_dropdown.value), str(mouse))
+
+    if (previously_selected_value := global_state.get('selected_session')):
+        session_dropdown.value = previously_selected_value
+        update_selection()
+
+    console = ipw.Output()
+    with console:
+        if last_session := np_session.Mouse(selection.mouse).state.get('last_loop_session'):
+            print(f"{mouse} last session: {last_session}")
+        print(f"Selected: {selection.session}")
+
+    def update(change):
+        if change["name"] != "value":
+            return
+        if (options := getattr(change["owner"], "options", None)) and change[
+            "new"
+        ] not in options:
+            return
+        if change["new"] == change["old"]:
+            return
+        update_selection()
+        with console:
+            print(f"Selected: {selection.session}")
+        global_state['selected_session'] = selection.session.value
+
+    session_dropdown.observe(update, names='value')
+
+    IPython.display.display(ipw.VBox([session_dropdown, console]))
+
+    return selection
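
In a notebook, the widget above is meant to be displayed once and its return value read back after the operator makes a choice. A short usage sketch (the mouse ID is illustrative):

```python
# Display the session picker and keep a handle to the live selection object.
selection = loop_workflow_widget(366122)

# After a session is picked in the dropdown, the same object reflects it:
selection.session  # a LoopSession member, e.g. LoopSession.EPHYS
selection.mouse    # the mouse ID as a string, e.g. '366122'
```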
@@ -0,0 +1,217 @@
+import configparser
+import contextlib
+import copy
+import dataclasses
+import datetime
+import enum
+import functools
+import pathlib
+import platform
+import shutil
+import threading
+import time
+import zlib
+from typing import ClassVar, Literal, NamedTuple, NoReturn, Optional, TypedDict
+
+import IPython
+import IPython.display
+import ipywidgets as ipw
+import np_config
+import np_logging
+import np_services
+import np_session
+import np_workflows
+import PIL.Image
+import pydantic
+from pyparsing import Any
+from np_services import (
+    Service,
+    Finalizable,
+    ScriptCamstim, SessionCamstim,
+    SessionCamstim,
+    OpenEphys,
+    Sync,
+    VideoMVR,
+    NewScaleCoordinateRecorder,
+    MouseDirector,
+)
+
+logger = np_logging.getLogger(__name__)
+
+
+class LoopSession(enum.Enum):
+    """Enum for the different sessions available, each with different param sets."""
+
+    PRETEST = "pretest"
+    HAB = "hab"
+    EPHYS = "ephys"
+
+
+class LoopMixin:
+    """Provides project-specific methods and attributes, mainly related to camstim scripts."""
+
+    workflow: LoopSession
+    """Enum for particular workflow/session, e.g. PRETEST, HAB_60, HAB_90,
+    EPHYS."""
+
+    session: np_session.PipelineSession
+    mouse: np_session.Mouse
+    user: np_session.User
+    platform_json: np_session.PlatformJson
+
+    @property
+    def recorders(self) -> tuple[Service, ...]:
+        """Services to be started before stimuli run, and stopped after. Session-dependent."""
+        match self.workflow:
+            case LoopSession.PRETEST | LoopSession.EPHYS:
+                return (Sync, VideoMVR, OpenEphys)
+            case LoopSession.HAB:
+                return (Sync, VideoMVR)
+
+    @property
+    def stims(self) -> tuple[Service, ...]:
+        return (SessionCamstim, )
+
+    def initialize_and_test_services(self) -> None:
+        """Configure, initialize (ie. reset), then test all services."""
+
+        MouseDirector.user = self.user.id
+        MouseDirector.mouse = self.mouse.id
+
+        OpenEphys.folder = self.session.folder
+
+        NewScaleCoordinateRecorder.log_root = self.session.npexp_path
+        NewScaleCoordinateRecorder.log_name = self.platform_json.path.name
+
+        SessionCamstim.labtracks_mouse_id = self.mouse.id
+        SessionCamstim.lims_user_id = self.user.id
+
+        self.configure_services()
+
+        super().initialize_and_test_services()
+
+    def update_state(self) -> None:
+        "Store useful but non-essential info."
+        self.mouse.state['last_session'] = self.session.id
+        self.mouse.state['last_Loop_session'] = str(self.workflow)
+        if self.mouse == 366122:
+            return
+        match self.workflow:
+            case LoopSession.PRETEST:
+                return
+            case LoopSession.HAB:
+                self.session.project.state['latest_hab'] = self.session.id
+            case LoopSession.EPHYS:
+                self.session.project.state['latest_ephys'] = self.session.id
+                self.session.project.state['sessions'] = self.session.project.state.get('sessions', []) + [self.session.id]
+
+    def run_stim(self) -> None:
+
+        self.update_state()
+
+        if not SessionCamstim.is_ready_to_start():
+            raise RuntimeError("SessionCamstim is not ready to start.")
+
+        np_logging.web(f'Loop_{self.workflow.name.lower()}').info(f"Started session {self.mouse.mtrain.stage['name']}")
+        SessionCamstim.start()
+
+        with contextlib.suppress(Exception):
+            while not SessionCamstim.is_ready_to_start():
+                time.sleep(2.5)
+
+        if isinstance(SessionCamstim, Finalizable):
+            SessionCamstim.finalize()
+
+        with contextlib.suppress(Exception):
+            np_logging.web(f'Loop_{self.workflow.name.lower()}').info(f"Finished session {self.mouse.mtrain.stage['name']}")
+
+
+    def copy_data_files(self) -> None:
+        super().copy_data_files()
+
+        # When all processing completes, camstim Agent class passes data and uuid to
+        # /camstim/lims BehaviorSession class, and write_behavior_data() writes a
+        # final .pkl with default name YYYYMMDDSSSS_mouseID_foragingID.pkl
+        # - if we have a foraging ID, we can search for that
+        if None == (stim_pkl := next(self.session.npexp_path.glob(f'{self.session.date:%y%m%d}*_{self.session.mouse}_*.pkl'), None)):
+            logger.warning('Did not find stim file on npexp matching the format `YYYYMMDDSSSS_mouseID_foragingID.pkl`')
+            return
+        assert stim_pkl
+        if not self.session.platform_json.foraging_id:
+            self.session.platform_json.foraging_id = stim_pkl.stem.split('_')[-1]
+        new_stem = f'{self.session.folder}.stim'
+        logger.debug(f'Renaming stim file copied to npexp: {stim_pkl} -> {new_stem}')
+        stim_pkl = stim_pkl.rename(stim_pkl.with_stem(new_stem))
+
+        # remove other stim pkl, which is nearly identical, if it was also copied
+        for pkl in self.session.npexp_path.glob('*.pkl'):
+            if (
+                self.session.folder not in pkl.stem
+                and
+                abs(pkl.stat().st_size - stim_pkl.stat().st_size) < 1e6
+            ):
+                logger.debug(f'Deleting extra stim pkl copied to npexp: {pkl.stem}')
+                pkl.unlink()
+
+
+def validate_selected_workflow(session: LoopSession, mouse: np_session.Mouse) -> None:
+    for workflow in ('hab', 'ephys'):
+        if (
+            workflow in session.value.lower()
+            and workflow not in mouse.mtrain.stage['name'].lower()
+        ) or (
+            session.value.lower() == 'ephys' and 'hab' in mouse.mtrain.stage['name'].lower()
+        ):
+            raise ValueError(f"Workflow selected ({session.value}) does not match MTrain stage ({mouse.mtrain.stage['name']}): please check cells above.")
+
+
+class Hab(LoopMixin, np_workflows.PipelineHab):
+    def __init__(self, *args, **kwargs):
+        self.services = (
+            MouseDirector,
+            Sync,
+            VideoMVR,
+            self.imager,
+            NewScaleCoordinateRecorder,
+            SessionCamstim,
+        )
+        super().__init__(*args, **kwargs)
+
+
+class Ephys(LoopMixin, np_workflows.PipelineEphys):
+    def __init__(self, *args, **kwargs):
+        self.services = (
+            MouseDirector,
+            Sync,
+            VideoMVR,
+            self.imager,
+            NewScaleCoordinateRecorder,
+            SessionCamstim,
+            OpenEphys,
+        )
+        super().__init__(*args, **kwargs)
+
+
+# --------------------------------------------------------------------------------------
+
+
+def new_experiment(
+    mouse: int | str | np_session.Mouse,
+    user: str | np_session.User,
+    workflow: LoopSession,
+) -> Ephys | Hab:
+    """Create a new experiment for the given mouse and user."""
+    match workflow:
+        case LoopSession.PRETEST | LoopSession.EPHYS:
+            experiment = Ephys(mouse, user)
+        case LoopSession.HAB:
+            experiment = Hab(mouse, user)
+        case _:
+            raise ValueError(f"Invalid workflow type: {workflow}")
+    experiment.workflow = workflow
+
+    with contextlib.suppress(Exception):
+        np_logging.web(f'Loop_{experiment.workflow.name.lower()}').info(f"{experiment} created")
+
+    return experiment
+
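
Taken together with the widget file above, the intended notebook flow is roughly: pick a session, validate it against MTrain, build the experiment, then run. A hedged sketch using only names defined in these modules; the user ID is a placeholder, and the production notebooks may order or wrap these steps differently:

```python
# Sketch of the end-to-end flow; cell order in the real notebooks may differ.
import np_session
from np_workflows.experiments.openscope_loop.loop_workflow_widget import loop_workflow_widget
from np_workflows.experiments.openscope_loop.main_loop_pilot import (
    new_experiment, validate_selected_workflow,
)

mouse = np_session.Mouse(366122)   # illustrative mouse ID
user = "user.id"                   # placeholder LIMS/np_session user ID

selection = loop_workflow_widget(mouse)               # operator picks hab/pretest/ephys
validate_selected_workflow(selection.session, mouse)  # raises if it contradicts the MTrain stage

experiment = new_experiment(mouse, user, selection.session)
experiment.initialize_and_test_services()  # configures MouseDirector, OpenEphys, SessionCamstim, ...
experiment.run_stim()                      # starts SessionCamstim and waits for it to finish
experiment.copy_data_files()               # copies data, then renames/dedupes the stim .pkl on npexp
```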