bmtool 0.6.6.2__tar.gz → 0.6.6.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/PKG-INFO +1 -1
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/synapses.py +74 -26
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool.egg-info/PKG-INFO +1 -1
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/setup.py +1 -1
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/LICENSE +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/README.md +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/SLURM.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/__init__.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/__main__.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/bmplot.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/connectors.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/debug/__init__.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/debug/commands.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/debug/debug.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/graphs.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/manage.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/plot_commands.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/singlecell.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/util/__init__.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/util/commands.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/util/neuron/__init__.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/util/neuron/celltuner.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool/util/util.py +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool.egg-info/SOURCES.txt +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool.egg-info/dependency_links.txt +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool.egg-info/entry_points.txt +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool.egg-info/requires.txt +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/bmtool.egg-info/top_level.txt +0 -0
- {bmtool-0.6.6.2 → bmtool-0.6.6.3}/setup.cfg +0 -0
@@ -84,6 +84,20 @@ class SynapseTuner:
         h.dt = general_settings['dt'] # Time step (resolution) of the simulation in ms
         h.steps_per_ms = 1 / h.dt
         h.celsius = general_settings['celsius']
+
+        # get some stuff set up we need for both SingleEvent and Interactive Tuner
+        self._set_up_cell()
+        self._set_up_synapse()
+
+        self.nstim = h.NetStim()
+        self.nstim2 = h.NetStim()
+
+        self.vcl = h.VClamp(self.cell.soma[0](0.5))
+
+        self.nc = h.NetCon(self.nstim, self.syn, self.general_settings['threshold'], self.general_settings['delay'], self.general_settings['weight'])
+        self.nc2 = h.NetCon(self.nstim2, self.syn, self.general_settings['threshold'], self.general_settings['delay'], self.general_settings['weight'])
+
+        self._set_up_recorders()
 
     def _update_spec_syn_param(self, json_folder_path):
         """
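This hunk hoists cell, synapse, stimulus, clamp, and connection setup into `__init__`, so `SingleEvent` and the interactive tuner reuse one set of NEURON objects instead of rebuilding them on every run. For readers unfamiliar with the pattern, the sketch below (ordinary Python, not part of the diff) shows the NetStim → NetCon → synapse wiring in isolation; the section and synapse here are stand-ins, not bmtool's actual cell setup.

from neuron import h

h.load_file('stdrun.hoc')

soma = h.Section(name='soma')
syn = h.ExpSyn(soma(0.5))        # placeholder point-process synapse

stim = h.NetStim()               # artificial spike source
stim.number = 1
stim.start = 100                 # ms

# NetCon(source, target, threshold, delay, weight)
nc = h.NetCon(stim, syn, 10, 1, 0.002)

# With these built once up front, a single-event run only has to adjust
# stim.start, stim.interval, and h.tstop before calling h.run().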
@@ -168,7 +182,7 @@ class SynapseTuner:
         self.ivcl.record(self.vcl._ref_i)
 
 
-    def SingleEvent(self):
+    def SingleEvent(self,plot_and_print=True):
         """
         Simulate a single synaptic event by delivering an input stimulus to the synapse.
 
@@ -176,33 +190,30 @@ class SynapseTuner:
         and then runs the NEURON simulation for a single event. The single synaptic event will occur at general_settings['tstart']
         Will display graphs and synaptic properies works best with a jupyter notebook
         """
-        self._set_up_cell()
-        self._set_up_synapse()
+        self.ispk = None
 
         # user slider values if the sliders are set up
         if hasattr(self, 'dynamic_sliders'):
             syn_props = {var: slider.value for var, slider in self.dynamic_sliders.items()}
             self._set_syn_prop(**syn_props)
+
+        # sets values based off optimizer
+        if hasattr(self,'using_optimizer'):
+            for name, value in zip(self.param_names, self.params):
+                setattr(self.syn, name, value)
 
         # Set up the stimulus
-        self.nstim = h.NetStim()
         self.nstim.start = self.general_settings['tstart']
         self.nstim.noise = 0
-        self.nstim2 = h.NetStim()
         self.nstim2.start = h.tstop
         self.nstim2.noise = 0
-        self.nc = h.NetCon(self.nstim, self.syn, self.general_settings['threshold'], self.general_settings['delay'], self.general_settings['weight'])
-        self.nc2 = h.NetCon(self.nstim2, self.syn, self.general_settings['threshold'], self.general_settings['delay'], self.general_settings['weight'])
 
         # Set up voltage clamp
-        self.vcl = h.VClamp(self.cell.soma[0](0.5))
         vcldur = [[0, 0, 0], [self.general_settings['tstart'], h.tstop, 1e9]]
         for i in range(3):
             self.vcl.amp[i] = self.conn['spec_settings']['vclamp_amp']
             self.vcl.dur[i] = vcldur[1][i]
 
-        self._set_up_recorders()
-
         # Run simulation
         h.tstop = self.general_settings['tstart'] + self.general_settings['tdur']
         self.nstim.interval = self.general_settings['tdur']
@@ -211,15 +222,18 @@ class SynapseTuner:
         h.run()
 
         current = np.array(self.rec_vectors[self.current_name])
-
-        current = (current -
+        syn_props = self._get_syn_prop(rise_interval=self.general_settings['rise_interval'])
+        current = (current - syn_props['baseline']) * 1000 # Convert to pA
         current_integral = np.trapz(current, dx=h.dt) # pA·ms
 
-
-
-
-
-
+        if plot_and_print:
+            self._plot_model([self.general_settings['tstart'] - 5, self.general_settings['tstart'] + self.general_settings['tdur']])
+            for prop in syn_props.items():
+                print(prop)
+            print(f'Current Integral in pA*ms: {current_integral:.2f}')
+
+        self.rise_time = syn_props['rise_time']
+        self.decay_time = syn_props['decay_time']
 
 
     def _find_first(self, x):
@@ -687,6 +701,7 @@ class SynapseTuner:
         display(ui)
         update_ui()
 
+
     def stp_frequency_response(self, freqs=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 35, 50, 100, 200],
                               delay=250, plot=True,log_plot=True):
         """
@@ -733,6 +748,7 @@ class SynapseTuner:
 
         return results
 
+
     def _plot_frequency_analysis(self, results,log_plot):
         """
         Plot the frequency-dependent synaptic properties.
@@ -928,15 +944,33 @@ class SynapseOptimizer:
 
     def _calculate_metrics(self) -> Dict[str, float]:
         """Calculate standard metrics from the current simulation using specified frequency"""
-
-
-
-
+
+        # Set these to 0 for when we return the dict
+        induction = 0
+        ppr = 0
+        recovery = 0
+        amp = 0
+        rise_time = 0
+        decay_time = 0
+
+        if self.run_single_event:
+            self.tuner.SingleEvent(plot_and_print=False)
+            rise_time = self.tuner.rise_time
+            decay_time = self.tuner.decay_time
+
+        if self.run_train_input:
+            self.tuner._simulate_model(self.train_frequency, self.train_delay)
+            amp = self.tuner._response_amplitude()
+            ppr, induction, recovery = self.tuner._calc_ppr_induction_recovery(amp, print_math=False)
+            amp = self.tuner._find_max_amp(amp)
+
         return {
             'induction': float(induction),
             'ppr': float(ppr),
             'recovery': float(recovery),
-            'max_amplitude': float(amp)
+            'max_amplitude': float(amp),
+            'rise_time': float(rise_time),
+            'decay_time': float(decay_time)
         }
 
     def _default_cost_function(self, metrics: Dict[str, float], target_metrics: Dict[str, float]) -> float:
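Because `_calculate_metrics` now always returns `rise_time` and `decay_time` (zero unless `run_single_event` is enabled), custom cost functions can target kinetics directly. A minimal sketch of such a cost function; the key names match this hunk, but the error formula and target values are illustrative, not bmtool defaults:

from typing import Dict

def kinetics_cost(metrics: Dict[str, float], target: Dict[str, float]) -> float:
    # Sum of squared relative errors over whichever targets were supplied.
    err = 0.0
    for key, want in target.items():
        got = metrics.get(key, 0.0)
        err += ((got - want) / want) ** 2 if want else (got - want) ** 2
    return err

target_metrics = {'rise_time': 1.5, 'decay_time': 8.0}  # ms, illustrative values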
@@ -957,7 +991,13 @@ class SynapseOptimizer:
         # Set parameters
         for name, value in zip(param_names, params):
             setattr(self.tuner.syn, name, value)
-
+
+        # just do this and have the SingleEvent handle it
+        if self.run_single_event:
+            self.tuner.using_optimizer = True
+            self.tuner.param_names = param_names
+            self.tuner.params = params
+
         # Calculate metrics and error
         metrics = self._calculate_metrics()
         error = float(cost_function(metrics, target_metrics)) # Ensure error is scalar
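The objective function cannot pass candidate parameters into `SingleEvent` directly, so it stashes them on the tuner; the `hasattr(self, 'using_optimizer')` check added earlier re-applies them before simulating. A self-contained toy version of that handoff (ordinary Python, names illustrative):

import types

class Tuner:
    def __init__(self):
        self.syn = types.SimpleNamespace()  # stand-in for the NEURON synapse

    def single_event(self):
        if hasattr(self, 'using_optimizer'):
            for name, value in zip(self.param_names, self.params):
                setattr(self.syn, name, value)  # push candidate values onto the synapse
        # ... run the simulation here ...

tuner = Tuner()
tuner.using_optimizer = True
tuner.param_names = ['tau_r', 'tau_d']
tuner.params = [1.5, 8.0]
tuner.single_event()
print(tuner.syn.tau_r, tuner.syn.tau_d)  # 1.5 8.0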
@@ -974,6 +1014,7 @@ class SynapseOptimizer:
 
     def optimize_parameters(self, target_metrics: Dict[str, float],
                             param_bounds: Dict[str, Tuple[float, float]],
+                            run_single_event:bool = False, run_train_input:bool = True,
                             train_frequency: float = 50,train_delay: float = 250,
                             cost_function: Optional[Callable] = None,
                             method: str = 'SLSQP',init_guess='random') -> SynapseOptimizationResult:
@@ -1005,6 +1046,8 @@ class SynapseOptimizer:
         self.optimization_history = []
         self.train_frequency = train_frequency
         self.train_delay = train_delay
+        self.run_single_event = run_single_event
+        self.run_train_input = run_train_input
 
         param_names = list(param_bounds.keys())
         bounds = [param_bounds[name] for name in param_names]
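Taken together with the metrics change above, the two new keyword arguments select which simulations drive the fit. A hypothetical call, assuming an existing `SynapseOptimizer` named `optimizer`; the parameter names and bounds depend on the synapse mechanism and are placeholders here:

result = optimizer.optimize_parameters(
    target_metrics={'rise_time': 1.5, 'decay_time': 8.0},
    param_bounds={'tau_r': (0.1, 5.0), 'tau_d': (2.0, 20.0)},
    run_single_event=True,   # fit single-event kinetics
    run_train_input=False,   # skip the train/STP metrics
)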
@@ -1107,9 +1150,14 @@ class SynapseOptimizer:
             print(f"{param}: {float(value):.3f}")
 
         # Plot final model response
-
-
-
+        if self.run_train_input:
+            self.tuner._plot_model([self.tuner.general_settings['tstart'] - self.tuner.nstim.interval / 3, self.tuner.tstop])
+            amp = self.tuner._response_amplitude()
+            self.tuner._calc_ppr_induction_recovery(amp)
+        if self.run_single_event:
+            self.tuner.ispk=None
+            self.tuner.SingleEvent(plot_and_print=True)
+
 
 
     # dataclass means just init the typehints as self.typehint. looks a bit cleaner