bmtool 0.6.3__py3-none-any.whl → 0.6.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bmtool/singlecell.py +7 -2
- bmtool/synapses.py +454 -43
- {bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/METADATA +6 -4
- {bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/RECORD +8 -8
- {bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/LICENSE +0 -0
- {bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/WHEEL +0 -0
- {bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/entry_points.txt +0 -0
- {bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/top_level.txt +0 -0
bmtool/singlecell.py CHANGED

@@ -7,6 +7,7 @@ import matplotlib.pyplot as plt
 from scipy.optimize import curve_fit
 import neuron
 from neuron import h
+import pandas as pd
 
 
 def load_biophys1():
@@ -356,8 +357,12 @@ class FI(object):
         self.nspks = [len(v) for v in self.tspk_vecs]
         print()
         print("Results")
-
-
+        # lets make a df so the results line up nice
+        data = {'Injection (nA):':self.amps,'number of spikes':self.nspks}
+        df = pd.DataFrame(data)
+        print(df)
+        #print(f'Injection (nA): ' + ', '.join(f'{x:g}' for x in self.amps))
+        #print(f'Number of spikes: ' + ', '.join(f'{x:d}' for x in self.nspks))
         print()
 
         return self.amps, self.nspks
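The change above replaces the two comma-joined print statements with a pandas DataFrame so the F/I results print as aligned columns. A minimal standalone sketch of what that printout looks like, with made-up current steps and spike counts standing in for `FI.amps` and `FI.nspks`:

```python
import pandas as pd

# hypothetical values standing in for FI.amps and FI.nspks
amps = [0.0, 0.1, 0.2, 0.3]    # injected current steps (nA)
nspks = [0, 2, 7, 12]          # spikes counted at each step

# same construction as the new code: one row per current step
data = {'Injection (nA):': amps, 'number of spikes': nspks}
df = pd.DataFrame(data)
print(df)
```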
bmtool/synapses.py CHANGED

@@ -1,13 +1,17 @@
 import os
 import json
-import numpy as np
 import neuron
+import numpy as np
 from neuron import h
-from
+from typing import List, Dict, Callable, Optional,Tuple
+from tqdm.notebook import tqdm
 import matplotlib.pyplot as plt
+from neuron.units import ms, mV
+from dataclasses import dataclass
+# scipy
 from scipy.signal import find_peaks
-from scipy.optimize import curve_fit
-
+from scipy.optimize import curve_fit,minimize_scalar,minimize
+# widgets
 import ipywidgets as widgets
 from IPython.display import display, clear_output
 from ipywidgets import HBox, VBox
@@ -319,10 +323,14 @@ class SynapseTuner:
         axs = axs.ravel()
 
         # Plot synaptic current (always included)
-
+        current = self.rec_vectors[self.current_name]
+        syn_prop = self._get_syn_prop(short=True)
+        current = (current - syn_prop['baseline'])
+        current = current * 1000
+
+        axs[0].plot(self.t, current)
         if self.ispk !=None:
             for num in range(len(self.ispk)):
-                current = 1000 * np.array(self.rec_vectors[self.current_name].to_python())
                 axs[0].text(self.t[self.ispk[num]],current[self.ispk[num]],f"{str(num+1)}")
 
         axs[0].set_ylabel('Synaptic Current (pA)')
@@ -407,7 +415,7 @@ class SynapseTuner:
         A list containing the peak amplitudes for each segment of the recorded synaptic current.
 
         """
-        isyn = np.
+        isyn = np.array(self.rec_vectors[self.current_name].to_python())
         tspk = np.append(np.asarray(self.tspk), h.tstop)
         syn_prop = self._get_syn_prop(short=True)
         # print("syn_prp[sign] = " + str(syn_prop['sign']))
@@ -415,6 +423,7 @@ class SynapseTuner:
         isyn *= syn_prop['sign']
         ispk = np.floor((tspk + self.general_settings['delay']) / h.dt).astype(int)
 
+
         try:
             amp = [isyn[ispk[i]:ispk[i + 1]].max() for i in range(ispk.size - 1)]
             # indexs of where the max of the synaptic current is at. This is then plotted
@@ -423,7 +432,7 @@ class SynapseTuner:
         except:
             amp = [isyn[ispk[i]:ispk[i + 1]].max() for i in range(ispk.size - 2)]
             self.ispk = [np.argmax(isyn[ispk[i]:ispk[i + 1]]) + ispk[i] for i in range(ispk.size - 2)]
-
+
         return amp
 
 
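The rewritten `_response_amplitude` works on plain NumPy arrays: spike times are mapped to sample indices with `np.floor((tspk + delay) / h.dt)` and the peak current is taken between consecutive indices. A NumPy-only sketch of that indexing scheme, with invented values standing in for `h.dt`, the delay setting, the recorded current, and the spike times:

```python
import numpy as np

dt = 0.1        # stand-in for h.dt (ms)
delay = 1.0     # stand-in for general_settings['delay'] (ms)
tstop = 70.0    # invented stop time (ms)
tspk = np.array([10.0, 30.0, 50.0])   # invented presynaptic spike times (ms)

t = np.arange(0.0, tstop, dt)
isyn = np.sin(t / 5.0) ** 2           # toy stand-in for the recorded synaptic current

# same indexing scheme as the new _response_amplitude: spike times -> sample indices
tspk_ext = np.append(tspk, tstop)
ispk = np.floor((tspk_ext + delay) / dt).astype(int)

# peak response between consecutive spike indices
amp = [isyn[ispk[i]:ispk[i + 1]].max() for i in range(ispk.size - 1)]
print(amp)
```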
@@ -450,7 +459,7 @@ class SynapseTuner:
         return max_amp
 
 
-    def
+    def _calc_ppr_induction_recovery(self,amp, normalize_by_trial=True,print_math=True):
         """
         Calculates induction and recovery metrics from the synaptic response amplitudes.
 
@@ -471,46 +480,51 @@ class SynapseTuner:
         The maximum amplitude in the response.
         """
         amp = np.array(amp)
+        amp = (amp * 1000) # scale up
         amp = amp.reshape(-1, amp.shape[-1])
         maxamp = amp.max(axis=1 if normalize_by_trial else None)
 
-        # functions used to round array to 2 sig figs
-        def format_value(x):
-            return f"{x:.2g}"
-
-        # Function to apply format_value to an entire array
         def format_array(arr):
-
-            return
-
-        print("Short Term Plasticity")
-        print("PPR: above 1 is facilitating below 1 is depressing")
-        print("Induction: above 0 is facilitating below 0 is depressing")
-        print("Recovery: measure of how fast STP decays")
-        print("")
+            """Format an array to 2 significant figures for cleaner output."""
+            return np.array2string(arr, precision=2, separator=', ', suppress_small=True)
 
-
-
-
-
-
-
-
-
-
-
-
+        if print_math:
+            print("\n" + "="*40)
+            print("Short Term Plasticity Results")
+            print("="*40)
+            print("PPR: Above 1 is facilitating, below 1 is depressing.")
+            print("Induction: Above 0 is facilitating, below 0 is depressing.")
+            print("Recovery: A measure of how fast STP decays.\n")
+
+            # PPR Calculation
+            ppr = amp[:, 1:2] / amp[:, 0:1]
+            print("Paired Pulse Response (PPR)")
+            print("Calculation: 2nd pulse / 1st pulse")
+            print(f"Values: ({format_array(amp[:, 1:2])}) / ({format_array(amp[:, 0:1])}) = {format_array(ppr)}\n")
+
+            # Induction Calculation
+            induction = np.mean((amp[:, 5:8].mean(axis=1) - amp[:, :1].mean(axis=1)) / maxamp)
+            print("Induction")
+            print("Calculation: (avg(6th, 7th, 8th pulses) - 1st pulse) / max amps")
+            print(f"Values: avg({format_array(amp[:, 5:8])}) - {format_array(amp[:, :1])} / {format_array(maxamp)}")
+            print(f"({format_array(amp[:, 5:8].mean(axis=1))}) - ({format_array(amp[:, :1].mean(axis=1))}) / {format_array(maxamp)} = {induction:.3f}\n")
+
+            # Recovery Calculation
+            recovery = np.mean((amp[:, 8:12].mean(axis=1) - amp[:, :4].mean(axis=1)) / maxamp)
+            print("Recovery")
+            print("Calculation: (avg(9th, 10th, 11th, 12th pulses) - avg(1st to 4th pulses)) / max amps")
+            print(f"Values: avg({format_array(amp[:, 8:12])}) - avg({format_array(amp[:, :4])}) / {format_array(maxamp)}")
+            print(f"({format_array(amp[:, 8:12].mean(axis=1))}) - ({format_array(amp[:, :4].mean(axis=1))}) / {format_array(maxamp)} = {recovery:.3f}\n")
+
+            print("="*40 + "\n")
 
         recovery = np.mean((amp[:, 8:12].mean(axis=1) - amp[:, :4].mean(axis=1)) / maxamp)
-
-
-        print(f"{format_array(amp[:, 8:12].mean(axis=1))} - {format_array(amp[:, :4].mean(axis=1))} / {format_array(maxamp)} = {format_array(recovery)}")
-        print("")
-
-
+        induction = np.mean((amp[:, 5:8].mean(axis=1) - amp[:, :1].mean(axis=1)) / maxamp)
+        ppr = amp[:, 1:2] / amp[:, 0:1]
         # maxamp = max(amp, key=lambda x: abs(x[0]))
         maxamp = maxamp.max()
-
+
+        return ppr, induction, recovery
 
 
     def _set_syn_prop(self, **kwargs):
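To make the arithmetic behind the new `_calc_ppr_induction_recovery` concrete, here is a worked toy example that applies the same slices to one invented 12-pulse amplitude train (values are illustrative only, not package output):

```python
import numpy as np

# one invented trial of 12 pulse amplitudes (arbitrary units, already scaled)
amp = np.array([[1.0, 1.3, 1.5, 1.6, 1.7, 1.8, 1.8, 1.7, 1.4, 1.3, 1.2, 1.1]])
maxamp = amp.max(axis=1)               # per-trial maximum, as in the method

# PPR: 2nd pulse / 1st pulse
ppr = amp[:, 1:2] / amp[:, 0:1]        # 1.3 / 1.0 = 1.3

# Induction: (mean of pulses 6-8 - 1st pulse) / max amplitude
induction = np.mean((amp[:, 5:8].mean(axis=1) - amp[:, :1].mean(axis=1)) / maxamp)
# (1.767 - 1.0) / 1.8 ≈ 0.426

# Recovery: (mean of pulses 9-12 - mean of pulses 1-4) / max amplitude
recovery = np.mean((amp[:, 8:12].mean(axis=1) - amp[:, :4].mean(axis=1)) / maxamp)
# (1.25 - 1.35) / 1.8 ≈ -0.056

print(f"PPR={ppr.item():.3f}  induction={induction:.3f}  recovery={recovery:.3f}")
```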
@@ -614,7 +628,7 @@ class SynapseTuner:
             self._simulate_model(w_input_freq.value, self.w_duration.value, w_vclamp.value)
             amp = self._response_amplitude()
             self._plot_model([self.general_settings['tstart'] - self.nstim.interval / 3, self.tstop])
-            self.
+            _ = self._calc_ppr_induction_recovery(amp)
             # print('Single trial ' + ('PSC' if self.vclamp else 'PSP'))
             # print(f'Induction: {induction_single:.2f}; Recovery: {recovery:.2f}')
             #print(f'Rest Amp: {amp[0]:.2f}; Maximum Amp: {maxamp:.2f}')
@@ -738,7 +752,7 @@ class GapJunctionTuner:
         return (v2[idx2] - v2[idx1]) / (v1[idx2] - v1[idx1])
 
 
-    def
+    def InteractiveTuner(self):
        w_run = widgets.Button(description='Run', icon='history', button_style='primary')
        values = [i * 10**-4 for i in range(1, 101)]  # From 1e-4 to 1e-2
 
@@ -762,4 +776,401 @@ class GapJunctionTuner:
            print(f"coupling_coefficient is {cc:0.4f}")
 
        on_button()
-       w_run.on_click(on_button)
+       w_run.on_click(on_button)
+
+
+# optimizers!
+
+@dataclass
+class SynapseOptimizationResult:
+    """Container for synaptic parameter optimization results"""
+    optimal_params: Dict[str, float]
+    achieved_metrics: Dict[str, float]
+    target_metrics: Dict[str, float]
+    error: float
+    optimization_path: List[Dict[str, float]]
+
+class SynapseOptimizer:
+    def __init__(self, tuner):
+        """
+        Initialize the synapse optimizer with parameter scaling
+
+        Parameters:
+        -----------
+        tuner : SynapseTuner
+            Instance of the SynapseTuner class
+        """
+        self.tuner = tuner
+        self.optimization_history = []
+        self.param_scales = {}
+
+    def _normalize_params(self, params: np.ndarray, param_names: List[str]) -> np.ndarray:
+        """Normalize parameters to similar scales"""
+        return np.array([params[i] / self.param_scales[name] for i, name in enumerate(param_names)])
+
+    def _denormalize_params(self, normalized_params: np.ndarray, param_names: List[str]) -> np.ndarray:
+        """Convert normalized parameters back to original scale"""
+        return np.array([normalized_params[i] * self.param_scales[name] for i, name in enumerate(param_names)])
+
+    def _calculate_metrics(self) -> Dict[str, float]:
+        """Calculate standard metrics from the current simulation"""
+        self.tuner._simulate_model(50, 250) # 50 Hz with 250ms Delay
+        amp = self.tuner._response_amplitude()
+        ppr, induction, recovery = self.tuner._calc_ppr_induction_recovery(amp, print_math=False)
+        return {
+            'induction': float(induction), # Ensure these are scalar values
+            'ppr': float(ppr),
+            'recovery': float(recovery),
+            'amplitudes': amp
+        }
+
+    def _default_cost_function(self, metrics: Dict[str, float], target_metrics: Dict[str, float]) -> float:
+        """Default cost function that targets induction"""
+        return float((metrics['induction'] - target_metrics['induction']) ** 2)
+
+    def _objective_function(self,
+                            normalized_params: np.ndarray,
+                            param_names: List[str],
+                            cost_function: Callable,
+                            target_metrics: Dict[str, float]) -> float:
+        """
+        Calculate error using provided cost function
+        """
+        # Denormalize parameters
+        params = self._denormalize_params(normalized_params, param_names)
+
+        # Set parameters
+        for name, value in zip(param_names, params):
+            setattr(self.tuner.syn, name, value)
+
+        # Calculate metrics and error
+        metrics = self._calculate_metrics()
+        error = float(cost_function(metrics, target_metrics)) # Ensure error is scalar
+
+        # Store history with denormalized values
+        history_entry = {
+            'params': dict(zip(param_names, params)),
+            'metrics': metrics,
+            'error': error
+        }
+        self.optimization_history.append(history_entry)
+
+        return error
+
+    def optimize_parameters(self,
+                            target_metrics: Dict[str, float],
+                            param_bounds: Dict[str, Tuple[float, float]],
+                            cost_function: Optional[Callable] = None,
+                            method: str = 'SLSQP',init_guess='random') -> SynapseOptimizationResult:
+        """
+        Optimize synaptic parameters using custom cost function
+        """
+        self.optimization_history = []
+        param_names = list(param_bounds.keys())
+        bounds = [param_bounds[name] for name in param_names]
+
+        if cost_function is None:
+            cost_function = self._default_cost_function
+
+        # Calculate scaling factors
+        self.param_scales = {
+            name: max(abs(bounds[i][0]), abs(bounds[i][1]))
+            for i, name in enumerate(param_names)
+        }
+
+        # Normalize bounds
+        normalized_bounds = [
+            (b[0]/self.param_scales[name], b[1]/self.param_scales[name])
+            for name, b in zip(param_names, bounds)
+        ]
+
+        # picks with method of init value we want to use
+        if init_guess=='random':
+            x0 = np.array([np.random.uniform(b[0], b[1]) for b in bounds])
+        elif init_guess=='middle_guess':
+            x0 = [(b[0] + b[1])/2 for b in bounds]
+        else:
+            raise Exception("Pick a vaid init guess method either random or midde_guess")
+        normalized_x0 = self._normalize_params(np.array(x0), param_names)
+
+
+        # Run optimization
+        result = minimize(
+            self._objective_function,
+            normalized_x0,
+            args=(param_names, cost_function, target_metrics),
+            method=method,
+            bounds=normalized_bounds
+        )
+
+        # Get final parameters and metrics
+        final_params = dict(zip(param_names, self._denormalize_params(result.x, param_names)))
+        for name, value in final_params.items():
+            setattr(self.tuner.syn, name, value)
+        final_metrics = self._calculate_metrics()
+
+        return SynapseOptimizationResult(
+            optimal_params=final_params,
+            achieved_metrics=final_metrics,
+            target_metrics=target_metrics,
+            error=result.fun,
+            optimization_path=self.optimization_history
+        )
+
+    def plot_optimization_results(self, result: SynapseOptimizationResult):
+        """Plot optimization results including convergence and final traces."""
+        # Ensure errors are properly shaped for plotting
+        iterations = range(len(result.optimization_path))
+        errors = np.array([float(h['error']) for h in result.optimization_path]).flatten()
+
+        # Plot error convergence
+        fig1, ax1 = plt.subplots(figsize=(8, 5))
+        ax1.plot(iterations, errors, label='Error')
+        ax1.set_xlabel('Iteration')
+        ax1.set_ylabel('Error')
+        ax1.set_title('Error Convergence')
+        ax1.set_yscale('log')
+        ax1.legend()
+        plt.tight_layout()
+        plt.show()
+
+        # Plot parameter convergence
+        param_names = list(result.optimal_params.keys())
+        num_params = len(param_names)
+        fig2, axs = plt.subplots(nrows=num_params, ncols=1, figsize=(8, 5 * num_params))
+
+        if num_params == 1:
+            axs = [axs]
+
+        for ax, param in zip(axs, param_names):
+            values = [float(h['params'][param]) for h in result.optimization_path]
+            ax.plot(iterations, values, label=f'{param}')
+            ax.set_xlabel('Iteration')
+            ax.set_ylabel('Parameter Value')
+            ax.set_title(f'Convergence of {param}')
+            ax.legend()
+
+        plt.tight_layout()
+        plt.show()
+
+        # Print final results
+        print("Optimization Results:")
+        print(f"Final Error: {float(result.error):.2e}\n")
+        print("Target Metrics:")
+        for metric, value in result.target_metrics.items():
+            achieved = result.achieved_metrics.get(metric)
+            if achieved is not None and metric != 'amplitudes': # Skip amplitude array
+                print(f"{metric}: {float(achieved):.3f} (target: {float(value):.3f})")
+
+        print("\nOptimal Parameters:")
+        for param, value in result.optimal_params.items():
+            print(f"{param}: {float(value):.3f}")
+
+        # Plot final model response
+        self.tuner._plot_model([self.tuner.general_settings['tstart'] - self.tuner.nstim.interval / 3, self.tuner.tstop])
+        amp = self.tuner._response_amplitude()
+        self.tuner._calc_ppr_induction_recovery(amp)
+
+
+# dataclass means just init the typehints as self.typehint. looks a bit cleaner
+@dataclass
+class GapOptimizationResult:
+    """Container for gap junction optimization results"""
+    optimal_resistance: float
+    achieved_cc: float
+    target_cc: float
+    error: float
+    optimization_path: List[Dict[str, float]]
+
+class GapJunctionOptimizer:
+    def __init__(self, tuner):
+        """
+        Initialize the gap junction optimizer
+
+        Parameters:
+        -----------
+        tuner : GapJunctionTuner
+            Instance of the GapJunctionTuner class
+        """
+        self.tuner = tuner
+        self.optimization_history = []
+
+    def _objective_function(self, resistance: float, target_cc: float) -> float:
+        """
+        Calculate error between achieved and target coupling coefficient
+
+        Parameters:
+        -----------
+        resistance : float
+            Gap junction resistance to try
+        target_cc : float
+            Target coupling coefficient to match
+
+        Returns:
+        --------
+        float : Error between achieved and target coupling coefficient
+        """
+        # Run model with current resistance
+        self.tuner.model(resistance)
+
+        # Calculate coupling coefficient
+        achieved_cc = self.tuner.coupling_coefficient(
+            self.tuner.t_vec,
+            self.tuner.soma_v_1,
+            self.tuner.soma_v_2,
+            self.tuner.general_settings['tstart'],
+            self.tuner.general_settings['tstart'] + self.tuner.general_settings['tdur']
+        )
+
+        # Calculate error
+        error = (achieved_cc - target_cc) ** 2 #MSE
+
+        # Store history
+        self.optimization_history.append({
+            'resistance': resistance,
+            'achieved_cc': achieved_cc,
+            'error': error
+        })
+
+        return error
+
+    def optimize_resistance(self, target_cc: float,
+                            resistance_bounds: tuple = (1e-4, 1e-2),
+                            method: str = 'bounded') -> GapOptimizationResult:
+        """
+        Optimize gap junction resistance to achieve target coupling coefficient
+
+        Parameters:
+        -----------
+        target_cc : float
+            Target coupling coefficient to achieve
+        resistance_bounds : tuple, optional
+            (min, max) bounds for resistance search
+        method : str, optional
+            Optimization method to use (default: 'bounded')
+
+        Returns:
+        --------
+        GapOptimizationResult
+            Container with optimization results
+        """
+        self.optimization_history = []
+
+        # Run optimization
+        result = minimize_scalar(
+            self._objective_function,
+            args=(target_cc,),
+            bounds=resistance_bounds,
+            method=method
+        )
+
+        # Run final model with optimal resistance
+        self.tuner.model(result.x)
+        final_cc = self.tuner.coupling_coefficient(
+            self.tuner.t_vec,
+            self.tuner.soma_v_1,
+            self.tuner.soma_v_2,
+            self.tuner.general_settings['tstart'],
+            self.tuner.general_settings['tstart'] + self.tuner.general_settings['tdur']
+        )
+
+        # Package up our results
+        optimization_result = GapOptimizationResult(
+            optimal_resistance=result.x,
+            achieved_cc=final_cc,
+            target_cc=target_cc,
+            error=result.fun,
+            optimization_path=self.optimization_history
+        )
+
+        return optimization_result
+
+    def plot_optimization_results(self, result: GapOptimizationResult):
+        """
+        Plot optimization results including convergence and final voltage traces
+
+        Parameters:
+        -----------
+        result : GapOptimizationResult
+            Results from optimization
+        """
+        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))
+
+        # Plot voltage traces
+        t_range = [
+            self.tuner.general_settings['tstart'] - 100.,
+            self.tuner.general_settings['tstart'] + self.tuner.general_settings['tdur'] + 100.
+        ]
+        t = np.array(self.tuner.t_vec)
+        v1 = np.array(self.tuner.soma_v_1)
+        v2 = np.array(self.tuner.soma_v_2)
+        tidx = (t >= t_range[0]) & (t <= t_range[1])
+
+        ax1.plot(t[tidx], v1[tidx], 'b', label=f'{self.tuner.cell_name} 1')
+        ax1.plot(t[tidx], v2[tidx], 'r', label=f'{self.tuner.cell_name} 2')
+        ax1.set_xlabel('Time (ms)')
+        ax1.set_ylabel('Membrane Voltage (mV)')
+        ax1.legend()
+        ax1.set_title('Optimized Voltage Traces')
+
+        # Plot error convergence
+        errors = [h['error'] for h in result.optimization_path]
+        ax2.plot(errors)
+        ax2.set_xlabel('Iteration')
+        ax2.set_ylabel('Error')
+        ax2.set_title('Error Convergence')
+        ax2.set_yscale('log')
+
+        # Plot resistance convergence
+        resistances = [h['resistance'] for h in result.optimization_path]
+        ax3.plot(resistances)
+        ax3.set_xlabel('Iteration')
+        ax3.set_ylabel('Resistance')
+        ax3.set_title('Resistance Convergence')
+        ax3.set_yscale('log')
+
+        # Print final results
+        result_text = (
+            f'Optimal Resistance: {result.optimal_resistance:.2e}\n'
+            f'Target CC: {result.target_cc:.3f}\n'
+            f'Achieved CC: {result.achieved_cc:.3f}\n'
+            f'Final Error: {result.error:.2e}'
+        )
+        ax4.text(0.1, 0.7, result_text, transform=ax4.transAxes, fontsize=10)
+        ax4.axis('off')
+
+        plt.tight_layout()
+        plt.show()
+
+    def parameter_sweep(self, resistance_range: np.ndarray) -> dict:
+        """
+        Perform a parameter sweep across different resistance values
+
+        Parameters:
+        -----------
+        resistance_range : np.ndarray
+            Array of resistance values to test
+
+        Returns:
+        --------
+        dict : Results of parameter sweep including coupling coefficients
+        """
+        results = {
+            'resistance': [],
+            'coupling_coefficient': []
+        }
+
+        for resistance in tqdm(resistance_range, desc="Sweeping resistance values"):
+            self.tuner.model(resistance)
+            cc = self.tuner.coupling_coefficient(
+                self.tuner.t_vec,
+                self.tuner.soma_v_1,
+                self.tuner.soma_v_2,
+                self.tuner.general_settings['tstart'],
+                self.tuner.general_settings['tstart'] + self.tuner.general_settings['tdur']
+            )
+
+            results['resistance'].append(resistance)
+            results['coupling_coefficient'].append(cc)
+
+        return results
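The new classes above add a small optimization layer on top of the existing tuners. The sketch below shows how they might be called, using only methods visible in this diff; the `SynapseTuner`/`GapJunctionTuner` setup is not part of the diff, so `syn_tuner` and `gap_tuner` are placeholders, and `'tau_d'` is a hypothetical synapse parameter name:

```python
import numpy as np
from bmtool.synapses import SynapseOptimizer, GapJunctionOptimizer

syn_tuner = ...   # placeholder: a configured SynapseTuner (its setup is not shown in this diff)
gap_tuner = ...   # placeholder: a configured GapJunctionTuner

# Synapse side: fit named synapse parameters so induction approaches a target value.
syn_opt = SynapseOptimizer(syn_tuner)
result = syn_opt.optimize_parameters(
    target_metrics={'induction': 0.2},      # default cost function only reads 'induction'
    param_bounds={'tau_d': (1.0, 20.0)},    # hypothetical parameter name and bounds
    method='SLSQP',
    init_guess='random',
)
syn_opt.plot_optimization_results(result)

# Gap junction side: search for the resistance giving a target coupling coefficient.
gap_opt = GapJunctionOptimizer(gap_tuner)
gj_result = gap_opt.optimize_resistance(target_cc=0.05, resistance_bounds=(1e-4, 1e-2))
gap_opt.plot_optimization_results(gj_result)

# Or sweep resistance values and collect the resulting coupling coefficients.
sweep = gap_opt.parameter_sweep(np.linspace(1e-4, 1e-2, 20))
```

Note that `optimize_parameters` scores candidates only on induction through `_default_cost_function`, so other entries in `target_metrics` are ignored unless a custom `cost_function` is supplied.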
{bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: bmtool
-Version: 0.6.
+Version: 0.6.4
 Summary: BMTool
 Home-page: https://github.com/cyneuro/bmtool
 Download-URL:
@@ -114,7 +114,9 @@ Commands:
 - [ZAP](#zap)
 - [Tuner](#single-cell-tuning)
 - [VHalf Segregation](#vhalf-segregation-module)
-####
+#### Jupyter Notebook for how to use passive properties, current injection, FI curve, and ZAP can be found [here](examples/single_cell/). There are versions with example how to use single cells in HOC format and in the Allen Database format.
+
+#### The single cell module can take any neuron HOC object and calculate passive properties, run a current clamp, calculate FI curve, or run a ZAP. The module is designed to work with HOC template files and can also turn Allen database SWC and json files into HOC objects and use those. The examples below uses "Cell_Cf" which is the name of a HOC templated loaded by the profiler. E
 
 #### First step is it initialize the profiler.
 
@@ -351,7 +353,7 @@ ex: [https://github.com/tjbanks/two-cell-hco](https://github.com/tjbanks/two-cel
 -Gap Junction tuner
 
 #### SynapticTuner - Aids in the tuning of synapses by printing out synaptic properties and giving the user sliders in a Jupyter notebook to tune the synapse. For more info view the example [here](examples/synapses/synaptic_tuner.ipynb)
-#### GapJunctionTuner - Provides jupyter sliders to tune for coupling coefficient in a similar style to the SynapticTuner an example can be viewed [here](examples/synapses/gap_junction_tuner.ipynb)
+#### GapJunctionTuner - Provides jupyter sliders to tune for coupling coefficient in a similar style to the SynapticTuner. The Gap junction tuner also has an optimizer which can find the best resistance for the desired coupling coefficient. an example can be viewed [here](examples/synapses/gap_junction_tuner.ipynb)
 
 ### Connectors Module
 - [UnidirectionConnector](#unidirectional-connector---unidirectional-connections-in-bmtk-network-model-with-given-probability-within-a-single-population-or-between-two-populations)
@@ -409,7 +411,6 @@ net.add_edges(**connector.edge_params())
 ```
 
 ## Bmplot Module
-### for a demo please see the notebook [here](examples/bmplot/bmplot.ipynb)
 - [total_connection_matrix](#total_connection_matrix)
 - [percent_connection_matrix](#percent_connection_matrix)
 - [connector_percent_matrix](#connector_percent_matrix)
@@ -420,6 +421,7 @@ net.add_edges(**connector.edge_params())
 - [connection_histogram](#connection_histogram)
 - [plot_3d_positions](#plot_3d_positions)
 - [plot_3d_cell_rotation](#plot_3d_cell_rotation)
+### for a demo please see the notebook [here](examples/bmplot/bmplot.ipynb)
 
 ### total_connection_matrix
 #### Generates a table of total number of connections each neuron population recieves
{bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/RECORD CHANGED

@@ -6,8 +6,8 @@ bmtool/connectors.py,sha256=2vVUsqYMaCuWZ-4C5eUzqwsFItFM9vm0ytZdRQdWgoc,72243
 bmtool/graphs.py,sha256=K8BiughRUeXFVvAgo8UzrwpSClIVg7UfmIcvtEsEsk0,6020
 bmtool/manage.py,sha256=_lCU0qBQZ4jSxjzAJUd09JEetb--cud7KZgxQFbLGSY,657
 bmtool/plot_commands.py,sha256=Tqujyf0c0u8olhiHOMwgUSJXIIE1hgjv6otb25G9cA0,12298
-bmtool/singlecell.py,sha256=
-bmtool/synapses.py,sha256=
+bmtool/singlecell.py,sha256=MQiLucsI6OBIjtcJra3Z9PTFQOE-Zn5ST-R9SmFvrbQ,27049
+bmtool/synapses.py,sha256=FPpNZavsTe-ZuKgO6NUOeFP6mfnEn2KISYuEqdF503w,47984
 bmtool/debug/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bmtool/debug/commands.py,sha256=AwtcR7BUUheM0NxvU1Nu234zCdpobhJv5noX8x5K2vY,583
 bmtool/debug/debug.py,sha256=xqnkzLiH3s-tS26Y5lZZL62qR2evJdi46Gud-HzxEN4,207
@@ -16,9 +16,9 @@ bmtool/util/commands.py,sha256=zJF-fiLk0b8LyzHDfvewUyS7iumOxVnj33IkJDzux4M,64396
 bmtool/util/util.py,sha256=00vOAwTVIifCqouBoFoT0lBashl4fCalrk8fhg_Uq4c,56654
 bmtool/util/neuron/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bmtool/util/neuron/celltuner.py,sha256=xSRpRN6DhPFz4q5buq_W8UmsD7BbUrkzYBEbKVloYss,87194
-bmtool-0.6.
-bmtool-0.6.
-bmtool-0.6.
-bmtool-0.6.
-bmtool-0.6.
-bmtool-0.6.
+bmtool-0.6.4.dist-info/LICENSE,sha256=qrXg2jj6kz5d0EnN11hllcQt2fcWVNumx0xNbV05nyM,1068
+bmtool-0.6.4.dist-info/METADATA,sha256=USR9YbT2CPKOw_bz3ZspPCBrI10NkDsLV9imF8-ND54,20224
+bmtool-0.6.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+bmtool-0.6.4.dist-info/entry_points.txt,sha256=0-BHZ6nUnh0twWw9SXNTiRmKjDnb1VO2DfG_-oprhAc,45
+bmtool-0.6.4.dist-info/top_level.txt,sha256=gpd2Sj-L9tWbuJEd5E8C8S8XkNm5yUE76klUYcM-eWM,7
+bmtool-0.6.4.dist-info/RECORD,,
{bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/LICENSE: file without changes
{bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/WHEEL: file without changes
{bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/entry_points.txt: file without changes
{bmtool-0.6.3.dist-info → bmtool-0.6.4.dist-info}/top_level.txt: file without changes