bmtool 0.7.5.1__py3-none-any.whl → 0.7.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bmtool/bmplot/connections.py +11 -5
- bmtool/connectors.py +386 -0
- bmtool/singlecell.py +429 -31
- bmtool/synapses.py +369 -45
- bmtool/util/util.py +69 -17
- {bmtool-0.7.5.1.dist-info → bmtool-0.7.7.dist-info}/METADATA +1 -1
- {bmtool-0.7.5.1.dist-info → bmtool-0.7.7.dist-info}/RECORD +11 -11
- {bmtool-0.7.5.1.dist-info → bmtool-0.7.7.dist-info}/WHEEL +0 -0
- {bmtool-0.7.5.1.dist-info → bmtool-0.7.7.dist-info}/entry_points.txt +0 -0
- {bmtool-0.7.5.1.dist-info → bmtool-0.7.7.dist-info}/licenses/LICENSE +0 -0
- {bmtool-0.7.5.1.dist-info → bmtool-0.7.7.dist-info}/top_level.txt +0 -0
bmtool/synapses.py
CHANGED
@@ -38,6 +38,7 @@ DEFAULT_GAP_JUNCTION_GENERAL_SETTINGS = {
     "tdur": 500.0,
     "dt": 0.025,
     "celsius": 20,
+    "iclamp_amp": -0.01,  # nA
 }
 
 
@@ -1175,7 +1176,9 @@ class SynapseTuner:
         amp = np.array(amp)
         amp = amp * 1000  # scale up
         amp = amp.reshape(-1, amp.shape[-1])
-
+
+        # Calculate 90th percentile amplitude for normalization
+        percentile_90 = np.percentile(amp, 90)
 
         def format_array(arr):
             """Format an array to 2 significant figures for cleaner output."""
@@ -1187,49 +1190,42 @@
             f"Short Term Plasticity Results for {self.train_freq}Hz with {self.train_delay} Delay"
         )
         print("=" * 40)
-        print("PPR: Above
+        print("PPR: Above 0 is facilitating, below 0 is depressing.")
         print("Induction: Above 0 is facilitating, below 0 is depressing.")
         print("Recovery: A measure of how fast STP decays.\n")
 
-        # PPR Calculation
-        ppr = amp[:, 1:2]
+        # PPR Calculation: (Avg 2nd pulse - Avg 1st pulse) / 90th percentile amplitude
+        ppr = (np.mean(amp[:, 1:2]) - np.mean(amp[:, 0:1])) / percentile_90
         print("Paired Pulse Response (PPR)")
-        print("Calculation: 2nd pulse
+        print("Calculation: (Avg 2nd pulse - Avg 1st pulse) / 90th percentile amplitude")
         print(
-            f"Values: ({
+            f"Values: ({np.mean(amp[:, 1:2]):.3f} - {np.mean(amp[:, 0:1]):.3f}) / {percentile_90:.3f} = {ppr:.3f}\n"
         )
 
-        # Induction Calculation
-        induction = np.mean(
+        # Induction Calculation: (Avg (6th, 7th, 8th pulses) - Avg 1st pulse) / 90th percentile amplitude
+        induction = (np.mean(amp[:, 5:8]) - np.mean(amp[:, :1])) / percentile_90
         print("Induction")
-        print("Calculation: (
-        print(
-            f"Values: avg({format_array(amp[:, 5:8])}) - {format_array(amp[:, :1])} / {format_array(maxamp)}"
-        )
+        print("Calculation: (Avg(6th, 7th, 8th pulses) - Avg 1st pulse) / 90th percentile amplitude")
         print(
-            f"
+            f"Values: {np.mean(amp[:, 5:8]):.3f} - {np.mean(amp[:, :1]):.3f} / {percentile_90:.3f} = {induction:.3f}\n"
         )
 
-        # Recovery Calculation
-        recovery = np.mean(
+        # Recovery Calculation: (Avg (9th, 10th, 11th, 12th pulses) - Avg (1st, 2nd, 3rd, 4th pulses)) / 90th percentile amplitude
+        recovery = (np.mean(amp[:, 8:12]) - np.mean(amp[:, :4])) / percentile_90
         print("Recovery")
         print(
-            "Calculation: (
+            "Calculation: (Avg(9th, 10th, 11th, 12th pulses) - Avg(1st to 4th pulses)) / 90th percentile amplitude"
         )
         print(
-            f"Values:
-        )
-        print(
-            f"({format_array(amp[:, 8:12].mean(axis=1))}) - ({format_array(amp[:, :4].mean(axis=1))}) / {format_array(maxamp)} = {recovery:.3f}\n"
+            f"Values: {np.mean(amp[:, 8:12]):.3f} - {np.mean(amp[:, :4]):.3f} / {percentile_90:.3f} = {recovery:.3f}\n"
         )
 
         print("=" * 40 + "\n")
 
-
-
-
-
-        maxamp = maxamp.max()
+        # Calculate final metrics
+        ppr = (np.mean(amp[:, 1:2]) - np.mean(amp[:, 0:1])) / percentile_90
+        induction = (np.mean(amp[:, 5:8]) - np.mean(amp[:, :1])) / percentile_90
+        recovery = (np.mean(amp[:, 8:12]) - np.mean(amp[:, :4])) / percentile_90
 
         return ppr, induction, recovery
 
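Note: 0.7.7 normalizes all three short-term-plasticity metrics by the 90th-percentile amplitude instead of the old max-amplitude scheme. Below is a minimal standalone sketch of the new arithmetic; the `amp` array and its values are illustrative placeholders (not bmtool data), and it assumes a 12-pulse train as the printed calculations above do.

```python
import numpy as np

# Hypothetical response amplitudes (1 trial x 12 pulses), arbitrary units
amp = np.array([[1.0, 1.2, 1.3, 1.3, 1.25, 1.4, 1.45, 1.5, 1.1, 1.05, 1.0, 1.0]])

# Normalize every metric by the 90th-percentile amplitude, as in 0.7.7
p90 = np.percentile(amp, 90)

ppr = (np.mean(amp[:, 1:2]) - np.mean(amp[:, 0:1])) / p90        # 2nd pulse vs 1st
induction = (np.mean(amp[:, 5:8]) - np.mean(amp[:, :1])) / p90   # pulses 6-8 vs 1st
recovery = (np.mean(amp[:, 8:12]) - np.mean(amp[:, :4])) / p90   # pulses 9-12 vs 1-4

# Positive values indicate facilitation; negative values indicate depression.
print(f"PPR={ppr:.3f}, induction={induction:.3f}, recovery={recovery:.3f}")
```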
@@ -1330,7 +1326,7 @@ class SynapseTuner:
         vlamp_status = self.vclamp
 
         # Connection dropdown
-        connection_options = list(self.conn_type_settings.keys())
+        connection_options = sorted(list(self.conn_type_settings.keys()))
         w_connection = widgets.Dropdown(
             options=connection_options,
             value=self.current_connection,
@@ -1767,42 +1763,356 @@ class GapJunctionTuner:
         self.general_settings = {**DEFAULT_GAP_JUNCTION_GENERAL_SETTINGS, **general_settings}
         self.conn_type_settings = conn_type_settings
 
+        self._syn_params_cache = {}
+        self.config = config
+        self.available_networks = []
+        self.current_network = None
+        if self.conn_type_settings is None and self.config is not None:
+            self.conn_type_settings = self._build_conn_type_settings_from_config(self.config)
+        if self.conn_type_settings is None or len(self.conn_type_settings) == 0:
+            raise ValueError("conn_type_settings must be provided or config must be given to load gap junction connections from")
+        self.current_connection = list(self.conn_type_settings.keys())[0]
+        self.conn = self.conn_type_settings[self.current_connection]
+
         h.tstop = self.general_settings["tstart"] + self.general_settings["tdur"] + 100.0
         h.dt = self.general_settings["dt"]  # Time step (resolution) of the simulation in ms
         h.steps_per_ms = 1 / h.dt
         h.celsius = self.general_settings["celsius"]
 
+        # Clean up any existing parallel context before setting up gap junctions
+        try:
+            pc_temp = h.ParallelContext()
+            pc_temp.done()  # Clean up any existing parallel context
+        except:
+            pass  # Ignore errors if no existing context
+
+        # Force cleanup
+        import gc
+        gc.collect()
+
         # set up gap junctions
-        pc = h.ParallelContext()
+        self.pc = h.ParallelContext()
 
         # Use provided hoc_cell or create new cells
         if self.hoc_cell is not None:
             self.cell1 = self.hoc_cell
             # For gap junctions, we need two cells, so create a second one if using hoc_cell
-            self.cell_name =
+            self.cell_name = self.conn['cell']
             self.cell2 = getattr(h, self.cell_name)()
         else:
-            self.
+            print(self.conn)
+            self.cell_name = self.conn['cell']
             self.cell1 = getattr(h, self.cell_name)()
             self.cell2 = getattr(h, self.cell_name)()
 
         self.icl = h.IClamp(self.cell1.soma[0](0.5))
         self.icl.delay = self.general_settings["tstart"]
         self.icl.dur = self.general_settings["tdur"]
-        self.icl.amp = self.
+        self.icl.amp = self.general_settings["iclamp_amp"]  # nA
 
-        sec1 = list(self.cell1.all)[
-        sec2 = list(self.cell2.all)[
+        sec1 = list(self.cell1.all)[self.conn["sec_id"]]
+        sec2 = list(self.cell2.all)[self.conn["sec_id"]]
 
-
+        # Use unique IDs to avoid conflicts with existing parallel context setups
+        import time
+        unique_id = int(time.time() * 1000) % 10000  # Use timestamp as unique base ID
+
+        self.pc.source_var(sec1(self.conn["sec_x"])._ref_v, unique_id, sec=sec1)
         self.gap_junc_1 = h.Gap(sec1(0.5))
-        pc.target_var(self.gap_junc_1._ref_vgap, 1)
+        self.pc.target_var(self.gap_junc_1._ref_vgap, unique_id + 1)
 
-        pc.source_var(sec2(
+        self.pc.source_var(sec2(self.conn["sec_x"])._ref_v, unique_id + 1, sec=sec2)
         self.gap_junc_2 = h.Gap(sec2(0.5))
-        pc.target_var(self.gap_junc_2._ref_vgap,
+        self.pc.target_var(self.gap_junc_2._ref_vgap, unique_id)
+
+        self.pc.setup_transfer()
+
+        # Now it's safe to initialize NEURON
+        h.finitialize()
 
-
+    def _load_synaptic_params_from_config(self, config: dict, dynamics_params: str) -> dict:
+        try:
+            # Get the synaptic models directory from config
+            synaptic_models_dir = config.get('components', {}).get('synaptic_models_dir', '')
+            if synaptic_models_dir:
+                # Handle path variables
+                if synaptic_models_dir.startswith('$'):
+                    # This is a placeholder, try to resolve it
+                    config_dir = os.path.dirname(config.get('config_path', ''))
+                    synaptic_models_dir = synaptic_models_dir.replace('$COMPONENTS_DIR',
+                                                                      os.path.join(config_dir, 'components'))
+                    synaptic_models_dir = synaptic_models_dir.replace('$BASE_DIR', config_dir)
+
+                dynamics_file = os.path.join(synaptic_models_dir, dynamics_params)
+
+                if os.path.exists(dynamics_file):
+                    with open(dynamics_file, 'r') as f:
+                        return json.load(f)
+                else:
+                    print(f"Warning: Dynamics params file not found: {dynamics_file}")
+        except Exception as e:
+            print(f"Warning: Error loading synaptic parameters: {e}")
+
+        return {}
+
+    def _load_available_networks(self) -> None:
+        """
+        Load available network names from the config file for the network dropdown feature.
+
+        This method is automatically called during initialization when a config file is provided.
+        It populates the available_networks list which enables the network dropdown in
+        InteractiveTuner when multiple networks are available.
+
+        Network Dropdown Behavior:
+        -------------------------
+        - If only one network exists: No network dropdown is shown
+        - If multiple networks exist: Network dropdown appears next to connection dropdown
+        - Networks are loaded from the edges data in the config file
+        - Current network defaults to the first available if not specified during init
+        """
+        if self.config is None:
+            self.available_networks = []
+            return
+
+        try:
+            edges = load_edges_from_config(self.config)
+            self.available_networks = list(edges.keys())
+
+            # Set current network to first available if not specified
+            if self.current_network is None and self.available_networks:
+                self.current_network = self.available_networks[0]
+        except Exception as e:
+            print(f"Warning: Could not load networks from config: {e}")
+            self.available_networks = []
+
+    def _build_conn_type_settings_from_config(self, config_path: str) -> Dict[str, dict]:
+        # Load configuration and get nodes and edges using util.py methods
+        config = load_config(config_path)
+        # Ensure the config dict knows its source path so path substitutions can be resolved
+        try:
+            config['config_path'] = config_path
+        except Exception:
+            pass
+        nodes = load_nodes_from_config(config_path)
+        edges = load_edges_from_config(config_path)
+
+        conn_type_settings = {}
+
+        # Process all edge datasets
+        for edge_dataset_name, edge_df in edges.items():
+            if edge_df.empty:
+                continue
+
+            # Merging with node data to get model templates
+            source_node_df = None
+            target_node_df = None
+
+            # First, try to deterministically parse the edge_dataset_name for patterns like '<src>_to_<tgt>'
+            if '_to_' in edge_dataset_name:
+                parts = edge_dataset_name.split('_to_')
+                if len(parts) == 2:
+                    src_name, tgt_name = parts
+                    if src_name in nodes:
+                        source_node_df = nodes[src_name].add_prefix('source_')
+                    if tgt_name in nodes:
+                        target_node_df = nodes[tgt_name].add_prefix('target_')
+
+            # If not found by parsing name, fall back to inspecting a sample edge row
+            if source_node_df is None or target_node_df is None:
+                sample_edge = edge_df.iloc[0] if len(edge_df) > 0 else None
+                if sample_edge is not None:
+                    source_pop_name = sample_edge.get('source_population', '')
+                    target_pop_name = sample_edge.get('target_population', '')
+                    if source_pop_name in nodes:
+                        source_node_df = nodes[source_pop_name].add_prefix('source_')
+                    if target_pop_name in nodes:
+                        target_node_df = nodes[target_pop_name].add_prefix('target_')
+
+            # As a last resort, attempt to heuristically match
+            if source_node_df is None or target_node_df is None:
+                for pop_name, node_df in nodes.items():
+                    if source_node_df is None and (edge_dataset_name.startswith(pop_name) or edge_dataset_name.endswith(pop_name)):
+                        source_node_df = node_df.add_prefix('source_')
+                    if target_node_df is None and (edge_dataset_name.startswith(pop_name) or edge_dataset_name.endswith(pop_name)):
+                        target_node_df = node_df.add_prefix('target_')
+
+            if source_node_df is None or target_node_df is None:
+                print(f"Warning: Could not find node data for edge dataset {edge_dataset_name}")
+                continue
+
+            # Merge edge data with source node info
+            edges_with_source = pd.merge(
+                edge_df.reset_index(),
+                source_node_df,
+                how='left',
+                left_on='source_node_id',
+                right_index=True
+            )
+
+            # Merge with target node info
+            edges_with_nodes = pd.merge(
+                edges_with_source,
+                target_node_df,
+                how='left',
+                left_on='target_node_id',
+                right_index=True
+            )
+
+            # Skip edge datasets that don't have gap junction information
+            if 'is_gap_junction' not in edges_with_nodes.columns:
+                continue
+
+            # Filter to only gap junction edges
+            # Handle NaN values in is_gap_junction column
+            gap_junction_mask = edges_with_nodes['is_gap_junction'].fillna(False) == True
+            gap_junction_edges = edges_with_nodes[gap_junction_mask]
+            if gap_junction_edges.empty:
+                continue
+
+            # Get unique edge types from the gap junction edges
+            if 'edge_type_id' in gap_junction_edges.columns:
+                edge_types = gap_junction_edges['edge_type_id'].unique()
+            else:
+                edge_types = [None]  # Single edge type
+
+            # Process each edge type
+            for edge_type_id in edge_types:
+                # Filter edges for this type
+                if edge_type_id is not None:
+                    edge_type_data = gap_junction_edges[gap_junction_edges['edge_type_id'] == edge_type_id]
+                else:
+                    edge_type_data = gap_junction_edges
+
+                if len(edge_type_data) == 0:
+                    continue
+
+                # Get representative edge for this type
+                edge_info = edge_type_data.iloc[0]
+
+                # Process gap junction
+                source_model_template = edge_info.get('source_model_template', '')
+                target_model_template = edge_info.get('target_model_template', '')
+
+                source_cell_type = source_model_template.replace('hoc:', '') if source_model_template.startswith('hoc:') else source_model_template
+                target_cell_type = target_model_template.replace('hoc:', '') if target_model_template.startswith('hoc:') else target_model_template
+
+                if source_cell_type != target_cell_type:
+                    continue  # Only process gap junctions between same cell types
+
+                source_pop = edge_info.get('source_pop_name', '')
+                target_pop = edge_info.get('target_pop_name', '')
+
+                conn_name = f"{source_pop}2{target_pop}_gj"
+                if edge_type_id is not None:
+                    conn_name += f"_type_{edge_type_id}"
+
+                conn_settings = {
+                    'cell': source_cell_type,
+                    'sec_id': 0,
+                    'sec_x': 0.5,
+                    'iclamp_amp': -0.01,
+                    'spec_syn_param': {}
+                }
+
+                # Load dynamics params
+                dynamics_file_name = edge_info.get('dynamics_params', '')
+                if dynamics_file_name and dynamics_file_name.upper() != 'NULL':
+                    try:
+                        syn_params = self._load_synaptic_params_from_config(config, dynamics_file_name)
+                        conn_settings['spec_syn_param'] = syn_params
+                    except Exception as e:
+                        print(f"Warning: could not load dynamics_params file '{dynamics_file_name}': {e}")
+
+                conn_type_settings[conn_name] = conn_settings
+
+        return conn_type_settings
+
+    def _switch_connection(self, new_connection: str) -> None:
+        """
+        Switch to a different gap junction connection and update all related properties.
+
+        Parameters:
+        -----------
+        new_connection : str
+            Name of the new connection type to switch to.
+        """
+        if new_connection not in self.conn_type_settings:
+            raise ValueError(f"Connection '{new_connection}' not found in conn_type_settings")
+
+        # Update current connection
+        self.current_connection = new_connection
+        self.conn = self.conn_type_settings[new_connection]
+
+        # Check if cell type changed
+        new_cell_name = self.conn['cell']
+        if self.cell_name != new_cell_name:
+            self.cell_name = new_cell_name
+
+            # Recreate cells
+            if self.hoc_cell is None:
+                self.cell1 = getattr(h, self.cell_name)()
+                self.cell2 = getattr(h, self.cell_name)()
+            else:
+                # For hoc_cell, recreate the second cell
+                self.cell2 = getattr(h, self.cell_name)()
+
+            # Recreate IClamp
+            self.icl = h.IClamp(self.cell1.soma[0](0.5))
+            self.icl.delay = self.general_settings["tstart"]
+            self.icl.dur = self.general_settings["tdur"]
+            self.icl.amp = self.general_settings["iclamp_amp"]
+        else:
+            # Update IClamp parameters even if same cell type
+            self.icl.amp = self.general_settings["iclamp_amp"]
+
+        # Always recreate gap junctions when switching connections
+        # (even for same cell type, sec_id or sec_x might differ)
+
+        # Clean up previous gap junctions and parallel context
+        if hasattr(self, 'gap_junc_1'):
+            del self.gap_junc_1
+        if hasattr(self, 'gap_junc_2'):
+            del self.gap_junc_2
+
+        # Properly clean up the existing parallel context
+        if hasattr(self, 'pc'):
+            self.pc.done()  # Clean up existing parallel context
+
+        # Force garbage collection and reset NEURON state
+        import gc
+        gc.collect()
+        h.finitialize()
+
+        # Create a fresh parallel context after cleanup
+        self.pc = h.ParallelContext()
+
+        try:
+            sec1 = list(self.cell1.all)[self.conn["sec_id"]]
+            sec2 = list(self.cell2.all)[self.conn["sec_id"]]
+
+            # Use unique IDs to avoid conflicts with existing parallel context setups
+            import time
+            unique_id = int(time.time() * 1000) % 10000  # Use timestamp as unique base ID
+
+            self.pc.source_var(sec1(self.conn["sec_x"])._ref_v, unique_id, sec=sec1)
+            self.gap_junc_1 = h.Gap(sec1(0.5))
+            self.pc.target_var(self.gap_junc_1._ref_vgap, unique_id + 1)
+
+            self.pc.source_var(sec2(self.conn["sec_x"])._ref_v, unique_id + 1, sec=sec2)
+            self.gap_junc_2 = h.Gap(sec2(0.5))
+            self.pc.target_var(self.gap_junc_2._ref_vgap, unique_id)
+
+            self.pc.setup_transfer()
+        except Exception as e:
+            print(f"Error setting up gap junctions: {e}")
+            # Try to continue with basic setup
+            self.gap_junc_1 = h.Gap(list(self.cell1.all)[self.conn["sec_id"]](0.5))
+            self.gap_junc_2 = h.Gap(list(self.cell2.all)[self.conn["sec_id"]](0.5))
+
+        # Reset NEURON state after complete setup
+        h.finitialize()
+
+        print(f"Successfully switched to connection: {new_connection}")
+
 
     def model(self, resistance):
         """
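Note: the rewritten GapJunctionTuner setup above uses NEURON's variable-transfer pattern with per-instance source/target IDs. The sketch below shows that pattern in isolation; the two passive sections and the ID value are placeholders, and it assumes a compiled `Gap` point process exposing a `vgap` pointer (as shipped with bmtool's mechanism files) is available on `h`.

```python
from neuron import h

h.load_file("stdrun.hoc")

# Two passive single-compartment cells stand in for the hoc cell templates.
soma1, soma2 = h.Section(name="soma1"), h.Section(name="soma2")
for sec in (soma1, soma2):
    sec.insert("pas")

pc = h.ParallelContext()

# Each half-gap reads the other cell's membrane voltage through a transfer variable.
# Source/target IDs only need to match pairwise; 0.7.7 derives them from a timestamp
# so repeated tuner instances in one session do not collide.
sid = 1234
pc.source_var(soma1(0.5)._ref_v, sid, sec=soma1)
gap2 = h.Gap(soma2(0.5))            # assumes the Gap .mod mechanism is compiled
pc.target_var(gap2._ref_vgap, sid)  # gap on cell 2 sees cell 1's voltage

pc.source_var(soma2(0.5)._ref_v, sid + 1, sec=soma2)
gap1 = h.Gap(soma1(0.5))
pc.target_var(gap1._ref_vgap, sid + 1)  # gap on cell 1 sees cell 2's voltage

pc.setup_transfer()  # must precede initialization
h.finitialize(-65)
```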
@@ -1906,13 +2216,9 @@
             continuous_update=True,
         )
 
-        ui = VBox([w_run, resistance])
-
-        # Create an output widget to control what gets cleared
         output = widgets.Output()
 
-
-        display(output)
+        ui_widgets = [w_run, resistance]
 
         def on_button(*args):
             with output:
@@ -1920,7 +2226,7 @@
                 output.clear_output(wait=True)
 
                 resistance_for_gap = resistance.value
-                print(f"Running simulation with resistance: {resistance_for_gap}")
+                print(f"Running simulation with resistance: {resistance_for_gap:0.6f} and {self.general_settings['iclamp_amp']*1000}pA current clamps")
 
                 try:
                     self.model(resistance_for_gap)
@@ -1941,6 +2247,25 @@
 
                     traceback.print_exc()
 
+        # Add connection dropdown if multiple connections exist
+        if len(self.conn_type_settings) > 1:
+            connection_dropdown = widgets.Dropdown(
+                options=list(self.conn_type_settings.keys()),
+                value=self.current_connection,
+                description='Connection:',
+            )
+            def on_connection_change(change):
+                if change['type'] == 'change' and change['name'] == 'value':
+                    self._switch_connection(change['new'])
+                    on_button()  # Automatically rerun the simulation after switching
+            connection_dropdown.observe(on_connection_change)
+            ui_widgets.insert(0, connection_dropdown)
+
+        ui = VBox(ui_widgets)
+
+        display(ui)
+        display(output)
+
         # Run once initially
         on_button()
         w_run.on_click(on_button)
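Note: the connection dropdown added above follows the standard ipywidgets observer pattern: react only to value changes, switch state, then rerun and redraw inside an Output widget. A self-contained sketch of that pattern follows; the connection names and the `rerun_simulation` helper are hypothetical stand-ins, not bmtool API.

```python
import ipywidgets as widgets
from IPython.display import display

conn_type_settings = {"PV2PV_gj": {}, "SOM2SOM_gj": {}}   # illustrative placeholder data
output = widgets.Output()

def rerun_simulation(connection):
    # Placeholder for the tuner's switch-and-rerun logic
    with output:
        output.clear_output(wait=True)
        print(f"Re-running tuner for {connection}")

dropdown = widgets.Dropdown(options=sorted(conn_type_settings), description="Connection:")

def on_connection_change(change):
    # Only react to actual value changes, as GapJunctionTuner does
    if change["type"] == "change" and change["name"] == "value":
        rerun_simulation(change["new"])

dropdown.observe(on_connection_change)
display(widgets.VBox([dropdown]), output)
```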
@@ -2321,7 +2646,6 @@ class SynapseOptimizer:
         self.tuner.ispk = None
         self.tuner.SingleEvent(plot_and_print=True)
 
-
 # dataclass means just init the typehints as self.typehint. looks a bit cleaner
 @dataclass
 class GapOptimizationResult:
@@ -2536,4 +2860,4 @@ class GapJunctionOptimizer:
             results["resistance"].append(resistance)
             results["coupling_coefficient"].append(cc)
 
-        return results
+        return results