bmtool 0.7.7__py3-none-any.whl → 0.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of bmtool has been flagged as potentially problematic.

bmtool/synapses.py CHANGED
@@ -173,6 +173,7 @@ class SynapseTuner:
  self.other_vars_to_record = other_vars_to_record or []
  self.ispk = None
  self.input_mode = False # Add input_mode attribute
+ self.last_figure = None # Store reference to last generated figure

  # Store original slider_vars for connection switching
  self.original_slider_vars = slider_vars or list(self.synaptic_props.keys())
@@ -1045,7 +1046,9 @@ class SynapseTuner:
  for j in range(num_vars_to_plot, len(axs)):
  fig.delaxes(axs[j])

- # plt.tight_layout()
+ #plt.tight_layout()
+ fig.suptitle(f"Connection: {self.current_connection}")
+ self.last_figure = plt.gcf()
  plt.show()

  def _set_drive_train(self, freq=50.0, delay=250.0):
@@ -1148,7 +1151,7 @@ class SynapseTuner:

  def _calc_ppr_induction_recovery(self, amp, normalize_by_trial=True, print_math=True):
  """
- Calculates paired-pulse ratio, induction, and recovery metrics from response amplitudes.
+ Calculates paired-pulse ratio, induction, recovery, and simple PPR metrics from response amplitudes.

  Parameters:
  -----------
@@ -1163,13 +1166,15 @@ class SynapseTuner:
  --------
  tuple
  A tuple containing:
- - ppr: Paired-pulse ratio (2nd pulse / 1st pulse)
+ - ppr: Paired-pulse ratio (2nd pulse - 1st pulse) normalized by 90th percentile amplitude
  - induction: Measure of facilitation/depression during initial pulses
  - recovery: Measure of recovery after the delay period
+ - simple_ppr: Simple paired-pulse ratio (2nd pulse / 1st pulse)

  Notes:
  ------
- - PPR > 1 indicates facilitation, PPR < 1 indicates depression
+ - PPR > 0 indicates facilitation, PPR < 0 indicates depression
+ - Simple PPR > 1 indicates facilitation, Simple PPR < 1 indicates depression
  - Induction > 0 indicates facilitation, Induction < 0 indicates depression
  - Recovery compares the response after delay to the initial pulses
  """
@@ -1190,34 +1195,44 @@ class SynapseTuner:
  f"Short Term Plasticity Results for {self.train_freq}Hz with {self.train_delay} Delay"
  )
  print("=" * 40)
- print("PPR: Above 0 is facilitating, below 0 is depressing.")
- print("Induction: Above 0 is facilitating, below 0 is depressing.")
- print("Recovery: A measure of how fast STP decays.\n")
+ print("Simple PPR: Above 1 is facilitating, below 1 is depressing")
+ print("PPR: Above 0 is facilitating, below 0 is depressing.")
+ print("Induction: Above 0 is facilitating, below 0 is depressing.")
+ print("Recovery: A measure of how fast STP decays.\n")
+
+ # Simple PPR Calculation: Avg 2nd pulse / Avg 1st pulse
+ simple_ppr = np.mean(amp[:, 1:2]) / np.mean(amp[:, 0:1])
+ print("Simple Paired Pulse Ratio (PPR)")
+ print(" Calculation: Avg 2nd pulse / Avg 1st pulse")
+ print(
+ f" Values: {np.mean(amp[:, 1:2]):.3f} / {np.mean(amp[:, 0:1]):.3f} = {simple_ppr:.3f}\n"
+ )

  # PPR Calculation: (Avg 2nd pulse - Avg 1st pulse) / 90th percentile amplitude
  ppr = (np.mean(amp[:, 1:2]) - np.mean(amp[:, 0:1])) / percentile_90
  print("Paired Pulse Response (PPR)")
- print("Calculation: (Avg 2nd pulse - Avg 1st pulse) / 90th percentile amplitude")
+ print(" Calculation: (Avg 2nd pulse - Avg 1st pulse) / 90th percentile amplitude")
  print(
- f"Values: ({np.mean(amp[:, 1:2]):.3f} - {np.mean(amp[:, 0:1]):.3f}) / {percentile_90:.3f} = {ppr:.3f}\n"
+ f" Values: ({np.mean(amp[:, 1:2]):.3f} - {np.mean(amp[:, 0:1]):.3f}) / {percentile_90:.3f} = {ppr:.3f}\n"
  )
+

  # Induction Calculation: (Avg (6th, 7th, 8th pulses) - Avg 1st pulse) / 90th percentile amplitude
  induction = (np.mean(amp[:, 5:8]) - np.mean(amp[:, :1])) / percentile_90
  print("Induction")
- print("Calculation: (Avg(6th, 7th, 8th pulses) - Avg 1st pulse) / 90th percentile amplitude")
+ print(" Calculation: (Avg(6th, 7th, 8th pulses) - Avg 1st pulse) / 90th percentile amplitude")
  print(
- f"Values: {np.mean(amp[:, 5:8]):.3f} - {np.mean(amp[:, :1]):.3f} / {percentile_90:.3f} = {induction:.3f}\n"
+ f" Values: {np.mean(amp[:, 5:8]):.3f} - {np.mean(amp[:, :1]):.3f} / {percentile_90:.3f} = {induction:.3f}\n"
  )

  # Recovery Calculation: (Avg (9th, 10th, 11th, 12th pulses) - Avg (1st, 2nd, 3rd, 4th pulses)) / 90th percentile amplitude
  recovery = (np.mean(amp[:, 8:12]) - np.mean(amp[:, :4])) / percentile_90
  print("Recovery")
  print(
- "Calculation: (Avg(9th, 10th, 11th, 12th pulses) - Avg(1st to 4th pulses)) / 90th percentile amplitude"
+ " Calculation: (Avg(9th, 10th, 11th, 12th pulses) - Avg(1st to 4th pulses)) / 90th percentile amplitude"
  )
  print(
- f"Values: {np.mean(amp[:, 8:12]):.3f} - {np.mean(amp[:, :4]):.3f} / {percentile_90:.3f} = {recovery:.3f}\n"
+ f" Values: {np.mean(amp[:, 8:12]):.3f} - {np.mean(amp[:, :4]):.3f} / {percentile_90:.3f} = {recovery:.3f}\n"
  )

  print("=" * 40 + "\n")
@@ -1226,8 +1241,9 @@ class SynapseTuner:
  ppr = (np.mean(amp[:, 1:2]) - np.mean(amp[:, 0:1])) / percentile_90
  induction = (np.mean(amp[:, 5:8]) - np.mean(amp[:, :1])) / percentile_90
  recovery = (np.mean(amp[:, 8:12]) - np.mean(amp[:, :4])) / percentile_90
+ simple_ppr = np.mean(amp[:, 1:2]) / np.mean(amp[:, 0:1])

- return ppr, induction, recovery
+ return ppr, induction, recovery, simple_ppr

  def _set_syn_prop(self, **kwargs):
  """
@@ -1268,19 +1284,19 @@ class SynapseTuner:
  for i in range(3):
  self.vcl.amp[i] = self.conn["spec_settings"]["vclamp_amp"]
  self.vcl.dur[i] = vcldur[1][i]
- #h.finitialize(self.cell.Vinit * mV)
- #h.continuerun(self.tstop * ms)
- h.run()
+ h.finitialize(70 * mV)
+ h.continuerun(self.tstop * ms)
+ #h.run()
  else:
- self.tstop = self.general_settings["tstart"] + self.general_settings["tdur"]
+ # Continuous input mode: ensure simulation runs long enough for the full stimulation duration
+ self.tstop = self.general_settings["tstart"] + self.w_duration.value + 300 # 300ms buffer time
  self.nstim.interval = 1000 / input_frequency
  self.nstim.number = np.ceil(self.w_duration.value / 1000 * input_frequency + 1)
  self.nstim2.number = 0
- self.tstop = self.w_duration.value + self.general_settings["tstart"]

- #h.finitialize(self.cell.Vinit * mV)
- #h.continuerun(self.tstop * ms)
- h.run()
+ h.finitialize(70 * mV)
+ h.continuerun(self.tstop * ms)
+ #h.run()

  def InteractiveTuner(self):
  """
@@ -1375,6 +1391,52 @@ class SynapseTuner:
  options=durations, value=duration0, description="Duration"
  )

+ # Save functionality widgets
+ save_path_text = widgets.Text(
+ value="plot.png",
+ description="Save path:",
+ layout=widgets.Layout(width='300px')
+ )
+ save_button = widgets.Button(description="Save Plot", icon="save", button_style="success")
+
+ def save_plot(b):
+ if hasattr(self, 'last_figure') and self.last_figure is not None:
+ try:
+ # Create a new figure with just the first subplot (synaptic current)
+ fig, ax = plt.subplots(figsize=(8, 6))
+
+ # Get the axes from the original figure
+ original_axes = self.last_figure.get_axes()
+ if len(original_axes) > 0:
+ first_ax = original_axes[0]
+
+ # Copy the data from the first subplot
+ for line in first_ax.get_lines():
+ ax.plot(line.get_xdata(), line.get_ydata(),
+ color=line.get_color(), label=line.get_label())
+
+ # Copy axis labels and title
+ ax.set_xlabel(first_ax.get_xlabel())
+ ax.set_ylabel(first_ax.get_ylabel())
+ ax.set_title(first_ax.get_title())
+ ax.set_xlim(first_ax.get_xlim())
+ ax.legend()
+ ax.grid(True)
+
+ # Save the new figure
+ fig.savefig(save_path_text.value)
+ plt.close(fig) # Close the temporary figure
+ print(f"Synaptic current plot saved to {save_path_text.value}")
+ else:
+ print("No subplots found in the figure")
+
+ except Exception as e:
+ print(f"Error saving plot: {e}")
+ else:
+ print("No plot to save")
+
+ save_button.on_click(save_plot)
+
  def create_dynamic_sliders():
  """Create sliders based on current connection's parameters"""
  sliders = {}
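The save controls added above use the usual ipywidgets Text-plus-Button pattern. A stripped-down sketch of the same wiring outside the tuner (the callback body is a placeholder, not the bmtool implementation, which copies self.last_figure and writes it to disk as shown in the diff):

    import ipywidgets as widgets
    from IPython.display import display

    save_path_text = widgets.Text(value="plot.png", description="Save path:")
    save_button = widgets.Button(description="Save Plot", icon="save", button_style="success")

    def save_plot(_):
        # In SynapseTuner, this is where the stored last_figure would be written to disk.
        print(f"Would save the current figure to {save_path_text.value}")

    save_button.on_click(save_plot)
    display(widgets.HBox([save_path_text, save_button]))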
@@ -1453,7 +1515,7 @@ class SynapseTuner:
  the network dropdown. It coordinates the complete switching process:
  1. Calls _switch_network() to rebuild connections for the new network
  2. Updates the connection dropdown options with new network's connections
- 3. Recreates dynamic sliders for the new connection parameters
+ 3. Recreates dynamic sliders for new connection parameters
  4. Refreshes the entire UI to reflect all changes
  """
  if w_network is None:
@@ -1515,8 +1577,9 @@ class SynapseTuner:
  else:
  connection_row = HBox([w_connection])
  slider_row = HBox([w_input_freq, self.w_delay, self.w_duration])
+ save_row = HBox([save_path_text, save_button])

- ui = VBox([connection_row, button_row, slider_row, slider_columns])
+ ui = VBox([connection_row, button_row, slider_row, slider_columns, save_row])

  # Function to update UI based on input mode
  def update_ui(*args):
@@ -1618,6 +1681,7 @@ class SynapseTuner:
  Dictionary containing frequency-dependent metrics with keys:
  - 'frequencies': List of tested frequencies
  - 'ppr': Paired-pulse ratios at each frequency
+ - 'simple_ppr': Simple paired-pulse ratios (2nd/1st pulse) at each frequency
  - 'induction': Induction values at each frequency
  - 'recovery': Recovery values at each frequency

@@ -1627,7 +1691,7 @@ class SynapseTuner:
  behavior of synapses, such as identifying facilitating vs. depressing regimes
  or the frequency at which a synapse transitions between these behaviors.
  """
- results = {"frequencies": freqs, "ppr": [], "induction": [], "recovery": []}
+ results = {"frequencies": freqs, "ppr": [], "induction": [], "recovery": [], "simple_ppr": []}

  # Store original state
  original_ispk = self.ispk
@@ -1635,11 +1699,12 @@ class SynapseTuner:
  for freq in tqdm(freqs, desc="Analyzing frequencies"):
  self._simulate_model(freq, delay)
  amp = self._response_amplitude()
- ppr, induction, recovery = self._calc_ppr_induction_recovery(amp, print_math=False)
+ ppr, induction, recovery, simple_ppr = self._calc_ppr_induction_recovery(amp, print_math=False)

  results["ppr"].append(float(ppr))
  results["induction"].append(float(induction))
  results["recovery"].append(float(recovery))
+ results["simple_ppr"].append(float(simple_ppr))

  # Restore original state
  self.ispk = original_ispk
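With the extra return value threaded through, callers of stp_frequency_response now receive four metrics per frequency. A hedged usage sketch (the SynapseTuner constructor arguments below are placeholders; real construction depends on your BMTK config and settings):

    from bmtool.synapses import SynapseTuner

    # Placeholder construction for illustration; consult the bmtool docs for actual arguments.
    tuner = SynapseTuner(config="simulation_config.json")

    results = tuner.stp_frequency_response(freqs=[5, 10, 20, 50, 100], delay=250, plot=False)
    for f, p, sp in zip(results["frequencies"], results["ppr"], results["simple_ppr"]):
        print(f"{f:>4} Hz  normalized PPR {p:+.3f}   simple PPR {sp:.3f}")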
@@ -1659,6 +1724,7 @@ class SynapseTuner:
  Dictionary containing frequency analysis results with keys:
  - 'frequencies': List of tested frequencies
  - 'ppr': Paired-pulse ratios at each frequency
+ - 'simple_ppr': Simple paired-pulse ratios at each frequency
  - 'induction': Induction values at each frequency
  - 'recovery': Recovery values at each frequency
  log_plot : bool
@@ -1667,24 +1733,27 @@ class SynapseTuner:
  Notes:
  ------
  Creates a figure with three subplots showing:
- 1. Paired-pulse ratio vs. frequency
+ 1. Paired-pulse ratios (both normalized and simple) vs. frequency
  2. Induction vs. frequency
  3. Recovery vs. frequency

  Each plot includes a horizontal reference line at y=0 or y=1 to indicate
  the boundary between facilitation and depression.
  """
- fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))
+ fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))

- # Plot PPR
+ # Plot both PPR measures
  if log_plot:
- ax1.semilogx(results["frequencies"], results["ppr"], "o-")
+ ax1.semilogx(results["frequencies"], results["ppr"], "o-", label="Normalized PPR")
+ ax1.semilogx(results["frequencies"], results["simple_ppr"], "s-", label="Simple PPR")
  else:
- ax1.plot(results["frequencies"], results["ppr"], "o-")
+ ax1.plot(results["frequencies"], results["ppr"], "o-", label="Normalized PPR")
+ ax1.plot(results["frequencies"], results["simple_ppr"], "s-", label="Simple PPR")
  ax1.axhline(y=1, color="gray", linestyle="--", alpha=0.5)
  ax1.set_xlabel("Frequency (Hz)")
  ax1.set_ylabel("Paired Pulse Ratio")
  ax1.set_title("PPR vs Frequency")
+ ax1.legend()
  ax1.grid(True)

  # Plot Induction
@@ -1713,6 +1782,168 @@ class SynapseTuner:
  plt.show()


+ def generate_synaptic_table(self, stp_frequency=50.0, stp_delay=250.0, plot=True):
+ """
+ Generate a comprehensive table of synaptic parameters for all connections.
+
+ This method iterates through all available connections, runs simulations to
+ characterize each synapse, and compiles the results into a pandas DataFrame.
+
+ Parameters:
+ -----------
+ stp_frequency : float, optional
+ Frequency in Hz to use for STP (short-term plasticity) analysis. Default is 50.0 Hz.
+ stp_delay : float, optional
+ Delay in ms between pulse trains for STP analysis. Default is 250.0 ms.
+ plot : bool, optional
+ Whether to display the resulting table. Default is True.
+
+ Returns:
+ --------
+ pd.DataFrame
+ DataFrame containing synaptic parameters for each connection with columns:
+ - connection: Connection name
+ - rise_time: 20-80% rise time (ms)
+ - decay_time: Decay time constant (ms)
+ - latency: Response latency (ms)
+ - half_width: Response half-width (ms)
+ - peak_amplitude: Peak synaptic current amplitude (pA)
+ - baseline: Baseline current (pA)
+ - ppr: Paired-pulse ratio (normalized)
+ - simple_ppr: Simple paired-pulse ratio (2nd/1st pulse)
+ - induction: STP induction measure
+ - recovery: STP recovery measure
+
+ Notes:
+ ------
+ This method temporarily switches between connections to characterize each one,
+ then restores the original connection. The STP metrics are calculated at the
+ specified frequency and delay.
+ """
+ # Store original connection to restore later
+ original_connection = self.current_connection
+
+ # Initialize results list
+ results = []
+
+ print(f"Analyzing {len(self.conn_type_settings)} connections...")
+
+ for conn_name in tqdm(self.conn_type_settings.keys(), desc="Analyzing connections"):
+ try:
+ # Switch to this connection
+ self._switch_connection(conn_name)
+
+ # Run single event analysis
+ self.SingleEvent(plot_and_print=False)
+
+ # Get synaptic properties from the single event
+ syn_props = self._get_syn_prop()
+
+ # Run STP analysis at specified frequency
+ stp_results = self.stp_frequency_response(
+ freqs=[stp_frequency],
+ delay=stp_delay,
+ plot=False,
+ log_plot=False
+ )
+
+ # Extract STP metrics for this frequency
+ freq_idx = 0 # Only one frequency tested
+ ppr = stp_results['ppr'][freq_idx]
+ induction = stp_results['induction'][freq_idx]
+ recovery = stp_results['recovery'][freq_idx]
+ simple_ppr = stp_results['simple_ppr'][freq_idx]
+
+ # Compile results for this connection
+ conn_results = {
+ 'connection': conn_name,
+ 'rise_time': float(self.rise_time),
+ 'decay_time': float(self.decay_time),
+ 'latency': float(syn_props.get('latency', 0)),
+ 'half_width': float(syn_props.get('half_width', 0)),
+ 'peak_amplitude': float(syn_props.get('amp', 0)),
+ 'baseline': float(syn_props.get('baseline', 0)),
+ 'ppr': float(ppr),
+ 'simple_ppr': float(simple_ppr),
+ 'induction': float(induction),
+ 'recovery': float(recovery)
+ }
+
+ results.append(conn_results)
+
+ except Exception as e:
+ print(f"Warning: Failed to analyze connection '{conn_name}': {e}")
+ # Add partial results if possible
+ results.append({
+ 'connection': conn_name,
+ 'rise_time': float('nan'),
+ 'decay_time': float('nan'),
+ 'latency': float('nan'),
+ 'half_width': float('nan'),
+ 'peak_amplitude': float('nan'),
+ 'baseline': float('nan'),
+ 'ppr': float('nan'),
+ 'simple_ppr': float('nan'),
+ 'induction': float('nan'),
+ 'recovery': float('nan')
+ })
+
+ # Restore original connection
+ if original_connection in self.conn_type_settings:
+ self._switch_connection(original_connection)
+
+ # Create DataFrame
+ df = pd.DataFrame(results)
+
+ # Set connection as index for better display
+ df = df.set_index('connection')
+
+ if plot:
+ # Display the table
+ print("\nSynaptic Parameters Table:")
+ print("=" * 80)
+ display(df.round(4))
+
+ # Optional: Create a simple bar plot for key metrics
+ try:
+ fig, axes = plt.subplots(2, 2, figsize=(15, 10))
+ fig.suptitle(f'Synaptic Parameters Across Connections (STP at {stp_frequency}Hz)', fontsize=16)
+
+ # Plot rise/decay times
+ df[['rise_time', 'decay_time']].plot(kind='bar', ax=axes[0,0])
+ axes[0,0].set_title('Rise and Decay Times')
+ axes[0,0].set_ylabel('Time (ms)')
+ axes[0,0].tick_params(axis='x', rotation=45)
+
+ # Plot PPR metrics
+ df[['ppr', 'simple_ppr']].plot(kind='bar', ax=axes[0,1])
+ axes[0,1].set_title('Paired-Pulse Ratios')
+ axes[0,1].axhline(y=1, color='gray', linestyle='--', alpha=0.5)
+ axes[0,1].tick_params(axis='x', rotation=45)
+
+ # Plot induction
+ df['induction'].plot(kind='bar', ax=axes[1,0], color='green')
+ axes[1,0].set_title('STP Induction')
+ axes[1,0].axhline(y=0, color='gray', linestyle='--', alpha=0.5)
+ axes[1,0].set_ylabel('Induction')
+ axes[1,0].tick_params(axis='x', rotation=45)
+
+ # Plot recovery
+ df['recovery'].plot(kind='bar', ax=axes[1,1], color='orange')
+ axes[1,1].set_title('STP Recovery')
+ axes[1,1].axhline(y=0, color='gray', linestyle='--', alpha=0.5)
+ axes[1,1].set_ylabel('Recovery')
+ axes[1,1].tick_params(axis='x', rotation=45)
+
+ plt.tight_layout()
+ plt.show()
+
+ except Exception as e:
+ print(f"Warning: Could not create plots: {e}")
+
+ return df
+
+
 class GapJunctionTuner:
 def __init__(
 self,
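generate_synaptic_table returns a plain pandas DataFrame indexed by connection name, so the per-connection summary can be sorted or exported directly. A sketch of typical follow-up use (the tuner construction is assumed to have already happened and is not shown):

    # Assuming `tuner` is an already-initialized SynapseTuner.
    df = tuner.generate_synaptic_table(stp_frequency=50.0, stp_delay=250.0, plot=False)

    # The DataFrame is indexed by connection name, so standard pandas operations apply.
    print(df.sort_values("induction", ascending=False).head())
    df.round(4).to_csv("synaptic_parameters_50Hz.csv")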
@@ -1767,6 +1998,7 @@ class GapJunctionTuner:
  self.config = config
  self.available_networks = []
  self.current_network = None
+ self.last_figure = None
  if self.conn_type_settings is None and self.config is not None:
  self.conn_type_settings = self._build_conn_type_settings_from_config(self.config)
  if self.conn_type_settings is None or len(self.conn_type_settings) == 0:
@@ -2168,6 +2400,7 @@ class GapJunctionTuner:
  plt.xlabel("Time (ms)")
  plt.ylabel("Membrane Voltage (mV)")
  plt.legend()
+ self.last_figure = plt.gcf()

  def coupling_coefficient(self, t, v1, v2, t_start, t_end, dt=h.dt):
  """
@@ -2202,662 +2435,1133 @@ class GapJunctionTuner:
2202
2435
  return (v2[idx2] - v2[idx1]) / (v1[idx2] - v1[idx1])
2203
2436
 
2204
2437
  def InteractiveTuner(self):
2205
- w_run = widgets.Button(description="Run", icon="history", button_style="primary")
2206
- values = [i * 10**-4 for i in range(1, 1001)] # From 1e-4 to 1e-1
2207
-
2208
- # Create the SelectionSlider widget with appropriate formatting
2209
- resistance = widgets.FloatLogSlider(
2210
- value=0.001,
2211
- base=10,
2212
- min=-4, # max exponent of base
2213
- max=-1, # min exponent of base
2214
- step=0.1, # exponent step
2215
- description="Resistance: ",
2216
- continuous_update=True,
2217
- )
2218
-
2219
- output = widgets.Output()
2220
-
2221
- ui_widgets = [w_run, resistance]
2222
-
2223
- def on_button(*args):
2224
- with output:
2225
- # Clear only the output widget, not the entire cell
2226
- output.clear_output(wait=True)
2227
-
2228
- resistance_for_gap = resistance.value
2229
- print(f"Running simulation with resistance: {resistance_for_gap:0.6f} and {self.general_settings['iclamp_amp']*1000}pA current clamps")
2230
-
2231
- try:
2232
- self.model(resistance_for_gap)
2233
- self.plot_model()
2234
-
2235
- # Convert NEURON vectors to numpy arrays
2236
- t_array = np.array(self.t_vec)
2237
- v1_array = np.array(self.soma_v_1)
2238
- v2_array = np.array(self.soma_v_2)
2239
-
2240
- cc = self.coupling_coefficient(t_array, v1_array, v2_array, 500, 1000)
2241
- print(f"coupling_coefficient is {cc:0.4f}")
2242
- plt.show()
2243
-
2244
- except Exception as e:
2245
- print(f"Error during simulation or analysis: {e}")
2246
- import traceback
2247
-
2248
- traceback.print_exc()
2249
-
2250
- # Add connection dropdown if multiple connections exist
2251
- if len(self.conn_type_settings) > 1:
2252
- connection_dropdown = widgets.Dropdown(
2253
- options=list(self.conn_type_settings.keys()),
2254
- value=self.current_connection,
2255
- description='Connection:',
2256
- )
2257
- def on_connection_change(change):
2258
- if change['type'] == 'change' and change['name'] == 'value':
2259
- self._switch_connection(change['new'])
2260
- on_button() # Automatically rerun the simulation after switching
2261
- connection_dropdown.observe(on_connection_change)
2262
- ui_widgets.insert(0, connection_dropdown)
2263
-
2264
- ui = VBox(ui_widgets)
2265
-
2266
- display(ui)
2267
- display(output)
2268
-
2269
- # Run once initially
2270
- on_button()
2271
- w_run.on_click(on_button)
2272
-
2273
-
2274
- # optimizers!
2275
-
2276
-
2277
- @dataclass
2278
- class SynapseOptimizationResult:
2279
- """Container for synaptic parameter optimization results"""
2280
-
2281
- optimal_params: Dict[str, float]
2282
- achieved_metrics: Dict[str, float]
2283
- target_metrics: Dict[str, float]
2284
- error: float
2285
- optimization_path: List[Dict[str, float]]
2286
-
2287
-
2288
- class SynapseOptimizer:
2289
- def __init__(self, tuner):
2290
- """
2291
- Initialize the synapse optimizer with parameter scaling
2292
-
2293
- Parameters:
2294
- -----------
2295
- tuner : SynapseTuner
2296
- Instance of the SynapseTuner class
2297
- """
2298
- self.tuner = tuner
2299
- self.optimization_history = []
2300
- self.param_scales = {}
2301
-
2302
- def _normalize_params(self, params: np.ndarray, param_names: List[str]) -> np.ndarray:
2303
- """
2304
- Normalize parameters to similar scales for better optimization performance.
2305
-
2306
- Parameters:
2307
- -----------
2308
- params : np.ndarray
2309
- Original parameter values.
2310
- param_names : List[str]
2311
- Names of the parameters corresponding to the values.
2312
-
2313
- Returns:
2314
- --------
2315
- np.ndarray
2316
- Normalized parameter values.
2317
- """
2318
- return np.array([params[i] / self.param_scales[name] for i, name in enumerate(param_names)])
2319
-
2320
- def _denormalize_params(
2321
- self, normalized_params: np.ndarray, param_names: List[str]
2322
- ) -> np.ndarray:
2323
- """
2324
- Convert normalized parameters back to original scale.
2325
-
2326
- Parameters:
2327
- -----------
2328
- normalized_params : np.ndarray
2329
- Normalized parameter values.
2330
- param_names : List[str]
2331
- Names of the parameters corresponding to the normalized values.
2332
-
2333
- Returns:
2334
- --------
2335
- np.ndarray
2336
- Denormalized parameter values in their original scale.
2337
- """
2338
- return np.array(
2339
- [normalized_params[i] * self.param_scales[name] for i, name in enumerate(param_names)]
2340
- )
2341
-
2342
- def _calculate_metrics(self) -> Dict[str, float]:
2343
- """
2344
- Calculate standard metrics from the current simulation.
2345
-
2346
- This method runs either a single event simulation, a train input simulation,
2347
- or both based on configuration flags, and calculates relevant synaptic metrics.
2348
-
2349
- Returns:
2350
- --------
2351
- Dict[str, float]
2352
- Dictionary of calculated metrics including:
2353
- - induction: measure of synaptic facilitation/depression
2354
- - ppr: paired-pulse ratio
2355
- - recovery: recovery from facilitation/depression
2356
- - max_amplitude: maximum synaptic response amplitude
2357
- - rise_time: time for synaptic response to rise from 20% to 80% of peak
2358
- - decay_time: time constant of synaptic response decay
2359
- - latency: synaptic response latency
2360
- - half_width: synaptic response half-width
2361
- - baseline: baseline current
2362
- - amp: peak amplitude from syn_props
2363
- """
2364
- # Set these to 0 for when we return the dict
2365
- induction = 0
2366
- ppr = 0
2367
- recovery = 0
2368
- amp = 0
2369
- rise_time = 0
2370
- decay_time = 0
2371
- latency = 0
2372
- half_width = 0
2373
- baseline = 0
2374
- syn_amp = 0
2375
-
2376
- if self.run_single_event:
2377
- self.tuner.SingleEvent(plot_and_print=False)
2378
- # Use the attributes set by SingleEvent method
2379
- rise_time = getattr(self.tuner, "rise_time", 0)
2380
- decay_time = getattr(self.tuner, "decay_time", 0)
2381
- # Get additional syn_props directly
2382
- syn_props = self.tuner._get_syn_prop()
2383
- latency = syn_props.get("latency", 0)
2384
- half_width = syn_props.get("half_width", 0)
2385
- baseline = syn_props.get("baseline", 0)
2386
- syn_amp = syn_props.get("amp", 0)
2387
-
2388
- if self.run_train_input:
2389
- self.tuner._simulate_model(self.train_frequency, self.train_delay)
2390
- amp = self.tuner._response_amplitude()
2391
- ppr, induction, recovery = self.tuner._calc_ppr_induction_recovery(
2392
- amp, print_math=False
2393
- )
2394
- amp = self.tuner._find_max_amp(amp)
2395
-
2396
- return {
2397
- "induction": float(induction),
2398
- "ppr": float(ppr),
2399
- "recovery": float(recovery),
2400
- "max_amplitude": float(amp),
2401
- "rise_time": float(rise_time),
2402
- "decay_time": float(decay_time),
2403
- "latency": float(latency),
2404
- "half_width": float(half_width),
2405
- "baseline": float(baseline),
2406
- "amp": float(syn_amp),
2407
- }
2408
-
2409
- def _default_cost_function(
2410
- self, metrics: Dict[str, float], target_metrics: Dict[str, float]
2411
- ) -> float:
2412
- """
2413
- Default cost function that minimizes the squared difference between achieved and target induction.
2414
-
2415
- Parameters:
2416
- -----------
2417
- metrics : Dict[str, float]
2418
- Dictionary of calculated metrics from the current simulation.
2419
- target_metrics : Dict[str, float]
2420
- Dictionary of target metrics to optimize towards.
2421
-
2422
- Returns:
2423
- --------
2424
- float
2425
- The squared error between achieved and target induction.
2426
2438
  """
2427
- return float((metrics["induction"] - target_metrics["induction"]) ** 2)
2428
-
2429
- def _objective_function(
2430
- self,
2431
- normalized_params: np.ndarray,
2432
- param_names: List[str],
2433
- cost_function: Callable,
2434
- target_metrics: Dict[str, float],
2435
- ) -> float:
2436
- """
2437
- Calculate error using provided cost function
2438
- """
2439
- # Denormalize parameters
2440
- params = self._denormalize_params(normalized_params, param_names)
2441
-
2442
- # Set parameters
2443
- for name, value in zip(param_names, params):
2444
- setattr(self.tuner.syn, name, value)
2445
-
2446
- # just do this and have the SingleEvent handle it
2447
- if self.run_single_event:
2448
- self.tuner.using_optimizer = True
2449
- self.tuner.param_names = param_names
2450
- self.tuner.params = params
2451
-
2452
- # Calculate metrics and error
2453
- metrics = self._calculate_metrics()
2454
- error = float(cost_function(metrics, target_metrics)) # Ensure error is scalar
2455
-
2456
- # Store history with denormalized values
2457
- history_entry = {
2458
- "params": dict(zip(param_names, params)),
2459
- "metrics": metrics,
2460
- "error": error,
2461
- }
2462
- self.optimization_history.append(history_entry)
2463
-
2464
- return error
2439
+ Sets up interactive sliders for tuning short-term plasticity (STP) parameters in a Jupyter Notebook.
2465
2440
 
2466
- def optimize_parameters(
2467
- self,
2468
- target_metrics: Dict[str, float],
2469
- param_bounds: Dict[str, Tuple[float, float]],
2470
- run_single_event: bool = False,
2471
- run_train_input: bool = True,
2472
- train_frequency: float = 50,
2473
- train_delay: float = 250,
2474
- cost_function: Optional[Callable] = None,
2475
- method: str = "SLSQP",
2476
- init_guess="random",
2477
- ) -> SynapseOptimizationResult:
2478
- """
2479
- Optimize synaptic parameters to achieve target metrics.
2441
+ This method creates an interactive UI with sliders for:
2442
+ - Network selection dropdown (if multiple networks available and config provided)
2443
+ - Connection type selection dropdown
2444
+ - Input frequency
2445
+ - Delay between pulse trains
2446
+ - Duration of stimulation (for continuous input mode)
2447
+ - Synaptic parameters (e.g., Use, tau_f, tau_d) based on the syn model
2480
2448
 
2481
- Parameters:
2482
- -----------
2483
- target_metrics : Dict[str, float]
2484
- Target values for synaptic metrics (e.g., {'induction': 0.2, 'rise_time': 0.5})
2485
- param_bounds : Dict[str, Tuple[float, float]]
2486
- Bounds for each parameter to optimize (e.g., {'tau_d': (5, 50), 'Use': (0.1, 0.9)})
2487
- run_single_event : bool, optional
2488
- Whether to run single event simulations during optimization (default: False)
2489
- run_train_input : bool, optional
2490
- Whether to run train input simulations during optimization (default: True)
2491
- train_frequency : float, optional
2492
- Frequency of the stimulus train in Hz (default: 50)
2493
- train_delay : float, optional
2494
- Delay between pulse trains in ms (default: 250)
2495
- cost_function : Optional[Callable]
2496
- Custom cost function for optimization. If None, uses default cost function
2497
- that optimizes induction.
2498
- method : str, optional
2499
- Optimization method to use (default: 'SLSQP')
2500
- init_guess : str, optional
2501
- Method for initial parameter guess ('random' or 'middle_guess')
2449
+ It also provides buttons for:
2450
+ - Running a single event simulation
2451
+ - Running a train input simulation
2452
+ - Toggling voltage clamp mode
2453
+ - Switching between standard and continuous input modes
2502
2454
 
2503
- Returns:
2504
- --------
2505
- SynapseOptimizationResult
2506
- Results of the optimization including optimal parameters, achieved metrics,
2507
- target metrics, final error, and optimization path.
2455
+ Network Dropdown Feature:
2456
+ ------------------------
2457
+ When the SynapseTuner is initialized with a BMTK config file containing multiple networks:
2458
+ - A network dropdown appears next to the connection dropdown
2459
+ - Users can dynamically switch between networks (e.g., 'network_to_network', 'external_to_network')
2460
+ - Switching networks rebuilds available connections and updates the connection dropdown
2461
+ - The current connection is preserved if it exists in the new network
2462
+ - If multiple networks exist but only one is specified during init, that network is used as default
2508
2463
 
2509
2464
  Notes:
2510
2465
  ------
2511
- This function uses scipy.optimize.minimize to find the optimal parameter values
2512
- that minimize the difference between achieved and target metrics.
2466
+ Ideal for exploratory parameter tuning and interactive visualization of
2467
+ synapse behavior with different parameter values and stimulation protocols.
2468
+ The network dropdown feature enables comprehensive exploration of multi-network
2469
+ BMTK simulations without needing to reinitialize the tuner.
2513
2470
  """
2514
- self.optimization_history = []
2515
- self.train_frequency = train_frequency
2516
- self.train_delay = train_delay
2517
- self.run_single_event = run_single_event
2518
- self.run_train_input = run_train_input
2519
-
2520
- param_names = list(param_bounds.keys())
2521
- bounds = [param_bounds[name] for name in param_names]
2522
-
2523
- if cost_function is None:
2524
- cost_function = self._default_cost_function
2471
+ # Widgets setup (Sliders)
2472
+ freqs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 35, 50, 100, 200]
2473
+ delays = [125, 250, 500, 1000, 2000, 4000]
2474
+ durations = [100, 300, 500, 1000, 2000, 5000, 10000]
2475
+ freq0 = 50
2476
+ delay0 = 250
2477
+ duration0 = 300
2478
+ vlamp_status = self.vclamp
2525
2479
 
2526
- # Calculate scaling factors
2527
- self.param_scales = {
2528
- name: max(abs(bounds[i][0]), abs(bounds[i][1])) for i, name in enumerate(param_names)
2529
- }
2480
+ # Connection dropdown
2481
+ connection_options = sorted(list(self.conn_type_settings.keys()))
2482
+ w_connection = widgets.Dropdown(
2483
+ options=connection_options,
2484
+ value=self.current_connection,
2485
+ description="Connection:",
2486
+ style={'description_width': 'initial'}
2487
+ )
2530
2488
 
2531
- # Normalize bounds
2532
- normalized_bounds = [
2533
- (b[0] / self.param_scales[name], b[1] / self.param_scales[name])
2534
- for name, b in zip(param_names, bounds)
2535
- ]
2489
+ # Network dropdown - only shown if config was provided and multiple networks are available
2490
+ # This enables users to switch between different network datasets dynamically
2491
+ w_network = None
2492
+ if self.config is not None and len(self.available_networks) > 1:
2493
+ w_network = widgets.Dropdown(
2494
+ options=self.available_networks,
2495
+ value=self.current_network,
2496
+ description="Network:",
2497
+ style={'description_width': 'initial'}
2498
+ )
2536
2499
 
2537
- # picks with method of init value we want to use
2538
- if init_guess == "random":
2539
- x0 = np.array([np.random.uniform(b[0], b[1]) for b in bounds])
2540
- elif init_guess == "middle_guess":
2541
- x0 = [(b[0] + b[1]) / 2 for b in bounds]
2542
- else:
2543
- raise Exception("Pick a vaid init guess method either random or midde_guess")
2544
- normalized_x0 = self._normalize_params(np.array(x0), param_names)
2545
-
2546
- # Run optimization
2547
- result = minimize(
2548
- self._objective_function,
2549
- normalized_x0,
2550
- args=(param_names, cost_function, target_metrics),
2551
- method=method,
2552
- bounds=normalized_bounds,
2500
+ w_run = widgets.Button(description="Run Train", icon="history", button_style="primary")
2501
+ w_single = widgets.Button(description="Single Event", icon="check", button_style="success")
2502
+ w_vclamp = widgets.ToggleButton(
2503
+ value=vlamp_status,
2504
+ description="Voltage Clamp",
2505
+ icon="fast-backward",
2506
+ button_style="warning",
2553
2507
  )
2554
-
2555
- # Get final parameters and metrics
2556
- final_params = dict(zip(param_names, self._denormalize_params(result.x, param_names)))
2557
- for name, value in final_params.items():
2558
- setattr(self.tuner.syn, name, value)
2559
- final_metrics = self._calculate_metrics()
2560
-
2561
- return SynapseOptimizationResult(
2562
- optimal_params=final_params,
2563
- achieved_metrics=final_metrics,
2564
- target_metrics=target_metrics,
2565
- error=result.fun,
2566
- optimization_path=self.optimization_history,
2508
+
2509
+ # Voltage clamp amplitude input
2510
+ default_vclamp_amp = getattr(self.conn['spec_settings'], 'vclamp_amp', -70.0)
2511
+ w_vclamp_amp = widgets.FloatText(
2512
+ value=default_vclamp_amp,
2513
+ description="V_clamp (mV):",
2514
+ step=5.0,
2515
+ style={'description_width': 'initial'},
2516
+ layout=widgets.Layout(width='150px')
2517
+ )
2518
+
2519
+ w_input_mode = widgets.ToggleButton(
2520
+ value=False, description="Continuous input", icon="eject", button_style="info"
2567
2521
  )
2522
+ w_input_freq = widgets.SelectionSlider(options=freqs, value=freq0, description="Input Freq")
2568
2523
 
2569
- def plot_optimization_results(self, result: SynapseOptimizationResult):
2570
- """
2571
- Plot optimization results including convergence and final traces.
2524
+ # Sliders for delay and duration
2525
+ self.w_delay = widgets.SelectionSlider(options=delays, value=delay0, description="Delay")
2526
+ self.w_duration = widgets.SelectionSlider(
2527
+ options=durations, value=duration0, description="Duration"
2528
+ )
2572
2529
 
2573
- Parameters:
2574
- -----------
2575
- result : SynapseOptimizationResult
2576
- Results from optimization as returned by optimize_parameters()
2530
+ # Save functionality widgets
2531
+ save_path_text = widgets.Text(
2532
+ value="plot.png",
2533
+ description="Save path:",
2534
+ layout=widgets.Layout(width='300px')
2535
+ )
2536
+ save_button = widgets.Button(description="Save Plot", icon="save", button_style="success")
2577
2537
 
2578
- Notes:
2579
- ------
2580
- This method generates three plots:
2581
- 1. Error convergence plot showing how the error decreased over iterations
2582
- 2. Parameter convergence plots showing how each parameter changed
2583
- 3. Final model response with the optimal parameters
2584
-
2585
- It also prints a summary of the optimization results including target vs. achieved
2586
- metrics and the optimal parameter values.
2587
- """
2588
- # Ensure errors are properly shaped for plotting
2589
- iterations = range(len(result.optimization_path))
2590
- errors = np.array([float(h["error"]) for h in result.optimization_path]).flatten()
2591
-
2592
- # Plot error convergence
2593
- fig1, ax1 = plt.subplots(figsize=(8, 5))
2594
- ax1.plot(iterations, errors, label="Error")
2595
- ax1.set_xlabel("Iteration")
2596
- ax1.set_ylabel("Error")
2597
- ax1.set_title("Error Convergence")
2598
- ax1.set_yscale("log")
2599
- ax1.legend()
2600
- plt.tight_layout()
2601
- plt.show()
2538
+ def save_plot(b):
2539
+ if hasattr(self, 'last_figure') and self.last_figure is not None:
2540
+ try:
2541
+ # Create a new figure with just the first subplot (synaptic current)
2542
+ fig, ax = plt.subplots(figsize=(8, 6))
2543
+
2544
+ # Get the axes from the original figure
2545
+ original_axes = self.last_figure.get_axes()
2546
+ if len(original_axes) > 0:
2547
+ first_ax = original_axes[0]
2548
+
2549
+ # Copy the data from the first subplot
2550
+ for line in first_ax.get_lines():
2551
+ ax.plot(line.get_xdata(), line.get_ydata(),
2552
+ color=line.get_color(), label=line.get_label())
2553
+
2554
+ # Copy axis labels and title
2555
+ ax.set_xlabel(first_ax.get_xlabel())
2556
+ ax.set_ylabel(first_ax.get_ylabel())
2557
+ ax.set_title(first_ax.get_title())
2558
+ ax.set_xlim(first_ax.get_xlim())
2559
+ ax.legend()
2560
+ ax.grid(True)
2561
+
2562
+ # Save the new figure
2563
+ fig.savefig(save_path_text.value)
2564
+ plt.close(fig) # Close the temporary figure
2565
+ print(f"Synaptic current plot saved to {save_path_text.value}")
2566
+ else:
2567
+ print("No subplots found in the figure")
2568
+
2569
+ except Exception as e:
2570
+ print(f"Error saving plot: {e}")
2571
+ else:
2572
+ print("No plot to save")
2602
2573
 
2603
- # Plot parameter convergence
2604
- param_names = list(result.optimal_params.keys())
2605
- num_params = len(param_names)
2606
- fig2, axs = plt.subplots(nrows=num_params, ncols=1, figsize=(8, 5 * num_params))
2574
+ save_button.on_click(save_plot)
2607
2575
 
2608
- if num_params == 1:
2609
- axs = [axs]
2576
+ def create_dynamic_sliders():
2577
+ """Create sliders based on current connection's parameters"""
2578
+ sliders = {}
2579
+ for key, value in self.slider_vars.items():
2580
+ if isinstance(value, (int, float)): # Only create sliders for numeric values
2581
+ if hasattr(self.syn, key):
2582
+ if value == 0:
2583
+ print(
2584
+ f"{key} was set to zero, going to try to set a range of values, try settings the {key} to a nonzero value if you dont like the range!"
2585
+ )
2586
+ slider = widgets.FloatSlider(
2587
+ value=value, min=0, max=1000, step=1, description=key
2588
+ )
2589
+ else:
2590
+ slider = widgets.FloatSlider(
2591
+ value=value, min=0, max=value * 20, step=value / 5, description=key
2592
+ )
2593
+ sliders[key] = slider
2594
+ else:
2595
+ print(f"skipping slider for {key} due to not being a synaptic variable")
2596
+ return sliders
2610
2597
 
2611
- for ax, param in zip(axs, param_names):
2612
- values = [float(h["params"][param]) for h in result.optimization_path]
2613
- ax.plot(iterations, values, label=f"{param}")
2614
- ax.set_xlabel("Iteration")
2615
- ax.set_ylabel("Parameter Value")
2616
- ax.set_title(f"Convergence of {param}")
2617
- ax.legend()
2598
+ # Generate sliders dynamically based on valid numeric entries in self.slider_vars
2599
+ self.dynamic_sliders = create_dynamic_sliders()
2600
+ print(
2601
+ "Setting up slider! The sliders ranges are set by their init value so try changing that if you dont like the slider range!"
2602
+ )
2618
2603
 
2619
- plt.tight_layout()
2604
+ # Create output widget for displaying results
2605
+ output_widget = widgets.Output()
2606
+
2607
+ def run_single_event(*args):
2608
+ clear_output()
2609
+ display(ui)
2610
+ display(output_widget)
2611
+
2612
+ self.vclamp = w_vclamp.value
2613
+ # Update voltage clamp amplitude if voltage clamp is enabled
2614
+ if self.vclamp:
2615
+ # Update the voltage clamp amplitude settings
2616
+ self.conn['spec_settings']['vclamp_amp'] = w_vclamp_amp.value
2617
+ # Update general settings if they exist
2618
+ if hasattr(self, 'general_settings'):
2619
+ self.general_settings['vclamp_amp'] = w_vclamp_amp.value
2620
+ # Update synaptic properties based on slider values
2621
+ self.ispk = None
2622
+
2623
+ # Clear previous results and run simulation
2624
+ output_widget.clear_output()
2625
+ with output_widget:
2626
+ self.SingleEvent()
2627
+
2628
+ def on_connection_change(*args):
2629
+ """Handle connection dropdown change"""
2630
+ try:
2631
+ new_connection = w_connection.value
2632
+ if new_connection != self.current_connection:
2633
+ # Switch to new connection
2634
+ self._switch_connection(new_connection)
2635
+
2636
+ # Recreate dynamic sliders for new connection
2637
+ self.dynamic_sliders = create_dynamic_sliders()
2638
+
2639
+ # Update UI
2640
+ update_ui_layout()
2641
+ update_ui()
2642
+
2643
+ except Exception as e:
2644
+ print(f"Error switching connection: {e}")
2645
+
2646
+ def on_network_change(*args):
2647
+ """
2648
+ Handle network dropdown change events.
2649
+
2650
+ This callback is triggered when the user selects a different network from
2651
+ the network dropdown. It coordinates the complete switching process:
2652
+ 1. Calls _switch_network() to rebuild connections for the new network
2653
+ 2. Updates the connection dropdown options with new network's connections
2654
+ 3. Recreates dynamic sliders for new connection parameters
2655
+ 4. Refreshes the entire UI to reflect all changes
2656
+ """
2657
+ if w_network is None:
2658
+ return
2659
+ try:
2660
+ new_network = w_network.value
2661
+ if new_network != self.current_network:
2662
+ # Switch to new network
2663
+ self._switch_network(new_network)
2664
+
2665
+ # Update connection dropdown options with new network's connections
2666
+ connection_options = list(self.conn_type_settings.keys())
2667
+ w_connection.options = connection_options
2668
+ if connection_options:
2669
+ w_connection.value = self.current_connection
2670
+
2671
+ # Recreate dynamic sliders for new connection
2672
+ self.dynamic_sliders = create_dynamic_sliders()
2673
+
2674
+ # Update UI
2675
+ update_ui_layout()
2676
+ update_ui()
2677
+
2678
+ except Exception as e:
2679
+ print(f"Error switching network: {e}")
2680
+
2681
+ def update_ui_layout():
2682
+ """
2683
+ Update the UI layout with new sliders and network dropdown.
2684
+
2685
+ This function reconstructs the entire UI layout including:
2686
+ - Network dropdown (if available) and connection dropdown in the top row
2687
+ - Button controls and input mode toggles
2688
+ - Parameter sliders arranged in columns
2689
+ """
2690
+ nonlocal ui, slider_columns
2691
+
2692
+ # Add the dynamic sliders to the UI
2693
+ slider_widgets = [slider for slider in self.dynamic_sliders.values()]
2694
+
2695
+ if slider_widgets:
2696
+ half = len(slider_widgets) // 2
2697
+ col1 = VBox(slider_widgets[:half])
2698
+ col2 = VBox(slider_widgets[half:])
2699
+ slider_columns = HBox([col1, col2])
2700
+ else:
2701
+ slider_columns = VBox([])
2702
+
2703
+ # Create button row with voltage clamp controls
2704
+ if w_vclamp.value: # Show voltage clamp amplitude input when toggle is on
2705
+ button_row = HBox([w_run, w_single, w_vclamp, w_vclamp_amp, w_input_mode])
2706
+ else: # Hide voltage clamp amplitude input when toggle is off
2707
+ button_row = HBox([w_run, w_single, w_vclamp, w_input_mode])
2708
+
2709
+ # Construct the top row - include network dropdown if available
2710
+ # This creates a horizontal layout with network dropdown (if present) and connection dropdown
2711
+ if w_network is not None:
2712
+ connection_row = HBox([w_network, w_connection])
2713
+ else:
2714
+ connection_row = HBox([w_connection])
2715
+ slider_row = HBox([w_input_freq, self.w_delay, self.w_duration])
2716
+ save_row = HBox([save_path_text, save_button])
2717
+
2718
+ ui = VBox([connection_row, button_row, slider_row, slider_columns, save_row])
2719
+
2720
+ # Function to update UI based on input mode
2721
+ def update_ui(*args):
2722
+ clear_output()
2723
+ display(ui)
2724
+ display(output_widget)
2725
+
2726
+ self.vclamp = w_vclamp.value
2727
+ # Update voltage clamp amplitude if voltage clamp is enabled
2728
+ if self.vclamp:
2729
+ self.conn['spec_settings']['vclamp_amp'] = w_vclamp_amp.value
2730
+ if hasattr(self, 'general_settings'):
2731
+ self.general_settings['vclamp_amp'] = w_vclamp_amp.value
2732
+
2733
+ self.input_mode = w_input_mode.value
2734
+ syn_props = {var: slider.value for var, slider in self.dynamic_sliders.items()}
2735
+ self._set_syn_prop(**syn_props)
2736
+
2737
+ # Clear previous results and run simulation
2738
+ output_widget.clear_output()
2739
+ with output_widget:
2740
+ if not self.input_mode:
2741
+ self._simulate_model(w_input_freq.value, self.w_delay.value, w_vclamp.value)
2742
+ else:
2743
+ self._simulate_model(w_input_freq.value, self.w_duration.value, w_vclamp.value)
2744
+ amp = self._response_amplitude()
2745
+ self._plot_model(
2746
+ [self.general_settings["tstart"] - self.nstim.interval / 3, self.tstop]
2747
+ )
2748
+ _ = self._calc_ppr_induction_recovery(amp)
2749
+
2750
+ # Function to switch between delay and duration sliders
2751
+ def switch_slider(*args):
2752
+ if w_input_mode.value:
2753
+ self.w_delay.layout.display = "none" # Hide delay slider
2754
+ self.w_duration.layout.display = "" # Show duration slider
2755
+ else:
2756
+ self.w_delay.layout.display = "" # Show delay slider
2757
+ self.w_duration.layout.display = "none" # Hide duration slider
2758
+
2759
+ # Function to handle voltage clamp toggle
2760
+ def on_vclamp_toggle(*args):
2761
+ """Handle voltage clamp toggle changes to show/hide amplitude input"""
2762
+ update_ui_layout()
2763
+ clear_output()
2764
+ display(ui)
2765
+ display(output_widget)
2766
+
2767
+ # Link widgets to their callback functions
2768
+ w_connection.observe(on_connection_change, names="value")
2769
+ # Link network dropdown callback only if network dropdown was created
2770
+ if w_network is not None:
2771
+ w_network.observe(on_network_change, names="value")
2772
+ w_input_mode.observe(switch_slider, names="value")
2773
+ w_vclamp.observe(on_vclamp_toggle, names="value")
2774
+
2775
+ # Hide the duration slider initially until the user selects it
2776
+ self.w_duration.layout.display = "none" # Hide duration slider
2777
+
2778
+ w_single.on_click(run_single_event)
2779
+ w_run.on_click(update_ui)
2780
+
2781
+ # Initial UI setup
2782
+ slider_columns = VBox([])
2783
+ ui = VBox([])
2784
+ update_ui_layout()
2785
+
2786
+ display(ui)
2787
+ update_ui()
2788
+
2789
+ def stp_frequency_response(
2790
+ self,
2791
+ freqs=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 35, 50, 100, 200],
2792
+ delay=250,
2793
+ plot=True,
2794
+ log_plot=True,
2795
+ ):
2796
+ """
2797
+ Analyze synaptic response across different stimulation frequencies.
2798
+
2799
+ This method systematically tests how the synapse model responds to different
2800
+ stimulation frequencies, calculating key short-term plasticity (STP) metrics
2801
+ for each frequency.
2802
+
2803
+ Parameters:
2804
+ -----------
2805
+ freqs : list, optional
2806
+ List of frequencies to analyze (in Hz). Default covers a wide range from 1-200 Hz.
2807
+ delay : float, optional
2808
+ Delay between pulse trains in ms. Default is 250 ms.
2809
+ plot : bool, optional
2810
+ Whether to plot the results. Default is True.
2811
+ log_plot : bool, optional
2812
+ Whether to use logarithmic scale for frequency axis. Default is True.
2813
+
2814
+ Returns:
2815
+ --------
2816
+ dict
2817
+ Dictionary containing frequency-dependent metrics with keys:
2818
+ - 'frequencies': List of tested frequencies
2819
+ - 'ppr': Paired-pulse ratios at each frequency
2820
+ - 'simple_ppr': Simple paired-pulse ratios (2nd/1st pulse) at each frequency
2821
+ - 'induction': Induction values at each frequency
2822
+ - 'recovery': Recovery values at each frequency
2823
+
2824
+ Notes:
2825
+ ------
2826
+ This method is particularly useful for characterizing the frequency-dependent
2827
+ behavior of synapses, such as identifying facilitating vs. depressing regimes
2828
+ or the frequency at which a synapse transitions between these behaviors.
2829
+ """
2830
+ results = {"frequencies": freqs, "ppr": [], "induction": [], "recovery": [], "simple_ppr": []}
2831
+
2832
+ # Store original state
2833
+ original_ispk = self.ispk
2834
+
2835
+ for freq in tqdm(freqs, desc="Analyzing frequencies"):
2836
+ self._simulate_model(freq, delay)
2837
+ amp = self._response_amplitude()
2838
+ ppr, induction, recovery, simple_ppr = self._calc_ppr_induction_recovery(amp, print_math=False)
2839
+
2840
+ results["ppr"].append(float(ppr))
2841
+ results["induction"].append(float(induction))
2842
+ results["recovery"].append(float(recovery))
2843
+ results["simple_ppr"].append(float(simple_ppr))
2844
+
2845
+ # Restore original state
2846
+ self.ispk = original_ispk
2847
+
2848
+ if plot:
2849
+ self._plot_frequency_analysis(results, log_plot=log_plot)
2850
+
2851
+ return results
2852
+
2853
+ def _plot_frequency_analysis(self, results, log_plot):
2854
+ """
2855
+ Plot the frequency-dependent synaptic properties.
2856
+
2857
+ Parameters:
2858
+ -----------
2859
+ results : dict
2860
+ Dictionary containing frequency analysis results with keys:
2861
+ - 'frequencies': List of tested frequencies
2862
+ - 'ppr': Paired-pulse ratios at each frequency
2863
+ - 'simple_ppr': Simple paired-pulse ratios at each frequency
2864
+ - 'induction': Induction values at each frequency
2865
+ - 'recovery': Recovery values at each frequency
2866
+ log_plot : bool
2867
+ Whether to use logarithmic scale for frequency axis
2868
+
2869
+ Notes:
2870
+ ------
2871
+ Creates a figure with three subplots showing:
2872
+ 1. Paired-pulse ratios (both normalized and simple) vs. frequency
2873
+ 2. Induction vs. frequency
2874
+ 3. Recovery vs. frequency
2875
+
2876
+ Each plot includes a horizontal reference line at y=0 or y=1 to indicate
2877
+ the boundary between facilitation and depression.
2878
+ """
2879
+ fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
2880
+
2881
+ # Plot both PPR measures
2882
+ if log_plot:
2883
+ ax1.semilogx(results["frequencies"], results["ppr"], "o-", label="Normalized PPR")
2884
+ ax1.semilogx(results["frequencies"], results["simple_ppr"], "s-", label="Simple PPR")
2885
+ else:
2886
+ ax1.plot(results["frequencies"], results["ppr"], "o-", label="Normalized PPR")
2887
+ ax1.plot(results["frequencies"], results["simple_ppr"], "s-", label="Simple PPR")
2888
+ ax1.axhline(y=1, color="gray", linestyle="--", alpha=0.5)
2889
+ ax1.set_xlabel("Frequency (Hz)")
2890
+ ax1.set_ylabel("Paired Pulse Ratio")
2891
+ ax1.set_title("PPR vs Frequency")
2892
+ ax1.legend()
2893
+ ax1.grid(True)
2894
+
2895
+ # Plot Induction
2896
+ if log_plot:
2897
+ ax2.semilogx(results["frequencies"], results["induction"], "o-")
2898
+ else:
2899
+ ax2.plot(results["frequencies"], results["induction"], "o-")
2900
+ ax2.axhline(y=0, color="gray", linestyle="--", alpha=0.5)
2901
+ ax2.set_xlabel("Frequency (Hz)")
2902
+ ax2.set_ylabel("Induction")
2903
+ ax2.set_title("Induction vs Frequency")
2904
+ ax2.grid(True)
2905
+
2906
+ # Plot Recovery
2907
+ if log_plot:
2908
+ ax3.semilogx(results["frequencies"], results["recovery"], "o-")
2909
+ else:
2910
+ ax3.plot(results["frequencies"], results["recovery"], "o-")
2911
+ ax3.axhline(y=0, color="gray", linestyle="--", alpha=0.5)
2912
+ ax3.set_xlabel("Frequency (Hz)")
2913
+ ax3.set_ylabel("Recovery")
2914
+ ax3.set_title("Recovery vs Frequency")
2915
+ ax3.grid(True)
2916
+
2917
+ plt.tight_layout()
2620
2918
  plt.show()
2621
2919
 
2622
- # Print final results
2623
- print("Optimization Results:")
2624
- print(f"Final Error: {float(result.error):.2e}\n")
2625
- print("Target Metrics:")
2626
- for metric, value in result.target_metrics.items():
2627
- achieved = result.achieved_metrics.get(metric)
2628
- if achieved is not None and metric != "amplitudes": # Skip amplitude array
2629
- print(f"{metric}: {float(achieved):.3f} (target: {float(value):.3f})")
2630
-
2631
- print("\nOptimal Parameters:")
2632
- for param, value in result.optimal_params.items():
2633
- print(f"{param}: {float(value):.3f}")
2634
-
2635
- # Plot final model response
2636
- if self.run_train_input:
2637
- self.tuner._plot_model(
2638
- [
2639
- self.tuner.general_settings["tstart"] - self.tuner.nstim.interval / 3,
2640
- self.tuner.tstop,
2641
- ]
2920
+ def generate_synaptic_table(self, stp_frequency=50.0, stp_delay=250.0, plot=True):
2921
+ """
2922
+ Generate a comprehensive table of synaptic parameters for all connections.
2923
+
2924
+ This method iterates through all available connections, runs simulations to
2925
+ characterize each synapse, and compiles the results into a pandas DataFrame.
2926
+
2927
+ Parameters:
2928
+ -----------
2929
+ stp_frequency : float, optional
2930
+ Frequency in Hz to use for STP (short-term plasticity) analysis. Default is 50.0 Hz.
2931
+ stp_delay : float, optional
2932
+ Delay in ms between pulse trains for STP analysis. Default is 250.0 ms.
2933
+ plot : bool, optional
2934
+ Whether to display the resulting table. Default is True.
2935
+
2936
+ Returns:
2937
+ --------
2938
+ pd.DataFrame
2939
+ DataFrame containing synaptic parameters for each connection with columns:
2940
+ - connection: Connection name
2941
+ - rise_time: 20-80% rise time (ms)
2942
+ - decay_time: Decay time constant (ms)
2943
+ - latency: Response latency (ms)
2944
+ - half_width: Response half-width (ms)
2945
+ - peak_amplitude: Peak synaptic current amplitude (pA)
2946
+ - baseline: Baseline current (pA)
2947
+ - ppr: Paired-pulse ratio (normalized)
2948
+ - simple_ppr: Simple paired-pulse ratio (2nd/1st pulse)
2949
+ - induction: STP induction measure
2950
+ - recovery: STP recovery measure
2951
+
2952
+ Notes:
2953
+ ------
2954
+ This method temporarily switches between connections to characterize each one,
2955
+ then restores the original connection. The STP metrics are calculated at the
2956
+ specified frequency and delay.
2957
+ """
2958
+ # Store original connection to restore later
2959
+ original_connection = self.current_connection
2960
+
2961
+ # Initialize results list
2962
+ results = []
2963
+
2964
+ print(f"Analyzing {len(self.conn_type_settings)} connections...")
2965
+
2966
+ for conn_name in tqdm(self.conn_type_settings.keys(), desc="Analyzing connections"):
2967
+ try:
2968
+ # Switch to this connection
2969
+ self._switch_connection(conn_name)
2970
+
2971
+ # Run single event analysis
2972
+ self.SingleEvent(plot_and_print=False)
2973
+
2974
+ # Get synaptic properties from the single event
2975
+ syn_props = self._get_syn_prop()
2976
+
2977
+ # Run STP analysis at specified frequency
2978
+ stp_results = self.stp_frequency_response(
2979
+ freqs=[stp_frequency],
2980
+ delay=stp_delay,
2981
+ plot=False,
2982
+ log_plot=False
2983
+ )
2984
+
2985
+ # Extract STP metrics for this frequency
2986
+ freq_idx = 0 # Only one frequency tested
2987
+ ppr = stp_results['ppr'][freq_idx]
2988
+ induction = stp_results['induction'][freq_idx]
2989
+ recovery = stp_results['recovery'][freq_idx]
2990
+ simple_ppr = stp_results['simple_ppr'][freq_idx]
2991
+
2992
+ # Compile results for this connection
2993
+ conn_results = {
2994
+ 'connection': conn_name,
2995
+ 'rise_time': float(self.rise_time),
2996
+ 'decay_time': float(self.decay_time),
2997
+ 'latency': float(syn_props.get('latency', 0)),
2998
+ 'half_width': float(syn_props.get('half_width', 0)),
2999
+ 'peak_amplitude': float(syn_props.get('amp', 0)),
3000
+ 'baseline': float(syn_props.get('baseline', 0)),
3001
+ 'ppr': float(ppr),
3002
+ 'simple_ppr': float(simple_ppr),
3003
+ 'induction': float(induction),
3004
+ 'recovery': float(recovery)
3005
+ }
3006
+
3007
+ results.append(conn_results)
3008
+
3009
+ except Exception as e:
3010
+ print(f"Warning: Failed to analyze connection '{conn_name}': {e}")
3011
+ # Add partial results if possible
3012
+ results.append({
3013
+ 'connection': conn_name,
3014
+ 'rise_time': float('nan'),
3015
+ 'decay_time': float('nan'),
3016
+ 'latency': float('nan'),
3017
+ 'half_width': float('nan'),
3018
+ 'peak_amplitude': float('nan'),
3019
+ 'baseline': float('nan'),
3020
+ 'ppr': float('nan'),
3021
+ 'simple_ppr': float('nan'),
3022
+ 'induction': float('nan'),
3023
+ 'recovery': float('nan')
3024
+ })
3025
+
3026
+ # Restore original connection
3027
+ if original_connection in self.conn_type_settings:
3028
+ self._switch_connection(original_connection)
3029
+
3030
+ # Create DataFrame
3031
+ df = pd.DataFrame(results)
3032
+
3033
+ # Set connection as index for better display
3034
+ df = df.set_index('connection')
3035
+
3036
+ if plot:
3037
+ # Display the table
3038
+ print("\nSynaptic Parameters Table:")
3039
+ print("=" * 80)
3040
+ display(df.round(4))
3041
+
3042
+ # Optional: Create a simple bar plot for key metrics
3043
+ try:
3044
+ fig, axes = plt.subplots(2, 2, figsize=(15, 10))
3045
+ fig.suptitle(f'Synaptic Parameters Across Connections (STP at {stp_frequency}Hz)', fontsize=16)
3046
+
3047
+ # Plot rise/decay times
3048
+ df[['rise_time', 'decay_time']].plot(kind='bar', ax=axes[0,0])
3049
+ axes[0,0].set_title('Rise and Decay Times')
3050
+ axes[0,0].set_ylabel('Time (ms)')
3051
+ axes[0,0].tick_params(axis='x', rotation=45)
3052
+
3053
+ # Plot PPR metrics
3054
+ df[['ppr', 'simple_ppr']].plot(kind='bar', ax=axes[0,1])
3055
+ axes[0,1].set_title('Paired-Pulse Ratios')
3056
+ axes[0,1].axhline(y=1, color='gray', linestyle='--', alpha=0.5)
3057
+ axes[0,1].tick_params(axis='x', rotation=45)
3058
+
3059
+ # Plot induction
3060
+ df['induction'].plot(kind='bar', ax=axes[1,0], color='green')
3061
+ axes[1,0].set_title('STP Induction')
3062
+ axes[1,0].axhline(y=0, color='gray', linestyle='--', alpha=0.5)
3063
+ axes[1,0].set_ylabel('Induction')
3064
+ axes[1,0].tick_params(axis='x', rotation=45)
3065
+
3066
+ # Plot recovery
3067
+ df['recovery'].plot(kind='bar', ax=axes[1,1], color='orange')
3068
+ axes[1,1].set_title('STP Recovery')
3069
+ axes[1,1].axhline(y=0, color='gray', linestyle='--', alpha=0.5)
3070
+ axes[1,1].set_ylabel('Recovery')
3071
+ axes[1,1].tick_params(axis='x', rotation=45)
3072
+
3073
+ plt.tight_layout()
3074
+ plt.show()
3075
+
3076
+ except Exception as e:
3077
+ print(f"Warning: Could not create plots: {e}")
3078
+
3079
+ return df
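For orientation, a minimal usage sketch of generate_synaptic_table, assuming an already constructed SynapseTuner instance named tuner; the column selection and output filename below are illustrative only, not part of the package:

    df = tuner.generate_synaptic_table(stp_frequency=50.0, stp_delay=250.0, plot=False)
    print(df[["rise_time", "decay_time", "ppr", "simple_ppr", "induction", "recovery"]])
    df.to_csv("synaptic_parameters.csv")  # illustrative output path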
3080
+
3081
+
3082
+ class GapJunctionTuner:
3083
+ def __init__(
3084
+ self,
3085
+ mechanisms_dir: Optional[str] = None,
3086
+ templates_dir: Optional[str] = None,
3087
+ config: Optional[str] = None,
3088
+ general_settings: Optional[dict] = None,
3089
+ conn_type_settings: Optional[dict] = None,
3090
+ hoc_cell: Optional[object] = None,
3091
+ ):
3092
+ """
3093
+ Initialize the GapJunctionTuner class.
3094
+
3095
+ Parameters:
3096
+ -----------
3097
+ mechanisms_dir : str, optional
3098
+ Directory path containing the compiled mod files needed for NEURON mechanisms.
3099
+ templates_dir : str, optional
3100
+ Directory path containing cell template files (.hoc or .py) loaded into NEURON.
3101
+ config : str, optional
3102
+ Path to a BMTK config.json file. Can be used to load mechanisms, templates, and other settings.
3103
+ general_settings : dict, optional
3104
+ General settings dictionary including parameters like simulation time step, duration, and temperature.
3105
+ conn_type_settings : dict, optional
3106
+ A dictionary containing connection-specific settings for gap junctions.
3107
+ hoc_cell : object, optional
3108
+ An already loaded NEURON cell object. If provided, template loading and cell creation will be skipped.
3109
+ """
3110
+ self.hoc_cell = hoc_cell
3111
+
3112
+ if hoc_cell is None:
3113
+ if config is None and (mechanisms_dir is None or templates_dir is None):
3114
+ raise ValueError(
3115
+ "Either a config file, both mechanisms_dir and templates_dir, or a hoc_cell must be provided."
3116
+ )
3117
+
3118
+ if config is None:
3119
+ neuron.load_mechanisms(mechanisms_dir)
3120
+ h.load_file(templates_dir)
3121
+ else:
3122
+ # this will load both mechs and templates
3123
+ load_templates_from_config(config)
3124
+
3125
+ # Use default general settings if not provided, merge with user-provided
3126
+ if general_settings is None:
3127
+ self.general_settings: dict = DEFAULT_GAP_JUNCTION_GENERAL_SETTINGS.copy()
3128
+ else:
3129
+ self.general_settings = {**DEFAULT_GAP_JUNCTION_GENERAL_SETTINGS, **general_settings}
3130
+ self.conn_type_settings = conn_type_settings
3131
+
3132
+ self._syn_params_cache = {}
3133
+ self.config = config
3134
+ self.available_networks = []
3135
+ self.current_network = None
3136
+ self.last_figure = None
3137
+ if self.conn_type_settings is None and self.config is not None:
3138
+ self.conn_type_settings = self._build_conn_type_settings_from_config(self.config)
3139
+ if self.conn_type_settings is None or len(self.conn_type_settings) == 0:
3140
+ raise ValueError("conn_type_settings must be provided or config must be given to load gap junction connections from")
3141
+ self.current_connection = list(self.conn_type_settings.keys())[0]
3142
+ self.conn = self.conn_type_settings[self.current_connection]
3143
+
3144
+ h.tstop = self.general_settings["tstart"] + self.general_settings["tdur"] + 100.0
3145
+ h.dt = self.general_settings["dt"] # Time step (resolution) of the simulation in ms
3146
+ h.steps_per_ms = 1 / h.dt
3147
+ h.celsius = self.general_settings["celsius"]
3148
+
3149
+ # Clean up any existing parallel context before setting up gap junctions
3150
+ try:
3151
+ pc_temp = h.ParallelContext()
3152
+ pc_temp.done() # Clean up any existing parallel context
3153
+ except Exception:
3154
+ pass # Ignore errors if no existing context
3155
+
3156
+ # Force cleanup
3157
+ import gc
3158
+ gc.collect()
3159
+
3160
+ # set up gap junctions
3161
+ self.pc = h.ParallelContext()
3162
+
3163
+ # Use provided hoc_cell or create new cells
3164
+ if self.hoc_cell is not None:
3165
+ self.cell1 = self.hoc_cell
3166
+ # For gap junctions, we need two cells, so create a second one if using hoc_cell
3167
+ self.cell_name = self.conn['cell']
3168
+ self.cell2 = getattr(h, self.cell_name)()
3169
+ else:
3170
+ print(self.conn)
3171
+ self.cell_name = self.conn['cell']
3172
+ self.cell1 = getattr(h, self.cell_name)()
3173
+ self.cell2 = getattr(h, self.cell_name)()
3174
+
3175
+ self.icl = h.IClamp(self.cell1.soma[0](0.5))
3176
+ self.icl.delay = self.general_settings["tstart"]
3177
+ self.icl.dur = self.general_settings["tdur"]
3178
+ self.icl.amp = self.general_settings["iclamp_amp"] # nA
3179
+
3180
+ sec1 = list(self.cell1.all)[self.conn["sec_id"]]
3181
+ sec2 = list(self.cell2.all)[self.conn["sec_id"]]
3182
+
3183
+ # Use unique IDs to avoid conflicts with existing parallel context setups
3184
+ import time
3185
+ unique_id = int(time.time() * 1000) % 10000 # Use timestamp as unique base ID
3186
+
3187
+ self.pc.source_var(sec1(self.conn["sec_x"])._ref_v, unique_id, sec=sec1)
3188
+ self.gap_junc_1 = h.Gap(sec1(0.5))
3189
+ self.pc.target_var(self.gap_junc_1._ref_vgap, unique_id + 1)
3190
+
3191
+ self.pc.source_var(sec2(self.conn["sec_x"])._ref_v, unique_id + 1, sec=sec2)
3192
+ self.gap_junc_2 = h.Gap(sec2(0.5))
3193
+ self.pc.target_var(self.gap_junc_2._ref_vgap, unique_id)
3194
+
3195
+ self.pc.setup_transfer()
3196
+
3197
+ # Now it's safe to initialize NEURON
3198
+ h.finitialize()
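A minimal construction sketch for GapJunctionTuner with explicitly provided settings; the directory paths, hoc template name, and connection label are placeholders rather than values shipped with bmtool:

    gj_tuner = GapJunctionTuner(
        mechanisms_dir="components/mechanisms",               # placeholder path
        templates_dir="components/templates/templates.hoc",   # placeholder path
        conn_type_settings={
            "PV2PV_gj": {"cell": "PVCell", "sec_id": 0, "sec_x": 0.5},  # placeholder template/name
        },
    )
    gj_tuner.model(0.001)   # trial gap-junction value
    gj_tuner.plot_model()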
3199
+
3200
+ def _load_synaptic_params_from_config(self, config: dict, dynamics_params: str) -> dict:
+ """Load a dynamics_params JSON file from the synaptic_models_dir referenced in the config; returns {} on failure."""
3201
+ try:
3202
+ # Get the synaptic models directory from config
3203
+ synaptic_models_dir = config.get('components', {}).get('synaptic_models_dir', '')
3204
+ if synaptic_models_dir:
3205
+ # Handle path variables
3206
+ if synaptic_models_dir.startswith('$'):
3207
+ # This is a placeholder, try to resolve it
3208
+ config_dir = os.path.dirname(config.get('config_path', ''))
3209
+ synaptic_models_dir = synaptic_models_dir.replace('$COMPONENTS_DIR',
3210
+ os.path.join(config_dir, 'components'))
3211
+ synaptic_models_dir = synaptic_models_dir.replace('$BASE_DIR', config_dir)
3212
+
3213
+ dynamics_file = os.path.join(synaptic_models_dir, dynamics_params)
3214
+
3215
+ if os.path.exists(dynamics_file):
3216
+ with open(dynamics_file, 'r') as f:
3217
+ return json.load(f)
3218
+ else:
3219
+ print(f"Warning: Dynamics params file not found: {dynamics_file}")
3220
+ except Exception as e:
3221
+ print(f"Warning: Error loading synaptic parameters: {e}")
3222
+
3223
+ return {}
3224
+
3225
+ def _load_available_networks(self) -> None:
3226
+ """
3227
+ Load available network names from the config file for the network dropdown feature.
3228
+
3229
+ This method is automatically called during initialization when a config file is provided.
3230
+ It populates the available_networks list, which enables the network dropdown in
3231
+ InteractiveTuner when multiple networks are available.
3232
+
3233
+ Network Dropdown Behavior:
3234
+ -------------------------
3235
+ - If only one network exists: No network dropdown is shown
3236
+ - If multiple networks exist: Network dropdown appears next to connection dropdown
3237
+ - Networks are loaded from the edges data in the config file
3238
+ - Current network defaults to the first available if not specified during init
3239
+ """
3240
+ if self.config is None:
3241
+ self.available_networks = []
3242
+ return
3243
+
3244
+ try:
3245
+ edges = load_edges_from_config(self.config)
3246
+ self.available_networks = list(edges.keys())
3247
+
3248
+ # Set current network to first available if not specified
3249
+ if self.current_network is None and self.available_networks:
3250
+ self.current_network = self.available_networks[0]
3251
+ except Exception as e:
3252
+ print(f"Warning: Could not load networks from config: {e}")
3253
+ self.available_networks = []
3254
+
3255
+ def _build_conn_type_settings_from_config(self, config_path: str) -> Dict[str, dict]:
+ """Build gap-junction conn_type_settings by scanning the nodes and edges referenced in a BMTK config."""
3256
+ # Load configuration and get nodes and edges using util.py methods
3257
+ config = load_config(config_path)
3258
+ # Ensure the config dict knows its source path so path substitutions can be resolved
3259
+ try:
3260
+ config['config_path'] = config_path
3261
+ except Exception:
3262
+ pass
3263
+ nodes = load_nodes_from_config(config_path)
3264
+ edges = load_edges_from_config(config_path)
3265
+
3266
+ conn_type_settings = {}
3267
+
3268
+ # Process all edge datasets
3269
+ for edge_dataset_name, edge_df in edges.items():
3270
+ if edge_df.empty:
3271
+ continue
3272
+
3273
+ # Merging with node data to get model templates
3274
+ source_node_df = None
3275
+ target_node_df = None
3276
+
3277
+ # First, try to deterministically parse the edge_dataset_name for patterns like '<src>_to_<tgt>'
3278
+ if '_to_' in edge_dataset_name:
3279
+ parts = edge_dataset_name.split('_to_')
3280
+ if len(parts) == 2:
3281
+ src_name, tgt_name = parts
3282
+ if src_name in nodes:
3283
+ source_node_df = nodes[src_name].add_prefix('source_')
3284
+ if tgt_name in nodes:
3285
+ target_node_df = nodes[tgt_name].add_prefix('target_')
3286
+
3287
+ # If not found by parsing name, fall back to inspecting a sample edge row
3288
+ if source_node_df is None or target_node_df is None:
3289
+ sample_edge = edge_df.iloc[0] if len(edge_df) > 0 else None
3290
+ if sample_edge is not None:
3291
+ source_pop_name = sample_edge.get('source_population', '')
3292
+ target_pop_name = sample_edge.get('target_population', '')
3293
+ if source_pop_name in nodes:
3294
+ source_node_df = nodes[source_pop_name].add_prefix('source_')
3295
+ if target_pop_name in nodes:
3296
+ target_node_df = nodes[target_pop_name].add_prefix('target_')
3297
+
3298
+ # As a last resort, attempt to heuristically match
3299
+ if source_node_df is None or target_node_df is None:
3300
+ for pop_name, node_df in nodes.items():
3301
+ if source_node_df is None and (edge_dataset_name.startswith(pop_name) or edge_dataset_name.endswith(pop_name)):
3302
+ source_node_df = node_df.add_prefix('source_')
3303
+ if target_node_df is None and (edge_dataset_name.startswith(pop_name) or edge_dataset_name.endswith(pop_name)):
3304
+ target_node_df = node_df.add_prefix('target_')
3305
+
3306
+ if source_node_df is None or target_node_df is None:
3307
+ print(f"Warning: Could not find node data for edge dataset {edge_dataset_name}")
3308
+ continue
3309
+
3310
+ # Merge edge data with source node info
3311
+ edges_with_source = pd.merge(
3312
+ edge_df.reset_index(),
3313
+ source_node_df,
3314
+ how='left',
3315
+ left_on='source_node_id',
3316
+ right_index=True
2642
3317
  )
2643
- amp = self.tuner._response_amplitude()
2644
- self.tuner._calc_ppr_induction_recovery(amp)
2645
- if self.run_single_event:
2646
- self.tuner.ispk = None
2647
- self.tuner.SingleEvent(plot_and_print=True)
2648
-
2649
- # dataclass means just init the typehints as self.typehint. looks a bit cleaner
2650
- @dataclass
2651
- class GapOptimizationResult:
2652
- """Container for gap junction optimization results"""
2653
-
2654
- optimal_resistance: float
2655
- achieved_cc: float
2656
- target_cc: float
2657
- error: float
2658
- optimization_path: List[Dict[str, float]]
2659
-
3318
+
3319
+ # Merge with target node info
3320
+ edges_with_nodes = pd.merge(
3321
+ edges_with_source,
3322
+ target_node_df,
3323
+ how='left',
3324
+ left_on='target_node_id',
3325
+ right_index=True
3326
+ )
3327
+
3328
+ # Skip edge datasets that don't have gap junction information
3329
+ if 'is_gap_junction' not in edges_with_nodes.columns:
3330
+ continue
3331
+
3332
+ # Filter to only gap junction edges
3333
+ # Handle NaN values in is_gap_junction column
3334
+ gap_junction_mask = edges_with_nodes['is_gap_junction'].fillna(False) == True
3335
+ gap_junction_edges = edges_with_nodes[gap_junction_mask]
3336
+ if gap_junction_edges.empty:
3337
+ continue
3338
+
3339
+ # Get unique edge types from the gap junction edges
3340
+ if 'edge_type_id' in gap_junction_edges.columns:
3341
+ edge_types = gap_junction_edges['edge_type_id'].unique()
3342
+ else:
3343
+ edge_types = [None] # Single edge type
3344
+
3345
+ # Process each edge type
3346
+ for edge_type_id in edge_types:
3347
+ # Filter edges for this type
3348
+ if edge_type_id is not None:
3349
+ edge_type_data = gap_junction_edges[gap_junction_edges['edge_type_id'] == edge_type_id]
3350
+ else:
3351
+ edge_type_data = gap_junction_edges
3352
+
3353
+ if len(edge_type_data) == 0:
3354
+ continue
3355
+
3356
+ # Get representative edge for this type
3357
+ edge_info = edge_type_data.iloc[0]
3358
+
3359
+ # Process gap junction
3360
+ source_model_template = edge_info.get('source_model_template', '')
3361
+ target_model_template = edge_info.get('target_model_template', '')
3362
+
3363
+ source_cell_type = source_model_template.replace('hoc:', '') if source_model_template.startswith('hoc:') else source_model_template
3364
+ target_cell_type = target_model_template.replace('hoc:', '') if target_model_template.startswith('hoc:') else target_model_template
3365
+
3366
+ if source_cell_type != target_cell_type:
3367
+ continue # Only process gap junctions between same cell types
3368
+
3369
+ source_pop = edge_info.get('source_pop_name', '')
3370
+ target_pop = edge_info.get('target_pop_name', '')
3371
+
3372
+ conn_name = f"{source_pop}2{target_pop}_gj"
3373
+ if edge_type_id is not None:
3374
+ conn_name += f"_type_{edge_type_id}"
3375
+
3376
+ conn_settings = {
3377
+ 'cell': source_cell_type,
3378
+ 'sec_id': 0,
3379
+ 'sec_x': 0.5,
3380
+ 'iclamp_amp': -0.01,
3381
+ 'spec_syn_param': {}
3382
+ }
3383
+
3384
+ # Load dynamics params
3385
+ dynamics_file_name = edge_info.get('dynamics_params', '')
3386
+ if dynamics_file_name and dynamics_file_name.upper() != 'NULL':
3387
+ try:
3388
+ syn_params = self._load_synaptic_params_from_config(config, dynamics_file_name)
3389
+ conn_settings['spec_syn_param'] = syn_params
3390
+ except Exception as e:
3391
+ print(f"Warning: could not load dynamics_params file '{dynamics_file_name}': {e}")
3392
+
3393
+ conn_type_settings[conn_name] = conn_settings
3394
+
3395
+ return conn_type_settings
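Each generated entry has roughly the following shape; the connection and template names below are illustrative, while sec_id, sec_x, and iclamp_amp are the defaults assigned above:

    # {
    #     "PV2PV_gj": {
    #         "cell": "PVCell",        # shared hoc template of the two coupled cells
    #         "sec_id": 0,             # section index used for the gap junction
    #         "sec_x": 0.5,            # location along that section
    #         "iclamp_amp": -0.01,     # nA current step used during tuning
    #         "spec_syn_param": {},    # contents of the dynamics_params JSON, if found
    #     },
    # }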
2660
3396
 
2661
- class GapJunctionOptimizer:
2662
- def __init__(self, tuner):
3397
+ def _switch_connection(self, new_connection: str) -> None:
2663
3398
  """
2664
- Initialize the gap junction optimizer
2665
-
3399
+ Switch to a different gap junction connection and update all related properties.
3400
+
2666
3401
  Parameters:
2667
3402
  -----------
2668
- tuner : GapJunctionTuner
2669
- Instance of the GapJunctionTuner class
3403
+ new_connection : str
3404
+ Name of the new connection type to switch to.
2670
3405
  """
2671
- self.tuner = tuner
2672
- self.optimization_history = []
3406
+ if new_connection not in self.conn_type_settings:
3407
+ raise ValueError(f"Connection '{new_connection}' not found in conn_type_settings")
3408
+
3409
+ # Update current connection
3410
+ self.current_connection = new_connection
3411
+ self.conn = self.conn_type_settings[new_connection]
3412
+
3413
+ # Check if cell type changed
3414
+ new_cell_name = self.conn['cell']
3415
+ if self.cell_name != new_cell_name:
3416
+ self.cell_name = new_cell_name
3417
+
3418
+ # Recreate cells
3419
+ if self.hoc_cell is None:
3420
+ self.cell1 = getattr(h, self.cell_name)()
3421
+ self.cell2 = getattr(h, self.cell_name)()
3422
+ else:
3423
+ # For hoc_cell, recreate the second cell
3424
+ self.cell2 = getattr(h, self.cell_name)()
3425
+
3426
+ # Recreate IClamp
3427
+ self.icl = h.IClamp(self.cell1.soma[0](0.5))
3428
+ self.icl.delay = self.general_settings["tstart"]
3429
+ self.icl.dur = self.general_settings["tdur"]
3430
+ self.icl.amp = self.general_settings["iclamp_amp"]
3431
+ else:
3432
+ # Update IClamp parameters even if same cell type
3433
+ self.icl.amp = self.general_settings["iclamp_amp"]
3434
+
3435
+ # Always recreate gap junctions when switching connections
3436
+ # (even for same cell type, sec_id or sec_x might differ)
3437
+
3438
+ # Clean up previous gap junctions and parallel context
3439
+ if hasattr(self, 'gap_junc_1'):
3440
+ del self.gap_junc_1
3441
+ if hasattr(self, 'gap_junc_2'):
3442
+ del self.gap_junc_2
3443
+
3444
+ # Properly clean up the existing parallel context
3445
+ if hasattr(self, 'pc'):
3446
+ self.pc.done() # Clean up existing parallel context
3447
+
3448
+ # Force garbage collection and reset NEURON state
3449
+ import gc
3450
+ gc.collect()
3451
+ h.finitialize()
3452
+
3453
+ # Create a fresh parallel context after cleanup
3454
+ self.pc = h.ParallelContext()
3455
+
3456
+ try:
3457
+ sec1 = list(self.cell1.all)[self.conn["sec_id"]]
3458
+ sec2 = list(self.cell2.all)[self.conn["sec_id"]]
3459
+
3460
+ # Use unique IDs to avoid conflicts with existing parallel context setups
3461
+ import time
3462
+ unique_id = int(time.time() * 1000) % 10000 # Use timestamp as unique base ID
3463
+
3464
+ self.pc.source_var(sec1(self.conn["sec_x"])._ref_v, unique_id, sec=sec1)
3465
+ self.gap_junc_1 = h.Gap(sec1(0.5))
3466
+ self.pc.target_var(self.gap_junc_1._ref_vgap, unique_id + 1)
3467
+
3468
+ self.pc.source_var(sec2(self.conn["sec_x"])._ref_v, unique_id + 1, sec=sec2)
3469
+ self.gap_junc_2 = h.Gap(sec2(0.5))
3470
+ self.pc.target_var(self.gap_junc_2._ref_vgap, unique_id)
3471
+
3472
+ self.pc.setup_transfer()
3473
+ except Exception as e:
3474
+ print(f"Error setting up gap junctions: {e}")
3475
+ # Try to continue with basic setup
3476
+ self.gap_junc_1 = h.Gap(list(self.cell1.all)[self.conn["sec_id"]](0.5))
3477
+ self.gap_junc_2 = h.Gap(list(self.cell2.all)[self.conn["sec_id"]](0.5))
3478
+
3479
+ # Reset NEURON state after complete setup
3480
+ h.finitialize()
3481
+
3482
+ print(f"Successfully switched to connection: {new_connection}")
2673
3483
 
2674
- def _objective_function(self, resistance: float, target_cc: float) -> float:
3484
+ def model(self, resistance):
2675
3485
  """
2676
- Calculate error between achieved and target coupling coefficient
3486
+ Run a simulation with a specified gap junction resistance.
2677
3487
 
2678
3488
  Parameters:
2679
3489
  -----------
2680
3490
  resistance : float
2681
- Gap junction resistance to try
2682
- target_cc : float
2683
- Target coupling coefficient to match
2684
-
2685
- Returns:
2686
- --------
2687
- float : Error between achieved and target coupling coefficient
2688
- """
2689
- # Run model with current resistance
2690
- self.tuner.model(resistance)
2691
-
2692
- # Calculate coupling coefficient
2693
- achieved_cc = self.tuner.coupling_coefficient(
2694
- self.tuner.t_vec,
2695
- self.tuner.soma_v_1,
2696
- self.tuner.soma_v_2,
2697
- self.tuner.general_settings["tstart"],
2698
- self.tuner.general_settings["tstart"] + self.tuner.general_settings["tdur"],
2699
- )
2700
-
2701
- # Calculate error
2702
- error = (achieved_cc - target_cc) ** 2 # MSE
2703
-
2704
- # Store history
2705
- self.optimization_history.append(
2706
- {"resistance": resistance, "achieved_cc": achieved_cc, "error": error}
2707
- )
2708
-
2709
- return error
2710
-
2711
- def optimize_resistance(
2712
- self, target_cc: float, resistance_bounds: tuple = (1e-4, 1e-2), method: str = "bounded"
2713
- ) -> GapOptimizationResult:
2714
- """
2715
- Optimize gap junction resistance to achieve a target coupling coefficient.
2716
-
2717
- Parameters:
2718
- -----------
2719
- target_cc : float
2720
- Target coupling coefficient to achieve (between 0 and 1)
2721
- resistance_bounds : tuple, optional
2722
- (min, max) bounds for resistance search in MOhm. Default is (1e-4, 1e-2).
2723
- method : str, optional
2724
- Optimization method to use. Default is 'bounded' which works well
2725
- for single-parameter optimization.
2726
-
2727
- Returns:
2728
- --------
2729
- GapOptimizationResult
2730
- Container with optimization results including:
2731
- - optimal_resistance: The optimized resistance value
2732
- - achieved_cc: The coupling coefficient achieved with the optimal resistance
2733
- - target_cc: The target coupling coefficient
2734
- - error: The final error (squared difference between target and achieved)
2735
- - optimization_path: List of all values tried during optimization
3491
+ The gap junction resistance value (in MOhm) to use for the simulation.
2736
3492
 
2737
3493
  Notes:
2738
3494
  ------
2739
- Uses scipy.optimize.minimize_scalar with bounded method, which is
2740
- appropriate for this single-parameter optimization problem.
3495
+ This method sets up the gap junction resistance, initializes recording vectors for time
3496
+ and membrane voltages of both cells, and runs the NEURON simulation.
2741
3497
  """
2742
- self.optimization_history = []
2743
-
2744
- # Run optimization
2745
- result = minimize_scalar(
2746
- self._objective_function, args=(target_cc,), bounds=resistance_bounds, method=method
2747
- )
3498
+ self.gap_junc_1.g = resistance
3499
+ self.gap_junc_2.g = resistance
2748
3500
 
2749
- # Run final model with optimal resistance
2750
- self.tuner.model(result.x)
2751
- final_cc = self.tuner.coupling_coefficient(
2752
- self.tuner.t_vec,
2753
- self.tuner.soma_v_1,
2754
- self.tuner.soma_v_2,
2755
- self.tuner.general_settings["tstart"],
2756
- self.tuner.general_settings["tstart"] + self.tuner.general_settings["tdur"],
2757
- )
3501
+ t_vec = h.Vector()
3502
+ soma_v_1 = h.Vector()
3503
+ soma_v_2 = h.Vector()
3504
+ t_vec.record(h._ref_t)
3505
+ soma_v_1.record(self.cell1.soma[0](0.5)._ref_v)
3506
+ soma_v_2.record(self.cell2.soma[0](0.5)._ref_v)
2758
3507
 
2759
- # Package up our results
2760
- optimization_result = GapOptimizationResult(
2761
- optimal_resistance=result.x,
2762
- achieved_cc=final_cc,
2763
- target_cc=target_cc,
2764
- error=result.fun,
2765
- optimization_path=self.optimization_history,
2766
- )
3508
+ self.t_vec = t_vec
3509
+ self.soma_v_1 = soma_v_1
3510
+ self.soma_v_2 = soma_v_2
2767
3511
 
2768
- return optimization_result
3512
+ h.finitialize(-70 * mV)
3513
+ h.continuerun(h.tstop * ms)
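A brief sketch pairing model() with the coupling_coefficient() helper defined below, mirroring how the removed optimizer used them; the resistance value is arbitrary:

    gj_tuner.model(0.0005)
    cc = gj_tuner.coupling_coefficient(
        gj_tuner.t_vec,
        gj_tuner.soma_v_1,
        gj_tuner.soma_v_2,
        gj_tuner.general_settings["tstart"],
        gj_tuner.general_settings["tstart"] + gj_tuner.general_settings["tdur"],
    )
    print(f"coupling coefficient: {cc:.3f}")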
2769
3514
 
2770
- def plot_optimization_results(self, result: GapOptimizationResult):
3515
+ def plot_model(self):
2771
3516
  """
2772
- Plot optimization results including convergence and final voltage traces
3517
+ Plot the voltage traces of both cells to visualize gap junction coupling.
2773
3518
 
2774
- Parameters:
2775
- -----------
2776
- result : GapOptimizationResult
2777
- Results from optimization
3519
+ This method creates a plot showing the membrane potential of both cells over time,
3520
+ highlighting the effect of gap junction coupling when a current step is applied to cell 1.
2778
3521
  """
2779
- fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))
2780
-
2781
- # Plot voltage traces
2782
3522
  t_range = [
2783
- self.tuner.general_settings["tstart"] - 100.0,
2784
- self.tuner.general_settings["tstart"] + self.tuner.general_settings["tdur"] + 100.0,
3523
+ self.general_settings["tstart"] - 100.0,
3524
+ self.general_settings["tstart"] + self.general_settings["tdur"] + 100.0,
2785
3525
  ]
2786
- t = np.array(self.tuner.t_vec)
2787
- v1 = np.array(self.tuner.soma_v_1)
2788
- v2 = np.array(self.tuner.soma_v_2)
3526
+ t = np.array(self.t_vec)
3527
+ v1 = np.array(self.soma_v_1)
3528
+ v2 = np.array(self.soma_v_2)
2789
3529
  tidx = (t >= t_range[0]) & (t <= t_range[1])
2790
3530
 
2791
- ax1.plot(t[tidx], v1[tidx], "b", label=f"{self.tuner.cell_name} 1")
2792
- ax1.plot(t[tidx], v2[tidx], "r", label=f"{self.tuner.cell_name} 2")
2793
- ax1.set_xlabel("Time (ms)")
2794
- ax1.set_ylabel("Membrane Voltage (mV)")
2795
- ax1.legend()
2796
- ax1.set_title("Optimized Voltage Traces")
2797
-
2798
- # Plot error convergence
2799
- errors = [h["error"] for h in result.optimization_path]
2800
- ax2.plot(errors)
2801
- ax2.set_xlabel("Iteration")
2802
- ax2.set_ylabel("Error")
2803
- ax2.set_title("Error Convergence")
2804
- ax2.set_yscale("log")
2805
-
2806
- # Plot resistance convergence
2807
- resistances = [h["resistance"] for h in result.optimization_path]
2808
- ax3.plot(resistances)
2809
- ax3.set_xlabel("Iteration")
2810
- ax3.set_ylabel("Resistance")
2811
- ax3.set_title("Resistance Convergence")
2812
- ax3.set_yscale("log")
2813
-
2814
- # Print final results
2815
- result_text = (
2816
- f"Optimal Resistance: {result.optimal_resistance:.2e}\n"
2817
- f"Target CC: {result.target_cc:.3f}\n"
2818
- f"Achieved CC: {result.achieved_cc:.3f}\n"
2819
- f"Final Error: {result.error:.2e}"
2820
- )
2821
- ax4.text(0.1, 0.7, result_text, transform=ax4.transAxes, fontsize=10)
2822
- ax4.axis("off")
2823
-
2824
- plt.tight_layout()
2825
- plt.show()
3531
+ plt.figure()
3532
+ plt.plot(t[tidx], v1[tidx], "b", label=f"{self.cell_name} 1")
3533
+ plt.plot(t[tidx], v2[tidx], "r", label=f"{self.cell_name} 2")
3534
+ plt.title(f"{self.cell_name} gap junction")
3535
+ plt.xlabel("Time (ms)")
3536
+ plt.ylabel("Membrane Voltage (mV)")
3537
+ plt.legend()
3538
+ self.last_figure = plt.gcf()
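Because plot_model stores the figure handle on last_figure, a caller can persist it afterwards; the filename here is illustrative:

    gj_tuner.plot_model()
    if gj_tuner.last_figure is not None:
        gj_tuner.last_figure.savefig("gap_junction_traces.png", dpi=150)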
2826
3539
 
2827
- def parameter_sweep(self, resistance_range: np.ndarray) -> dict:
3540
+ def coupling_coefficient(self, t, v1, v2, t_start, t_end, dt=h.dt):
2828
3541
  """
2829
- Perform a parameter sweep across different resistance values.
3542
+ Calculate the coupling coefficient between two cells connected by a gap junction.
2830
3543
 
2831
3544
  Parameters:
2832
3545
  -----------
2833
- resistance_range : np.ndarray
2834
- Array of resistance values to test.
3546
+ t : array-like
3547
+ Time vector.
3548
+ v1 : array-like
3549
+ Voltage trace of the cell receiving the current injection.
3550
+ v2 : array-like
3551
+ Voltage trace of the coupled cell.
3552
+ t_start : float
3553
+ Start time for calculating the steady-state voltage change.
3554
+ t_end : float
3555
+ End time for calculating the steady-state voltage change.
3556
+ dt : float, optional
3557
+ Time step of the simulation. Default is h.dt.
2835
3558
 
2836
3559
  Returns:
2837
3560
  --------
2838
- dict
2839
- Dictionary containing the results of the parameter sweep, with keys:
2840
- - 'resistance': List of resistance values tested
2841
- - 'coupling_coefficient': Corresponding coupling coefficients
2842
-
2843
- Notes:
2844
- ------
2845
- This method is useful for understanding the relationship between gap junction
2846
- resistance and coupling coefficient before attempting optimization.
2847
- """
2848
- results = {"resistance": [], "coupling_coefficient": []}
2849
-
2850
- for resistance in tqdm(resistance_range, desc="Sweeping resistance values"):
2851
- self.tuner.model(resistance)
2852
- cc = self.tuner.coupling_coefficient(
2853
- self.tuner.t_vec,
2854
- self.tuner.soma_v_1,
2855
- self.tuner.soma_v_2,
2856
- self.tuner.general_settings["tstart"],
2857
- self.tuner.general_settings["tstart"] + self.tuner.general_settings["tdur"],
2858
- )
2859
-
2860
- results["resistance"].append(resistance)
2861
- results["coupling_coefficient"].append(cc)
2862
-
2863
- return results
3561
+ float
3562
+ The coupling coefficient, defined as the ratio of voltage change in cell 2
3563
+ to voltage change in cell 1 (ΔV₂/ΔV₁).
3564
+ """
3565
+ t = np.asarray(t)
3566
+ v1 = np.asarray(v1)
3567
+ v2 = np