bmtool 0.7.5__tar.gz → 0.7.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. {bmtool-0.7.5 → bmtool-0.7.6}/PKG-INFO +1 -1
  2. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/bmplot/connections.py +4 -3
  3. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/singlecell.py +53 -8
  4. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/synapses.py +630 -52
  5. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/util/util.py +29 -12
  6. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool.egg-info/PKG-INFO +1 -1
  7. {bmtool-0.7.5 → bmtool-0.7.6}/setup.py +1 -1
  8. {bmtool-0.7.5 → bmtool-0.7.6}/LICENSE +0 -0
  9. {bmtool-0.7.5 → bmtool-0.7.6}/README.md +0 -0
  10. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/SLURM.py +0 -0
  11. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/__init__.py +0 -0
  12. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/__main__.py +0 -0
  13. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/analysis/__init__.py +0 -0
  14. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/analysis/entrainment.py +0 -0
  15. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/analysis/lfp.py +0 -0
  16. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/analysis/netcon_reports.py +0 -0
  17. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/analysis/spikes.py +0 -0
  18. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/bmplot/__init__.py +0 -0
  19. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/bmplot/entrainment.py +0 -0
  20. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/bmplot/lfp.py +0 -0
  21. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/bmplot/netcon_reports.py +0 -0
  22. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/bmplot/spikes.py +0 -0
  23. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/connectors.py +0 -0
  24. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/debug/__init__.py +0 -0
  25. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/debug/commands.py +0 -0
  26. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/debug/debug.py +0 -0
  27. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/graphs.py +0 -0
  28. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/manage.py +0 -0
  29. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/plot_commands.py +0 -0
  30. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/util/__init__.py +0 -0
  31. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/util/commands.py +0 -0
  32. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/util/neuron/__init__.py +0 -0
  33. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool/util/neuron/celltuner.py +0 -0
  34. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool.egg-info/SOURCES.txt +0 -0
  35. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool.egg-info/dependency_links.txt +0 -0
  36. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool.egg-info/entry_points.txt +0 -0
  37. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool.egg-info/requires.txt +0 -0
  38. {bmtool-0.7.5 → bmtool-0.7.6}/bmtool.egg-info/top_level.txt +0 -0
  39. {bmtool-0.7.5 → bmtool-0.7.6}/pyproject.toml +0 -0
  40. {bmtool-0.7.5 → bmtool-0.7.6}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: bmtool
3
- Version: 0.7.5
3
+ Version: 0.7.6
4
4
  Summary: BMTool
5
5
  Home-page: https://github.com/cyneuro/bmtool
6
6
  Download-URL:
@@ -661,7 +661,8 @@ def connection_histogram(
661
661
  (edges[source_id_type] == source_id) & (edges[target_id_type] == target_id)
662
662
  ]
663
663
  if not include_gap:
664
- temp = temp[~temp["is_gap_junction"]]
664
+ gap_col = temp["is_gap_junction"].fillna(False).astype(bool)
665
+ temp = temp[~gap_col]
665
666
  node_pairs = temp.groupby("target_node_id")["source_node_id"].count()
666
667
  try:
667
668
  conn_mean = statistics.mean(node_pairs.values)
@@ -1022,8 +1023,8 @@ def plot_synapse_location(config: str, source: str, target: str, sids: str, tids
1022
1023
  )
1023
1024
 
1024
1025
  # Fix the validation logic - it was using 'or' instead of 'and'
1025
- if syn_feature not in ["afferent_section_id", "afferent_section_pos"]:
1026
- raise ValueError("Currently only syn features supported are afferent_section_id or afferent_section_pos")
1026
+ #if syn_feature not in ["afferent_section_id", "afferent_section_pos"]:
1027
+ # raise ValueError("Currently only syn features supported are afferent_section_id or afferent_section_pos")
1027
1028
 
1028
1029
  try:
1029
1030
  # Load mechanisms and template
@@ -10,6 +10,8 @@ import pandas as pd
10
10
  from neuron import h
11
11
  from scipy.optimize import curve_fit
12
12
 
13
+ from bmtool.util.util import load_templates_from_config, load_config
14
+
13
15
 
14
16
  def load_biophys1():
15
17
  """
@@ -1025,19 +1027,62 @@ class ZAP(CurrentClamp):
1025
1027
 
1026
1028
 
1027
1029
  class Profiler:
1028
- """All in one single cell profiler"""
1030
+ """All in one single cell profiler
1031
+
1032
+ This Profiler now supports being initialized with either explicit
1033
+ `template_dir` and `mechanism_dir` paths or with a BMTK `config` file
1034
+ (which should contain `components.templates_dir` and
1035
+ `components.mechanisms_dir`). When `config` is provided it will be used
1036
+ to load mechanisms and templates via the utility helpers.
1037
+ """
1029
1038
 
1030
- def __init__(self, template_dir: str = None, mechanism_dir: str = None, dt=None):
1039
+ def __init__(self, template_dir: str = None, mechanism_dir: str = None, dt=None, config: str = None):
1040
+ # initialize to None and then prefer config-derived paths if provided
1031
1041
  self.template_dir = None
1032
1042
  self.mechanism_dir = None
1033
1043
 
1034
- if not self.template_dir:
1035
- self.template_dir = template_dir
1036
- if not self.mechanism_dir:
1037
- self.mechanism_dir = mechanism_dir
1038
- self.templates = None
1044
+ # If a BMTK config is provided, load mechanisms/templates from it
1045
+ if config is not None:
1046
+ try:
1047
+ # load and apply the config values for directories
1048
+ conf = load_config(config)
1049
+ # conf behaves like a dict returned by bmtk Config.from_json
1050
+ try:
1051
+ comps = conf["components"]
1052
+ except Exception:
1053
+ comps = getattr(conf, "components", None)
1054
+
1055
+ if comps is not None:
1056
+ # support dict-like and object-like components
1057
+ try:
1058
+ self.template_dir = comps.get("templates_dir")
1059
+ except Exception:
1060
+ self.template_dir = getattr(comps, "templates_dir", None)
1061
+ try:
1062
+ self.mechanism_dir = comps.get("mechanisms_dir")
1063
+ except Exception:
1064
+ self.mechanism_dir = getattr(comps, "mechanisms_dir", None)
1065
+
1066
+ # actually load mechanisms and templates using the helper
1067
+ load_templates_from_config(config)
1068
+ except Exception:
1069
+ # fall back to explicit dirs if config parsing/loading fails
1070
+ pass
1071
+
1072
+ else:
1073
+ # fall back to explicit args if not set by config
1074
+ if not self.template_dir:
1075
+ self.template_dir = template_dir
1076
+ if not self.mechanism_dir:
1077
+ self.mechanism_dir = mechanism_dir
1078
+
1079
+ # template_dir is required for loading templates later
1080
+ if self.template_dir is None:
1081
+ raise ValueError("Profiler requires either 'template_dir' or a 'config' containing components.templates_dir")
1082
+
1083
+ self.templates = None
1039
1084
 
1040
- self.load_templates()
1085
+ self.load_templates()
1041
1086
 
1042
1087
  h.load_file("stdrun.hoc")
1043
1088
  if dt is not None:
@@ -8,6 +8,7 @@ import ipywidgets as widgets
8
8
  import matplotlib.pyplot as plt
9
9
  import neuron
10
10
  import numpy as np
11
+ import pandas as pd
11
12
  from IPython.display import clear_output, display
12
13
  from ipywidgets import HBox, VBox
13
14
  from neuron import h
@@ -18,10 +19,10 @@ from scipy.optimize import curve_fit, minimize, minimize_scalar
18
19
  from scipy.signal import find_peaks
19
20
  from tqdm.notebook import tqdm
20
21
 
21
- from bmtool.util.util import load_templates_from_config
22
+ from bmtool.util.util import load_templates_from_config, load_nodes_from_config, load_edges_from_config, load_config
22
23
 
23
24
  DEFAULT_GENERAL_SETTINGS = {
24
- "vclamp": True,
25
+ "vclamp": False,
25
26
  "rise_interval": (0.1, 0.9),
26
27
  "tstart": 500.0,
27
28
  "tdur": 100.0,
@@ -43,17 +44,18 @@ DEFAULT_GAP_JUNCTION_GENERAL_SETTINGS = {
43
44
  class SynapseTuner:
44
45
  def __init__(
45
46
  self,
47
+ conn_type_settings: Optional[Dict[str, dict]] = None,
48
+ connection: Optional[str] = None,
49
+ current_name: str = "i",
46
50
  mechanisms_dir: Optional[str] = None,
47
51
  templates_dir: Optional[str] = None,
48
52
  config: Optional[str] = None,
49
- conn_type_settings: Optional[dict] = None,
50
- connection: Optional[str] = None,
51
53
  general_settings: Optional[dict] = None,
52
54
  json_folder_path: Optional[str] = None,
53
- current_name: str = "i",
54
- other_vars_to_record: Optional[list] = None,
55
- slider_vars: Optional[list] = None,
55
+ other_vars_to_record: Optional[List[str]] = None,
56
+ slider_vars: Optional[List[str]] = None,
56
57
  hoc_cell: Optional[object] = None,
58
+ network: Optional[str] = None,
57
59
  ) -> None:
58
60
  """
59
61
  Initialize the SynapseTuner class with connection type settings, mechanisms, and template directories.
@@ -80,8 +82,28 @@ class SynapseTuner:
80
82
  List of synaptic variables you would like sliders set up for the STP sliders method by default will use all parameters in spec_syn_param.
81
83
  hoc_cell : Optional[object]
82
84
  An already loaded NEURON cell object. If provided, template loading and cell setup will be skipped.
85
+ network : Optional[str]
86
+ Name of the specific network dataset to access from the loaded edges data (e.g., 'network_to_network').
87
+ If not provided, will use all available networks. When a config file is provided, this enables
88
+ the network dropdown feature in InteractiveTuner for switching between different networks.
89
+
90
+ Network Dropdown Feature:
91
+ -------------------------
92
+ When initialized with a BMTK config file, the tuner automatically:
93
+ 1. Loads all available network datasets from the config
94
+ 2. Creates a network dropdown in InteractiveTuner (if multiple networks exist)
95
+ 3. Allows dynamic switching between networks, which rebuilds connection types
96
+ 4. Updates connection dropdown options when network is changed
97
+ 5. Preserves current connection if it exists in the new network, otherwise selects the first available
83
98
  """
84
99
  self.hoc_cell = hoc_cell
100
+ # Store config and network information for network dropdown functionality
101
+ self.config = config # Store config path for network dropdown functionality
102
+ self.available_networks = [] # Store available networks from config file
103
+ self.current_network = network # Store current network selection
104
+ # Cache for loaded dynamics params JSON by filename to avoid repeated disk reads
105
+ self._syn_params_cache = {}
106
+ h.load_file('stdrun.hoc')
85
107
 
86
108
  if hoc_cell is None:
87
109
  if config is None and (mechanisms_dir is None or templates_dir is None):
@@ -95,11 +117,37 @@ class SynapseTuner:
95
117
  else:
96
118
  # loads both mech and templates
97
119
  load_templates_from_config(config)
120
+ # Load available networks from config for network dropdown feature
121
+ self._load_available_networks()
122
+ # Prebuild connection type settings for each available network to
123
+ # make network switching in the UI fast. This will make __init__ slower
124
+ # but dramatically speed up response when changing the network dropdown.
125
+ self._prebuilt_conn_type_settings = {}
126
+ try:
127
+ for net in self.available_networks:
128
+ self._prebuilt_conn_type_settings[net] = self._build_conn_type_settings_from_config(config, network=net)
129
+ except Exception as e:
130
+ print(f"Warning: error prebuilding conn_type_settings for networks: {e}")
98
131
 
99
132
  if conn_type_settings is None:
100
- raise ValueError("conn_type_settings must be provided.")
133
+ if config is not None:
134
+ print("Building conn_type_settings from BMTK config files...")
135
+ # If we prebuilt per-network settings, use the one for the requested network
136
+ if hasattr(self, '_prebuilt_conn_type_settings') and network in getattr(self, '_prebuilt_conn_type_settings', {}):
137
+ conn_type_settings = self._prebuilt_conn_type_settings[network]
138
+ else:
139
+ conn_type_settings = self._build_conn_type_settings_from_config(config, network=network)
140
+ print(f"Found {len(conn_type_settings)} connection types: {list(conn_type_settings.keys())}")
141
+
142
+ # If connection is not specified, use the first available connection
143
+ if connection is None and conn_type_settings:
144
+ connection = list(conn_type_settings.keys())[0]
145
+ print(f"No connection specified, using first available: {connection}")
146
+ else:
147
+ raise ValueError("conn_type_settings must be provided if config is not specified.")
148
+
101
149
  if connection is None:
102
- raise ValueError("connection must be provided.")
150
+ raise ValueError("connection must be provided or inferable from conn_type_settings.")
103
151
  if connection not in conn_type_settings:
104
152
  raise ValueError(f"connection '{connection}' not found in conn_type_settings.")
105
153
 
@@ -182,6 +230,358 @@ class SynapseTuner:
182
230
 
183
231
  self._set_up_recorders()
184
232
 
233
+ def _build_conn_type_settings_from_config(self, config_path: str, node_set: Optional[str] = None, network: Optional[str] = None) -> Dict[str, dict]:
234
+ """
235
+ Build conn_type_settings from BMTK simulation and circuit config files using the method used by relation matrix function in util.
236
+
237
+ Parameters:
238
+ -----------
239
+ config_path : str
240
+ Path to the simulation config JSON file.
241
+ node_set : Optional[str]
242
+ Specific node set to filter connections for. If None, processes all connections.
243
+ network : Optional[str]
244
+ Name of the specific network dataset to access (e.g., 'network_to_network').
245
+ If None, processes all available networks.
246
+
247
+ Returns:
248
+ --------
249
+ Dict[str, dict]
250
+ Dictionary with connection names as keys and connection settings as values.
251
+
252
+ NOTE: a lot of this code could probs be made a bit more simple or just removed i kinda tried a bunch of things and it works now
253
+ but is kinda complex and some code is probs not needed
254
+
255
+ """
256
+ # Load configuration and get nodes and edges using util.py methods
257
+ config = load_config(config_path)
258
+ # Ensure the config dict knows its source path so path substitutions can be resolved
259
+ try:
260
+ # load_config may return a dict; store path used so callers can resolve $COMPONENTS_DIR
261
+ config['config_path'] = config_path
262
+ except Exception:
263
+ pass
264
+ nodes = load_nodes_from_config(config_path)
265
+ edges = load_edges_from_config(config_path)
266
+
267
+ conn_type_settings = {}
268
+
269
+ # If a specific network is requested, only process that one
270
+ if network:
271
+ if network not in edges:
272
+ print(f"Warning: Network '{network}' not found in edges. Available networks: {list(edges.keys())}")
273
+ return conn_type_settings
274
+ edge_datasets = {network: edges[network]}
275
+ else:
276
+ edge_datasets = edges
277
+
278
+ # Process each edge dataset using the util.py approach
279
+ for edge_dataset_name, edge_df in edge_datasets.items():
280
+ if edge_df.empty:
281
+ continue
282
+
283
+ # Create merged DataFrames with source and target node information like util.py does
284
+ source_node_df = None
285
+ target_node_df = None
286
+
287
+ # First, try to deterministically parse the edge_dataset_name for patterns like '<src>_to_<tgt>'
288
+ # e.g., 'network_to_network', 'extnet_to_network'
289
+ if '_to_' in edge_dataset_name:
290
+ parts = edge_dataset_name.split('_to_')
291
+ if len(parts) == 2:
292
+ src_name, tgt_name = parts
293
+ if src_name in nodes:
294
+ source_node_df = nodes[src_name].add_prefix('source_')
295
+ if tgt_name in nodes:
296
+ target_node_df = nodes[tgt_name].add_prefix('target_')
297
+
298
+ # If not found by parsing name, fall back to inspecting a sample edge row which contains
299
+ # explicit 'source_population' and 'target_population' fields (this avoids reversing source/target)
300
+ if source_node_df is None or target_node_df is None:
301
+ sample_edge = edge_df.iloc[0] if len(edge_df) > 0 else None
302
+ if sample_edge is not None:
303
+ # Use explicit population names from the edge entry
304
+ source_pop_name = sample_edge.get('source_population', '')
305
+ target_pop_name = sample_edge.get('target_population', '')
306
+ if source_pop_name in nodes:
307
+ source_node_df = nodes[source_pop_name].add_prefix('source_')
308
+ if target_pop_name in nodes:
309
+ target_node_df = nodes[target_pop_name].add_prefix('target_')
310
+
311
+ # As a last resort, attempt to heuristically match by prefix/suffix of the dataset name
312
+ if source_node_df is None or target_node_df is None:
313
+ for pop_name, node_df in nodes.items():
314
+ if source_node_df is None and (edge_dataset_name.startswith(pop_name) or edge_dataset_name.endswith(pop_name)):
315
+ source_node_df = node_df.add_prefix('source_')
316
+ elif target_node_df is None and (edge_dataset_name.startswith(pop_name) or edge_dataset_name.endswith(pop_name)):
317
+ target_node_df = node_df.add_prefix('target_')
318
+
319
+ # If we still don't have the node data, skip this edge dataset
320
+ if source_node_df is None or target_node_df is None:
321
+ print(f"Warning: Could not find node data for edge dataset {edge_dataset_name}")
322
+ continue
323
+
324
+ # Merge edge data with source node info
325
+ edges_with_source = pd.merge(
326
+ edge_df.reset_index(),
327
+ source_node_df,
328
+ how='left',
329
+ left_on='source_node_id',
330
+ right_index=True
331
+ )
332
+
333
+ # Merge with target node info
334
+ edges_with_nodes = pd.merge(
335
+ edges_with_source,
336
+ target_node_df,
337
+ how='left',
338
+ left_on='target_node_id',
339
+ right_index=True
340
+ )
341
+
342
+ # Get unique edge types from the merged dataset
343
+ if 'edge_type_id' in edges_with_nodes.columns:
344
+ edge_types = edges_with_nodes['edge_type_id'].unique()
345
+ else:
346
+ edge_types = [0] # Single edge type
347
+
348
+ # Process each edge type
349
+ for edge_type_id in edge_types:
350
+ # Filter edges for this type
351
+ if 'edge_type_id' in edges_with_nodes.columns:
352
+ edge_type_data = edges_with_nodes[edges_with_nodes['edge_type_id'] == edge_type_id]
353
+ else:
354
+ edge_type_data = edges_with_nodes
355
+
356
+ if len(edge_type_data) == 0:
357
+ continue
358
+
359
+ # Get representative edge for this type
360
+ edge_info = edge_type_data.iloc[0]
361
+
362
+ # Skip gap junctions
363
+ if 'is_gap_junction' in edge_info and pd.notna(edge_info['is_gap_junction']) and edge_info['is_gap_junction']:
364
+ continue
365
+
366
+ # Get population names from the merged data (this is the key improvement!)
367
+ source_pop = edge_info.get('source_pop_name', '')
368
+ target_pop = edge_info.get('target_pop_name', '')
369
+
370
+ # Get target cell template from the merged data
371
+ target_model_template = edge_info.get('target_model_template', '')
372
+ if target_model_template.startswith('hoc:'):
373
+ target_cell_type = target_model_template.replace('hoc:', '')
374
+ else:
375
+ target_cell_type = target_model_template
376
+
377
+ # Create connection name using the actual population names
378
+ if source_pop and target_pop:
379
+ conn_name = f"{source_pop}2{target_pop}"
380
+ else:
381
+ conn_name = f"{edge_dataset_name}_type_{edge_type_id}"
382
+
383
+ # Get synaptic model template
384
+ model_template = edge_info.get('model_template', 'exp2syn')
385
+
386
+ # Build connection settings early so we can attach metadata like dynamics file name
387
+ conn_settings = {
388
+ 'spec_settings': {
389
+ 'post_cell': target_cell_type,
390
+ 'vclamp_amp': -70.0, # Default voltage clamp amplitude
391
+ 'sec_x': 0.5, # Default location on section
392
+ 'sec_id': 0, # Default to soma
393
+ # level_of_detail may be overridden by dynamics params below
394
+ 'level_of_detail': model_template,
395
+ },
396
+ 'spec_syn_param': {}
397
+ }
398
+
399
+ # Load synaptic parameters from dynamics_params file if available.
400
+ # NOTE: the edge DataFrame produced by load_edges_from_config/load_sonata_edges_to_dataframe
401
+ # already contains the 'dynamics_params' column (from the CSV) or the
402
+ # flattened H5 dynamics_params attributes (prefixed with 'dynamics_params/').
403
+ # Prefer the direct 'dynamics_params' column value from the merged DataFrame
404
+ # rather than performing ad-hoc string parsing here.
405
+ syn_params = {}
406
+ dynamics_file_name = None
407
+ # Prefer a top-level 'dynamics_params' column if present
408
+ if 'dynamics_params' in edge_info and pd.notna(edge_info.get('dynamics_params')):
409
+ val = edge_info.get('dynamics_params')
410
+ # Some CSV loaders can produce bytes or numpy types; coerce to str
411
+ try:
412
+ dynamics_file_name = str(val).strip()
413
+ except Exception:
414
+ dynamics_file_name = None
415
+
416
+ # If we found a dynamics file name, use it directly (skip token parsing)
417
+ if dynamics_file_name and dynamics_file_name.upper() != 'NULL':
418
+ try:
419
+ conn_settings['spec_settings']['dynamics_params_file'] = dynamics_file_name
420
+ # use a cache to avoid re-reading the same JSON multiple times
421
+ if dynamics_file_name in self._syn_params_cache:
422
+ syn_params = self._syn_params_cache[dynamics_file_name]
423
+ else:
424
+ syn_params = self._load_synaptic_params_from_config(config, dynamics_file_name)
425
+ # cache result (even if empty dict) to avoid repeated lookups
426
+ self._syn_params_cache[dynamics_file_name] = syn_params
427
+ except Exception as e:
428
+ print(f"Warning: could not load dynamics_params file '{dynamics_file_name}' for edge {edge_dataset_name}: {e}")
429
+
430
+ # If a dynamics params JSON filename was provided, prefer using its basename
431
+ # as the connection name so that the UI matches the JSON definitions.
432
+ if dynamics_file_name:
433
+ try:
434
+ json_base = os.path.splitext(os.path.basename(dynamics_file_name))[0]
435
+ # Ensure uniqueness in conn_type_settings
436
+ if json_base in conn_type_settings:
437
+ # Append edge_type_id to disambiguate
438
+ json_base = f"{json_base}_type_{edge_type_id}"
439
+ conn_name = json_base
440
+ except Exception:
441
+ pass
442
+
443
+ # If the dynamics params defined a level_of_detail, override the default
444
+ if isinstance(syn_params, dict) and 'level_of_detail' in syn_params:
445
+ conn_settings['spec_settings']['level_of_detail'] = syn_params.get('level_of_detail', model_template)
446
+
447
+ # Add synaptic parameters, excluding level_of_detail
448
+ for key, value in syn_params.items():
449
+ if key != 'level_of_detail':
450
+ conn_settings['spec_syn_param'][key] = value
451
+ else:
452
+ # Fallback: some SONATA/H5 edge files expose dynamics params as flattened
453
+ # columns named like 'dynamics_params/<param>'. If no filename was given,
454
+ # gather any such columns from edge_info and use them as spec_syn_param.
455
+ for col in edge_info.index:
456
+ if isinstance(col, str) and col.startswith('dynamics_params/'):
457
+ param_key = col.split('/', 1)[1]
458
+ try:
459
+ val = edge_info[col]
460
+ if pd.notna(val):
461
+ conn_settings['spec_syn_param'][param_key] = val
462
+ except Exception:
463
+ # Ignore malformed entries
464
+ pass
465
+
466
+ # Add weight from edge info if available
467
+ if 'syn_weight' in edge_info and pd.notna(edge_info['syn_weight']):
468
+ conn_settings['spec_syn_param']['initW'] = float(edge_info['syn_weight'])
469
+
470
+ # Handle afferent section information
471
+ if 'afferent_section_id' in edge_info and pd.notna(edge_info['afferent_section_id']):
472
+ conn_settings['spec_settings']['sec_id'] = int(edge_info['afferent_section_id'])
473
+
474
+ if 'afferent_section_pos' in edge_info and pd.notna(edge_info['afferent_section_pos']):
475
+ conn_settings['spec_settings']['sec_x'] = float(edge_info['afferent_section_pos'])
476
+
477
+ # Store in connection settings
478
+ conn_type_settings[conn_name] = conn_settings
479
+
480
+ return conn_type_settings
481
+
482
+ def _load_available_networks(self) -> None:
483
+ """
484
+ Load available network names from the config file for the network dropdown feature.
485
+
486
+ This method is automatically called during initialization when a config file is provided.
487
+ It populates the available_networks list which enables the network dropdown in
488
+ InteractiveTuner when multiple networks are available.
489
+
490
+ Network Dropdown Behavior:
491
+ -------------------------
492
+ - If only one network exists: No network dropdown is shown
493
+ - If multiple networks exist: Network dropdown appears next to connection dropdown
494
+ - Networks are loaded from the edges data in the config file
495
+ - Current network defaults to the first available if not specified during init
496
+ """
497
+ if self.config is None:
498
+ self.available_networks = []
499
+ return
500
+
501
+ try:
502
+ edges = load_edges_from_config(self.config)
503
+ self.available_networks = list(edges.keys())
504
+
505
+ # Set current network to first available if not specified
506
+ if self.current_network is None and self.available_networks:
507
+ self.current_network = self.available_networks[0]
508
+ except Exception as e:
509
+ print(f"Warning: Could not load networks from config: {e}")
510
+ self.available_networks = []
511
+
512
+ def _load_synaptic_params_from_config(self, config: dict, dynamics_params: str) -> dict:
513
+ """
514
+ Load synaptic parameters from dynamics params file using config information.
515
+
516
+ Parameters:
517
+ -----------
518
+ config : dict
519
+ BMTK configuration dictionary
520
+ dynamics_params : str
521
+ Dynamics parameters filename
522
+
523
+ Returns:
524
+ --------
525
+ dict
526
+ Synaptic parameters dictionary
527
+ """
528
+ try:
529
+ # Get the synaptic models directory from config
530
+ synaptic_models_dir = config.get('components', {}).get('synaptic_models_dir', '')
531
+ if synaptic_models_dir:
532
+ # Handle path variables
533
+ if synaptic_models_dir.startswith('$'):
534
+ # This is a placeholder, try to resolve it
535
+ config_dir = os.path.dirname(config.get('config_path', ''))
536
+ synaptic_models_dir = synaptic_models_dir.replace('$COMPONENTS_DIR',
537
+ os.path.join(config_dir, 'components'))
538
+ synaptic_models_dir = synaptic_models_dir.replace('$BASE_DIR', config_dir)
539
+
540
+ dynamics_file = os.path.join(synaptic_models_dir, dynamics_params)
541
+
542
+ if os.path.exists(dynamics_file):
543
+ with open(dynamics_file, 'r') as f:
544
+ return json.load(f)
545
+ else:
546
+ print(f"Warning: Dynamics params file not found: {dynamics_file}")
547
+ except Exception as e:
548
+ print(f"Warning: Error loading synaptic parameters: {e}")
549
+
550
+ return {}
551
+
552
+ @classmethod
553
+ def list_connections_from_config(cls, config_path: str, network: Optional[str] = None) -> Dict[str, dict]:
554
+ """
555
+ Class method to list all available connections from a BMTK config file without creating a tuner.
556
+
557
+ Parameters:
558
+ -----------
559
+ config_path : str
560
+ Path to the simulation config JSON file.
561
+ network : Optional[str]
562
+ Name of the specific network dataset to access (e.g., 'network_to_network').
563
+ If None, processes all available networks.
564
+
565
+ Returns:
566
+ --------
567
+ Dict[str, dict]
568
+ Dictionary with connection names as keys and connection info as values.
569
+ """
570
+ # Create a temporary instance just to use the parsing methods
571
+ temp_tuner = cls.__new__(cls) # Create without calling __init__
572
+ conn_type_settings = temp_tuner._build_conn_type_settings_from_config(config_path, network=network)
573
+
574
+ # Create a summary of connections with key info
575
+ connections_summary = {}
576
+ for conn_name, settings in conn_type_settings.items():
577
+ connections_summary[conn_name] = {
578
+ 'post_cell': settings['spec_settings']['post_cell'],
579
+ 'synapse_type': settings['spec_settings']['level_of_detail'],
580
+ 'parameters': list(settings['spec_syn_param'].keys())
581
+ }
582
+
583
+ return connections_summary
584
+
185
585
  def _switch_connection(self, new_connection: str) -> None:
186
586
  """
187
587
  Switch to a different connection type and update all related properties.
@@ -266,6 +666,61 @@ class SynapseTuner:
266
666
 
267
667
  print(f"Successfully switched to connection: {new_connection}")
268
668
 
669
+ def _switch_network(self, new_network: str) -> None:
670
+ """
671
+ Switch to a different network and rebuild conn_type_settings for the new network.
672
+
673
+ This method is called when the user selects a different network from the network
674
+ dropdown in InteractiveTuner. It performs a complete rebuild of the connection
675
+ types available for the new network.
676
+
677
+ Parameters:
678
+ -----------
679
+ new_network : str
680
+ Name of the new network to switch to.
681
+
682
+ Network Switching Process:
683
+ -------------------------
684
+ 1. Validates the new network exists in available_networks
685
+ 2. Rebuilds conn_type_settings using the new network's edge data
686
+ 3. Updates the connection dropdown with new network's available connections
687
+ 4. Preserves current connection if it exists in new network
688
+ 5. Falls back to first available connection if current doesn't exist
689
+ 6. Recreates synapses and NEURON objects for the new connection
690
+ 7. Updates UI components to reflect the changes
691
+ """
692
+ if new_network not in self.available_networks:
693
+ print(f"Warning: Network '{new_network}' not found in available networks: {self.available_networks}")
694
+ return
695
+
696
+ if new_network == self.current_network:
697
+ return # No change needed
698
+
699
+ # Update current network
700
+ self.current_network = new_network
701
+
702
+ # Switch conn_type_settings using prebuilt data if available, otherwise build on-demand
703
+ if self.config:
704
+ print(f"Switching connections for network: {new_network}")
705
+ if hasattr(self, '_prebuilt_conn_type_settings') and new_network in self._prebuilt_conn_type_settings:
706
+ self.conn_type_settings = self._prebuilt_conn_type_settings[new_network]
707
+ else:
708
+ # Fallback: build on-demand (slower)
709
+ self.conn_type_settings = self._build_conn_type_settings_from_config(self.config, network=new_network)
710
+
711
+ # Update available connections and select first one if current doesn't exist
712
+ available_connections = list(self.conn_type_settings.keys())
713
+ if self.current_connection not in available_connections and available_connections:
714
+ self.current_connection = available_connections[0]
715
+ print(f"Connection '{self.current_connection}' not available in new network. Switched to: {available_connections[0]}")
716
+
717
+ # Switch to the (potentially new) connection
718
+ if self.current_connection in self.conn_type_settings:
719
+ self._switch_connection(self.current_connection)
720
+
721
+ print(f"Successfully switched to network: {new_network}")
722
+ print(f"Available connections: {available_connections}")
723
+
269
724
  def _update_spec_syn_param(self, json_folder_path: str) -> None:
270
725
  """
271
726
  Update specific synaptic parameters using JSON files located in the specified folder.
@@ -305,11 +760,14 @@ class SynapseTuner:
305
760
  - `_set_up_cell()` should be called before setting up the synapse.
306
761
  - Synapse location, type, and properties are specified within `spec_syn_param` and `spec_settings`.
307
762
  """
308
- self.syn = getattr(h, self.conn["spec_settings"]["level_of_detail"])(
309
- list(self.cell.all)[self.conn["spec_settings"]["sec_id"]](
310
- self.conn["spec_settings"]["sec_x"]
763
+ try:
764
+ self.syn = getattr(h, self.conn["spec_settings"]["level_of_detail"])(
765
+ list(self.cell.all)[self.conn["spec_settings"]["sec_id"]](
766
+ self.conn["spec_settings"]["sec_x"]
767
+ )
311
768
  )
312
- )
769
+ except:
770
+ raise Exception("Make sure the mod file exist you are trying to load check spelling!")
313
771
  for key, value in self.conn["spec_syn_param"].items():
314
772
  if isinstance(value, (int, float)):
315
773
  if hasattr(self.syn, key):
@@ -717,7 +1175,9 @@ class SynapseTuner:
717
1175
  amp = np.array(amp)
718
1176
  amp = amp * 1000 # scale up
719
1177
  amp = amp.reshape(-1, amp.shape[-1])
720
- maxamp = amp.max(axis=1 if normalize_by_trial else None)
1178
+
1179
+ # Calculate 90th percentile amplitude for normalization
1180
+ percentile_90 = np.percentile(amp, 90)
721
1181
 
722
1182
  def format_array(arr):
723
1183
  """Format an array to 2 significant figures for cleaner output."""
@@ -729,49 +1189,42 @@ class SynapseTuner:
729
1189
  f"Short Term Plasticity Results for {self.train_freq}Hz with {self.train_delay} Delay"
730
1190
  )
731
1191
  print("=" * 40)
732
- print("PPR: Above 1 is facilitating, below 1 is depressing.")
1192
+ print("PPR: Above 0 is facilitating, below 0 is depressing.")
733
1193
  print("Induction: Above 0 is facilitating, below 0 is depressing.")
734
1194
  print("Recovery: A measure of how fast STP decays.\n")
735
1195
 
736
- # PPR Calculation
737
- ppr = amp[:, 1:2] / amp[:, 0:1]
1196
+ # PPR Calculation: (Avg 2nd pulse - Avg 1st pulse) / 90th percentile amplitude
1197
+ ppr = (np.mean(amp[:, 1:2]) - np.mean(amp[:, 0:1])) / percentile_90
738
1198
  print("Paired Pulse Response (PPR)")
739
- print("Calculation: 2nd pulse / 1st pulse")
1199
+ print("Calculation: (Avg 2nd pulse - Avg 1st pulse) / 90th percentile amplitude")
740
1200
  print(
741
- f"Values: ({format_array(amp[:, 1:2])}) / ({format_array(amp[:, 0:1])}) = {format_array(ppr)}\n"
1201
+ f"Values: ({np.mean(amp[:, 1:2]):.3f} - {np.mean(amp[:, 0:1]):.3f}) / {percentile_90:.3f} = {ppr:.3f}\n"
742
1202
  )
743
1203
 
744
- # Induction Calculation
745
- induction = np.mean((amp[:, 5:8].mean(axis=1) - amp[:, :1].mean(axis=1)) / maxamp)
1204
+ # Induction Calculation: (Avg (6th, 7th, 8th pulses) - Avg 1st pulse) / 90th percentile amplitude
1205
+ induction = (np.mean(amp[:, 5:8]) - np.mean(amp[:, :1])) / percentile_90
746
1206
  print("Induction")
747
- print("Calculation: (avg(6th, 7th, 8th pulses) - 1st pulse) / max amps")
1207
+ print("Calculation: (Avg(6th, 7th, 8th pulses) - Avg 1st pulse) / 90th percentile amplitude")
748
1208
  print(
749
- f"Values: avg({format_array(amp[:, 5:8])}) - {format_array(amp[:, :1])} / {format_array(maxamp)}"
750
- )
751
- print(
752
- f"({format_array(amp[:, 5:8].mean(axis=1))}) - ({format_array(amp[:, :1].mean(axis=1))}) / {format_array(maxamp)} = {induction:.3f}\n"
1209
+ f"Values: {np.mean(amp[:, 5:8]):.3f} - {np.mean(amp[:, :1]):.3f} / {percentile_90:.3f} = {induction:.3f}\n"
753
1210
  )
754
1211
 
755
- # Recovery Calculation
756
- recovery = np.mean((amp[:, 8:12].mean(axis=1) - amp[:, :4].mean(axis=1)) / maxamp)
1212
+ # Recovery Calculation: (Avg (9th, 10th, 11th, 12th pulses) - Avg (1st, 2nd, 3rd, 4th pulses)) / 90th percentile amplitude
1213
+ recovery = (np.mean(amp[:, 8:12]) - np.mean(amp[:, :4])) / percentile_90
757
1214
  print("Recovery")
758
1215
  print(
759
- "Calculation: (avg(9th, 10th, 11th, 12th pulses) - avg(1st to 4th pulses)) / max amps"
760
- )
761
- print(
762
- f"Values: avg({format_array(amp[:, 8:12])}) - avg({format_array(amp[:, :4])}) / {format_array(maxamp)}"
1216
+ "Calculation: (Avg(9th, 10th, 11th, 12th pulses) - Avg(1st to 4th pulses)) / 90th percentile amplitude"
763
1217
  )
764
1218
  print(
765
- f"({format_array(amp[:, 8:12].mean(axis=1))}) - ({format_array(amp[:, :4].mean(axis=1))}) / {format_array(maxamp)} = {recovery:.3f}\n"
1219
+ f"Values: {np.mean(amp[:, 8:12]):.3f} - {np.mean(amp[:, :4]):.3f} / {percentile_90:.3f} = {recovery:.3f}\n"
766
1220
  )
767
1221
 
768
1222
  print("=" * 40 + "\n")
769
1223
 
770
- recovery = np.mean((amp[:, 8:12].mean(axis=1) - amp[:, :4].mean(axis=1)) / maxamp)
771
- induction = np.mean((amp[:, 5:8].mean(axis=1) - amp[:, :1].mean(axis=1)) / maxamp)
772
- ppr = amp[:, 1:2] / amp[:, 0:1]
773
- # maxamp = max(amp, key=lambda x: abs(x[0]))
774
- maxamp = maxamp.max()
1224
+ # Calculate final metrics
1225
+ ppr = (np.mean(amp[:, 1:2]) - np.mean(amp[:, 0:1])) / percentile_90
1226
+ induction = (np.mean(amp[:, 5:8]) - np.mean(amp[:, :1])) / percentile_90
1227
+ recovery = (np.mean(amp[:, 8:12]) - np.mean(amp[:, :4])) / percentile_90
775
1228
 
776
1229
  return ppr, induction, recovery
777
1230
 
@@ -833,6 +1286,7 @@ class SynapseTuner:
833
1286
  Sets up interactive sliders for tuning short-term plasticity (STP) parameters in a Jupyter Notebook.
834
1287
 
835
1288
  This method creates an interactive UI with sliders for:
1289
+ - Network selection dropdown (if multiple networks available and config provided)
836
1290
  - Connection type selection dropdown
837
1291
  - Input frequency
838
1292
  - Delay between pulse trains
@@ -845,10 +1299,21 @@ class SynapseTuner:
845
1299
  - Toggling voltage clamp mode
846
1300
  - Switching between standard and continuous input modes
847
1301
 
1302
+ Network Dropdown Feature:
1303
+ ------------------------
1304
+ When the SynapseTuner is initialized with a BMTK config file containing multiple networks:
1305
+ - A network dropdown appears next to the connection dropdown
1306
+ - Users can dynamically switch between networks (e.g., 'network_to_network', 'external_to_network')
1307
+ - Switching networks rebuilds available connections and updates the connection dropdown
1308
+ - The current connection is preserved if it exists in the new network
1309
+ - If multiple networks exist but only one is specified during init, that network is used as default
1310
+
848
1311
  Notes:
849
1312
  ------
850
1313
  Ideal for exploratory parameter tuning and interactive visualization of
851
1314
  synapse behavior with different parameter values and stimulation protocols.
1315
+ The network dropdown feature enables comprehensive exploration of multi-network
1316
+ BMTK simulations without needing to reinitialize the tuner.
852
1317
  """
853
1318
  # Widgets setup (Sliders)
854
1319
  freqs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 35, 50, 100, 200]
@@ -868,6 +1333,17 @@ class SynapseTuner:
868
1333
  style={'description_width': 'initial'}
869
1334
  )
870
1335
 
1336
+ # Network dropdown - only shown if config was provided and multiple networks are available
1337
+ # This enables users to switch between different network datasets dynamically
1338
+ w_network = None
1339
+ if self.config is not None and len(self.available_networks) > 1:
1340
+ w_network = widgets.Dropdown(
1341
+ options=self.available_networks,
1342
+ value=self.current_network,
1343
+ description="Network:",
1344
+ style={'description_width': 'initial'}
1345
+ )
1346
+
871
1347
  w_run = widgets.Button(description="Run Train", icon="history", button_style="primary")
872
1348
  w_single = widgets.Button(description="Single Event", icon="check", button_style="success")
873
1349
  w_vclamp = widgets.ToggleButton(
@@ -876,6 +1352,17 @@ class SynapseTuner:
876
1352
  icon="fast-backward",
877
1353
  button_style="warning",
878
1354
  )
1355
+
1356
+ # Voltage clamp amplitude input
1357
+ default_vclamp_amp = getattr(self.conn['spec_settings'], 'vclamp_amp', -70.0)
1358
+ w_vclamp_amp = widgets.FloatText(
1359
+ value=default_vclamp_amp,
1360
+ description="V_clamp (mV):",
1361
+ step=5.0,
1362
+ style={'description_width': 'initial'},
1363
+ layout=widgets.Layout(width='150px')
1364
+ )
1365
+
879
1366
  w_input_mode = widgets.ToggleButton(
880
1367
  value=False, description="Continuous input", icon="eject", button_style="info"
881
1368
  )
@@ -915,13 +1402,29 @@ class SynapseTuner:
915
1402
  "Setting up slider! The sliders ranges are set by their init value so try changing that if you dont like the slider range!"
916
1403
  )
917
1404
 
1405
+ # Create output widget for displaying results
1406
+ output_widget = widgets.Output()
1407
+
918
1408
  def run_single_event(*args):
919
1409
  clear_output()
920
1410
  display(ui)
1411
+ display(output_widget)
1412
+
921
1413
  self.vclamp = w_vclamp.value
1414
+ # Update voltage clamp amplitude if voltage clamp is enabled
1415
+ if self.vclamp:
1416
+ # Update the voltage clamp amplitude settings
1417
+ self.conn['spec_settings']['vclamp_amp'] = w_vclamp_amp.value
1418
+ # Update general settings if they exist
1419
+ if hasattr(self, 'general_settings'):
1420
+ self.general_settings['vclamp_amp'] = w_vclamp_amp.value
922
1421
  # Update synaptic properties based on slider values
923
1422
  self.ispk = None
924
- self.SingleEvent()
1423
+
1424
+ # Clear previous results and run simulation
1425
+ output_widget.clear_output()
1426
+ with output_widget:
1427
+ self.SingleEvent()
925
1428
 
926
1429
  def on_connection_change(*args):
927
1430
  """Handle connection dropdown change"""
@@ -941,8 +1444,50 @@ class SynapseTuner:
941
1444
  except Exception as e:
942
1445
  print(f"Error switching connection: {e}")
943
1446
 
1447
+ def on_network_change(*args):
1448
+ """
1449
+ Handle network dropdown change events.
1450
+
1451
+ This callback is triggered when the user selects a different network from
1452
+ the network dropdown. It coordinates the complete switching process:
1453
+ 1. Calls _switch_network() to rebuild connections for the new network
1454
+ 2. Updates the connection dropdown options with new network's connections
1455
+ 3. Recreates dynamic sliders for the new connection parameters
1456
+ 4. Refreshes the entire UI to reflect all changes
1457
+ """
1458
+ if w_network is None:
1459
+ return
1460
+ try:
1461
+ new_network = w_network.value
1462
+ if new_network != self.current_network:
1463
+ # Switch to new network
1464
+ self._switch_network(new_network)
1465
+
1466
+ # Update connection dropdown options with new network's connections
1467
+ connection_options = list(self.conn_type_settings.keys())
1468
+ w_connection.options = connection_options
1469
+ if connection_options:
1470
+ w_connection.value = self.current_connection
1471
+
1472
+ # Recreate dynamic sliders for new connection
1473
+ self.dynamic_sliders = create_dynamic_sliders()
1474
+
1475
+ # Update UI
1476
+ update_ui_layout()
1477
+ update_ui()
1478
+
1479
+ except Exception as e:
1480
+ print(f"Error switching network: {e}")
1481
+
944
1482
  def update_ui_layout():
945
- """Update the UI layout with new sliders"""
1483
+ """
1484
+ Update the UI layout with new sliders and network dropdown.
1485
+
1486
+ This function reconstructs the entire UI layout including:
1487
+ - Network dropdown (if available) and connection dropdown in the top row
1488
+ - Button controls and input mode toggles
1489
+ - Parameter sliders arranged in columns
1490
+ """
946
1491
  nonlocal ui, slider_columns
947
1492
 
948
1493
  # Add the dynamic sliders to the UI
@@ -956,9 +1501,18 @@ class SynapseTuner:
956
1501
  else:
957
1502
  slider_columns = VBox([])
958
1503
 
959
- # Reconstruct the UI
960
- connection_row = HBox([w_connection])
961
- button_row = HBox([w_run, w_single, w_vclamp, w_input_mode])
1504
+ # Create button row with voltage clamp controls
1505
+ if w_vclamp.value: # Show voltage clamp amplitude input when toggle is on
1506
+ button_row = HBox([w_run, w_single, w_vclamp, w_vclamp_amp, w_input_mode])
1507
+ else: # Hide voltage clamp amplitude input when toggle is off
1508
+ button_row = HBox([w_run, w_single, w_vclamp, w_input_mode])
1509
+
1510
+ # Construct the top row - include network dropdown if available
1511
+ # This creates a horizontal layout with network dropdown (if present) and connection dropdown
1512
+ if w_network is not None:
1513
+ connection_row = HBox([w_network, w_connection])
1514
+ else:
1515
+ connection_row = HBox([w_connection])
962
1516
  slider_row = HBox([w_input_freq, self.w_delay, self.w_duration])
963
1517
 
964
1518
  ui = VBox([connection_row, button_row, slider_row, slider_columns])
@@ -967,19 +1521,31 @@ class SynapseTuner:
967
1521
  def update_ui(*args):
968
1522
  clear_output()
969
1523
  display(ui)
1524
+ display(output_widget)
1525
+
970
1526
  self.vclamp = w_vclamp.value
1527
+ # Update voltage clamp amplitude if voltage clamp is enabled
1528
+ if self.vclamp:
1529
+ self.conn['spec_settings']['vclamp_amp'] = w_vclamp_amp.value
1530
+ if hasattr(self, 'general_settings'):
1531
+ self.general_settings['vclamp_amp'] = w_vclamp_amp.value
1532
+
971
1533
  self.input_mode = w_input_mode.value
972
1534
  syn_props = {var: slider.value for var, slider in self.dynamic_sliders.items()}
973
1535
  self._set_syn_prop(**syn_props)
974
- if not self.input_mode:
975
- self._simulate_model(w_input_freq.value, self.w_delay.value, w_vclamp.value)
976
- else:
977
- self._simulate_model(w_input_freq.value, self.w_duration.value, w_vclamp.value)
978
- amp = self._response_amplitude()
979
- self._plot_model(
980
- [self.general_settings["tstart"] - self.nstim.interval / 3, self.tstop]
981
- )
982
- _ = self._calc_ppr_induction_recovery(amp)
1536
+
1537
+ # Clear previous results and run simulation
1538
+ output_widget.clear_output()
1539
+ with output_widget:
1540
+ if not self.input_mode:
1541
+ self._simulate_model(w_input_freq.value, self.w_delay.value, w_vclamp.value)
1542
+ else:
1543
+ self._simulate_model(w_input_freq.value, self.w_duration.value, w_vclamp.value)
1544
+ amp = self._response_amplitude()
1545
+ self._plot_model(
1546
+ [self.general_settings["tstart"] - self.nstim.interval / 3, self.tstop]
1547
+ )
1548
+ _ = self._calc_ppr_induction_recovery(amp)
983
1549
 
984
1550
  # Function to switch between delay and duration sliders
985
1551
  def switch_slider(*args):
@@ -990,9 +1556,21 @@ class SynapseTuner:
990
1556
  self.w_delay.layout.display = "" # Show delay slider
991
1557
  self.w_duration.layout.display = "none" # Hide duration slider
992
1558
 
1559
+ # Function to handle voltage clamp toggle
1560
+ def on_vclamp_toggle(*args):
1561
+ """Handle voltage clamp toggle changes to show/hide amplitude input"""
1562
+ update_ui_layout()
1563
+ clear_output()
1564
+ display(ui)
1565
+ display(output_widget)
1566
+
993
1567
  # Link widgets to their callback functions
994
1568
  w_connection.observe(on_connection_change, names="value")
1569
+ # Link network dropdown callback only if network dropdown was created
1570
+ if w_network is not None:
1571
+ w_network.observe(on_network_change, names="value")
995
1572
  w_input_mode.observe(switch_slider, names="value")
1573
+ w_vclamp.observe(on_vclamp_toggle, names="value")
996
1574
 
997
1575
  # Hide the duration slider initially until the user selects it
998
1576
  self.w_duration.layout.display = "none" # Hide duration slider
@@ -1,4 +1,5 @@
1
1
  import argparse
2
+ from logging import raiseExceptions
2
3
  import math
3
4
  import os
4
5
  import smtplib
@@ -1081,7 +1082,9 @@ def connection_totals(
1081
1082
  total = edges[(edges[source_id_type] == source_id) & (edges[target_id_type] == target_id)]
1082
1083
  if not include_gap:
1083
1084
  try:
1084
- total = total[~total["is_gap_junction"]]
1085
+ # Handle mixed types and NaN values in is_gap_junction column
1086
+ gap_col = total["is_gap_junction"].fillna(False).astype(bool)
1087
+ total = total[~gap_col]
1085
1088
  except:
1086
1089
  # If there are no gap junctions, just continue
1087
1090
  pass
@@ -1129,7 +1132,8 @@ def percent_connections(
1129
1132
  cons = edges[(edges[source_id_type] == source_id) & (edges[target_id_type] == target_id)]
1130
1133
  if not include_gap:
1131
1134
  try:
1132
- gaps = cons["is_gap_junction"]==True
1135
+ # Handle mixed types and NaN values in is_gap_junction column
1136
+ gaps = cons["is_gap_junction"].fillna(False).astype(bool)
1133
1137
  cons = cons[~gaps]
1134
1138
  except:
1135
1139
  raise Exception("no gap junctions found to drop from connections")
@@ -1200,9 +1204,17 @@ def connection_divergence(
1200
1204
  cons = edges[(edges[source_id_type] == source_id) & (edges[target_id_type] == target_id)]
1201
1205
  if not include_gap:
1202
1206
  try:
1203
- cons = cons[~cons["is_gap_junction"]]
1207
+ # Handle mixed types and NaN values in is_gap_junction column
1208
+ gap_col = cons["is_gap_junction"].fillna(False).astype(bool)
1209
+ cons = cons[~gap_col]
1204
1210
  except:
1205
- raise Exception("no gap junctions found to drop from connections")
1211
+ raise Exception("error")
1212
+
1213
+ if cons.empty:
1214
+ if method == "mean+std":
1215
+ return (0, 0)
1216
+ else:
1217
+ return 0
1206
1218
 
1207
1219
  if convergence:
1208
1220
  if method == "min":
@@ -1213,15 +1225,16 @@ def connection_divergence(
1213
1225
  return round(count, 2)
1214
1226
  elif method == "std":
1215
1227
  std = cons["target_node_id"].value_counts().std()
1216
- return round(std, 2)
1228
+ return round(std, 2) if not np.isnan(std) else 0
1217
1229
  elif method == "mean":
1218
1230
  mean = cons["target_node_id"].value_counts().mean()
1219
- return round(mean, 2)
1231
+ return round(mean, 2) if not np.isnan(mean) else 0
1220
1232
  elif method == "mean+std": # default is mean + std
1221
1233
  mean = cons["target_node_id"].value_counts().mean()
1222
1234
  std = cons["target_node_id"].value_counts().std()
1223
- # std = cons.apply(pd.Series.value_counts).target_node_id.dropna().std() no longer a valid way
1224
- return (round(mean, 2)), (round(std, 2))
1235
+ mean = round(mean, 2) if not np.isnan(mean) else 0
1236
+ std = round(std, 2) if not np.isnan(std) else 0
1237
+ return (mean, std)
1225
1238
  else: # divergence
1226
1239
  if method == "min":
1227
1240
  count = cons["source_node_id"].value_counts().min()
@@ -1231,14 +1244,16 @@ def connection_divergence(
1231
1244
  return round(count, 2)
1232
1245
  elif method == "std":
1233
1246
  std = cons["source_node_id"].value_counts().std()
1234
- return round(std, 2)
1247
+ return round(std, 2) if not np.isnan(std) else 0
1235
1248
  elif method == "mean":
1236
1249
  mean = cons["source_node_id"].value_counts().mean()
1237
- return round(mean, 2)
1250
+ return round(mean, 2) if not np.isnan(mean) else 0
1238
1251
  elif method == "mean+std": # default is mean + std
1239
1252
  mean = cons["source_node_id"].value_counts().mean()
1240
1253
  std = cons["source_node_id"].value_counts().std()
1241
- return (round(mean, 2)), (round(std, 2))
1254
+ mean = round(mean, 2) if not np.isnan(mean) else 0
1255
+ std = round(std, 2) if not np.isnan(std) else 0
1256
+ return (mean, std)
1242
1257
 
1243
1258
  return relation_matrix(
1244
1259
  config,
@@ -1442,7 +1457,9 @@ def connection_probabilities(
1442
1457
  ]
1443
1458
  if not include_gap:
1444
1459
  try:
1445
- relevant_edges = relevant_edges[~relevant_edges["is_gap_junction"]]
1460
+ # Handle mixed types and NaN values in is_gap_junction column
1461
+ gap_col = relevant_edges["is_gap_junction"].fillna(False).astype(bool)
1462
+ relevant_edges = relevant_edges[~gap_col]
1446
1463
  except:
1447
1464
  raise Exception("no gap junctions found to drop from connections")
1448
1465
  connected_distances = eudist(relevant_edges, dist_X, dist_Y, dist_Z).values.tolist()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: bmtool
3
- Version: 0.7.5
3
+ Version: 0.7.6
4
4
  Summary: BMTool
5
5
  Home-page: https://github.com/cyneuro/bmtool
6
6
  Download-URL:
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
5
5
 
6
6
  setup(
7
7
  name="bmtool",
8
- version="0.7.5",
8
+ version="0.7.6",
9
9
  author="Neural Engineering Laboratory at the University of Missouri",
10
10
  author_email="gregglickert@mail.missouri.edu",
11
11
  description="BMTool",
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes