bmtool 0.5.4__tar.gz → 0.5.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {bmtool-0.5.4 → bmtool-0.5.5}/PKG-INFO +3 -2
  2. bmtool-0.5.5/bmtool/SLURM.py +294 -0
  3. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/connectors.py +70 -51
  4. bmtool-0.5.5/bmtool/graphs.py +170 -0
  5. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/singlecell.py +48 -7
  6. bmtool-0.5.5/bmtool/synapses.py +638 -0
  7. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool.egg-info/PKG-INFO +3 -2
  8. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool.egg-info/SOURCES.txt +2 -0
  9. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool.egg-info/requires.txt +2 -1
  10. {bmtool-0.5.4 → bmtool-0.5.5}/setup.py +3 -2
  11. bmtool-0.5.4/bmtool/graphs.py +0 -170
  12. {bmtool-0.5.4 → bmtool-0.5.5}/LICENSE +0 -0
  13. {bmtool-0.5.4 → bmtool-0.5.5}/README.md +0 -0
  14. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/__init__.py +0 -0
  15. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/__main__.py +0 -0
  16. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/bmplot.py +0 -0
  17. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/debug/__init__.py +0 -0
  18. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/debug/commands.py +0 -0
  19. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/debug/debug.py +0 -0
  20. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/manage.py +0 -0
  21. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/plot_commands.py +0 -0
  22. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/util/__init__.py +0 -0
  23. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/util/commands.py +0 -0
  24. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/util/neuron/__init__.py +0 -0
  25. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/util/neuron/celltuner.py +0 -0
  26. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool/util/util.py +0 -0
  27. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool.egg-info/dependency_links.txt +0 -0
  28. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool.egg-info/entry_points.txt +0 -0
  29. {bmtool-0.5.4 → bmtool-0.5.5}/bmtool.egg-info/top_level.txt +0 -0
  30. {bmtool-0.5.4 → bmtool-0.5.5}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: bmtool
- Version: 0.5.4
+ Version: 0.5.5
  Summary: BMTool
  Home-page: https://github.com/cyneuro/bmtool
  Download-URL:
@@ -28,7 +28,8 @@ Requires-Dist: numpy
  Requires-Dist: pandas
  Requires-Dist: questionary
  Requires-Dist: pynmodlt
- Requires-Dist: plotly
+ Requires-Dist: xarray
+ Requires-Dist: fooof
 
  # bmtool
  A collection of modules to make developing [Neuron](https://www.neuron.yale.edu/neuron/) and [BMTK](https://alleninstitute.github.io/bmtk/) models easier.
@@ -0,0 +1,294 @@
+ import time
+ import os
+ import subprocess
+ import json
+
+
+ def check_job_status(job_id):
+     """
+     Checks the status of a SLURM job using scontrol.
+
+     Args:
+         job_id (str): The SLURM job ID.
+
+     Returns:
+         str: The state of the job.
+     """
+     try:
+         result = subprocess.run(['scontrol', 'show', 'job', job_id], capture_output=True, text=True)
+         if result.returncode != 0:
+             # This check is not needed if check_interval is less than 5 min (~300 seconds):
+             # if 'slurm_load_jobs error: Invalid job id specified' in result.stderr:
+             #     return 'COMPLETED'  # Treat an invalid job ID as completed, because scontrol expires and removes job info once a job is done.
+             raise Exception(f"Error checking job status: {result.stderr}")
+
+         job_state = None
+         for line in result.stdout.split('\n'):
+             if 'JobState=' in line:
+                 job_state = line.strip().split('JobState=')[1].split()[0]
+                 break
+
+         if job_state is None:
+             raise Exception(f"Failed to retrieve job status for job ID: {job_id}")
+
+         return job_state
+     except Exception as e:
+         print(f"Exception while checking job status: {e}", flush=True)
+         return 'UNKNOWN'
+
+
+ def submit_job(script_path):
+     """
+     Submits a SLURM job script.
+
+     Args:
+         script_path (str): The path to the SLURM job script.
+
+     Returns:
+         str: The job ID of the submitted job.
+
+     Raises:
+         Exception: If there is an error in submitting the job.
+     """
+     result = subprocess.run(['sbatch', script_path], capture_output=True, text=True)
+     if result.returncode != 0:
+         raise Exception(f"Error submitting job: {result.stderr}")
+     job_id = result.stdout.strip().split()[-1]
+     return job_id
+
+
+ class seedSweep:
+     def __init__(self, json_file_path, param_name):
+         """
+         Initializes the seedSweep instance.
+
+         Args:
+             json_file_path (str): Path to the JSON file to be updated.
+             param_name (str): The name of the parameter to be modified.
+         """
+         self.json_file_path = json_file_path
+         self.param_name = param_name
+
+     def edit_json(self, new_value):
+         """
+         Updates the JSON file with a new parameter value.
+
+         Args:
+             new_value: The new value for the parameter.
+         """
+         with open(self.json_file_path, 'r') as f:
+             data = json.load(f)
+
+         data[self.param_name] = new_value
+
+         with open(self.json_file_path, 'w') as f:
+             json.dump(data, f, indent=4)
+
+         print(f"JSON file '{self.json_file_path}' modified successfully with {self.param_name}={new_value}.", flush=True)
+
+
+     def change_json_file_path(self, new_json_file_path):
+         self.json_file_path = new_json_file_path
+
+
+ # This could just be folded into seedSweep, but for now it is a separate class since that was easier.
+ class multiSeedSweep(seedSweep):
+     """
+     multiSeedSweep instances are centered around some base JSON cell file. When that base JSON is updated,
+     the other JSONs change according to their ratio with the base JSON.
+     """
+     def __init__(self, base_json_file_path, param_name, syn_dict_list=[], base_ratio=1):
+         """
+         Initializes the multiSeedSweep instance.
+
+         Args:
+             base_json_file_path (str): File path for the base JSON file.
+             param_name (str): The name of the parameter to be modified.
+             syn_dict_list (list): A list of dictionaries, each containing the 'json_file_path' and 'ratio' (in comparison to the base JSON) for one JSON file.
+             base_ratio (float): The ratio for the base JSON; usually the current value of the parameter.
+         """
+         super().__init__(base_json_file_path, param_name)
+         self.syn_dict_list = syn_dict_list
+         self.base_ratio = base_ratio
+
+     def edit_all_jsons(self, new_value):
+         """
+         Updates the base JSON file with a new parameter value, then updates the other JSON files based on their ratios.
+
+         Args:
+             new_value: The new value for the parameter in the base JSON.
+         """
+         self.edit_json(new_value)
+         base_ratio = self.base_ratio
+         for syn_dict in self.syn_dict_list:
+             json_file_path = syn_dict['json_file_path']
+             new_ratio = syn_dict['ratio'] / base_ratio
+
+             with open(json_file_path, 'r') as f:
+                 data = json.load(f)
+             altered_value = new_ratio * new_value
+             data[self.param_name] = altered_value
+
+             with open(json_file_path, 'w') as f:
+                 json.dump(data, f, indent=4)
+
+             print(f"JSON file '{json_file_path}' modified successfully with {self.param_name}={altered_value}.", flush=True)
+
+
+ class SimulationBlock:
+     def __init__(self, block_name, time, partition, nodes, ntasks, mem, simulation_cases, output_base_dir, account=None, additional_commands=None,
+                  status_list=['COMPLETED', 'FAILED', 'CANCELLED']):
+         """
+         Initializes the SimulationBlock instance.
+
+         Args:
+             block_name (str): Name of the block.
+             time (str): Time limit for the job.
+             partition (str): Partition to submit the job to.
+             nodes (int): Number of nodes to request.
+             ntasks (int): Number of tasks.
+             mem (int): Memory in gigabytes (per node).
+             simulation_cases (dict): Dictionary of simulation cases with their commands.
+             output_base_dir (str): Base directory for the output files.
+             account (str): Account to charge on the HPC.
+             additional_commands (list): Commands to run before the BMTK model starts; useful for loading modules.
+             status_list (list): List of statuses a job must reach before the next block runs.
+                 Adding RUNNING runs blocks faster but uses MUCH more resources and is only recommended on a large HPC.
+         """
+         self.block_name = block_name
+         self.time = time
+         self.partition = partition
+         self.nodes = nodes
+         self.ntasks = ntasks
+         self.mem = mem
+         self.simulation_cases = simulation_cases
+         self.output_base_dir = output_base_dir
+         self.account = account
+         self.additional_commands = additional_commands if additional_commands is not None else []
+         self.status_list = status_list
+         self.job_ids = []
+
+     def create_batch_script(self, case_name, command):
+         """
+         Creates a SLURM batch script for the given simulation case.
+
+         Args:
+             case_name (str): Name of the simulation case.
+             command (str): Command to run the simulation.
+
+         Returns:
+             str: Path to the batch script file.
+         """
+         block_output_dir = os.path.join(self.output_base_dir, self.block_name)  # Create block-specific output folder
+         case_output_dir = os.path.join(block_output_dir, case_name)  # Create case-specific output folder
+         os.makedirs(case_output_dir, exist_ok=True)
+
+         batch_script_path = os.path.join(block_output_dir, 'script.sh')
+         additional_commands_str = "\n".join(self.additional_commands)
+         # Conditional account line
+         account_line = f"#SBATCH --account={self.account}\n" if self.account else ""
+
+         # Write the batch script to the file
+         with open(batch_script_path, 'w') as script_file:
+             script_file.write(f"""#!/bin/bash
+ #SBATCH --job-name={self.block_name}_{case_name}
+ #SBATCH --output={block_output_dir}/%x_%j.out
+ #SBATCH --error={block_output_dir}/%x_%j.err
+ #SBATCH --time={self.time}
+ #SBATCH --partition={self.partition}
+ #SBATCH --nodes={self.nodes}
+ #SBATCH --ntasks={self.ntasks}
+ #SBATCH --mem={self.mem}
+ {account_line}
+
+ # Additional user-defined commands
+ {additional_commands_str}
+
+ export OUTPUT_DIR={case_output_dir}
+
+ {command}
+ """)
+
+         # print(f"Batch script created: {batch_script_path}", flush=True)
+
+         return batch_script_path
+
+     def submit_block(self):
+         """
+         Submits all simulation cases in the block as separate SLURM jobs.
+         """
+         for case_name, command in self.simulation_cases.items():
+             script_path = self.create_batch_script(case_name, command)
+             result = subprocess.run(['sbatch', script_path], capture_output=True, text=True)
+             if result.returncode == 0:
+                 job_id = result.stdout.strip().split()[-1]
+                 self.job_ids.append(job_id)
+                 print(f"Submitted {case_name} with job ID {job_id}", flush=True)
+             else:
+                 print(f"Failed to submit {case_name}: {result.stderr}", flush=True)
+
+     def check_block_status(self):
+         """
+         Checks the status of all jobs in the block.
+
+         Returns:
+             bool: True if all jobs in the block are completed, False otherwise.
+         """
+         for job_id in self.job_ids:
+             status = check_job_status(job_id)
+             if status not in self.status_list:
+                 return False
+         return True
+
+
+ class SequentialBlockRunner:
+     """
+     Class to handle submitting multiple blocks sequentially.
+
+     Attributes:
+         blocks (list): List of SimulationBlock instances to be run.
+         json_editor (seedSweep or multiSeedSweep): Instance used to edit the JSON file.
+         param_values (list): List of values for the parameter to be modified.
+         check_interval (int): Seconds to wait between job status checks.
+     """
+
+     def __init__(self, blocks, json_editor=None, param_values=None, check_interval=200):
+         self.blocks = blocks
+         self.json_editor = json_editor
+         self.param_values = param_values
+         self.check_interval = check_interval
+
+     def submit_blocks_sequentially(self):
+         """
+         Submits all blocks sequentially, ensuring each block starts only after the previous block has completed.
+         Updates the JSON file with new parameters before each block run.
+         """
+         for i, block in enumerate(self.blocks):
+             # Update JSON file with new parameter value
+             if self.json_editor is None and self.param_values is None:
+                 print(f"Skipping JSON editing for block {block.block_name}", flush=True)
+             else:
+                 if len(self.blocks) != len(self.param_values):
+                     raise Exception("Number of blocks must equal the number of parameter values given")
+                 new_value = self.param_values[i]
+                 # Note: the multi-JSON path has not been tested but should work
+                 if isinstance(self.json_editor, multiSeedSweep):
+                     self.json_editor.edit_all_jsons(new_value)
+                 elif isinstance(self.json_editor, seedSweep):
+                     print(f"Updating JSON file with parameter value for block: {block.block_name}", flush=True)
+                     self.json_editor.edit_json(new_value)
+                 else:
+                     raise Exception("json_editor was provided but is not a seedSweep (or subclass) instance")
+
+             # Submit the block
+             print(f"Submitting block: {block.block_name}", flush=True)
+             block.submit_block()
+
+             # Wait for the block to complete
+             while not block.check_block_status():
+                 print(f"Waiting for block {block.block_name} to complete...", flush=True)
+                 time.sleep(self.check_interval)
+
+             print(f"Block {block.block_name} completed.", flush=True)
+         print("All blocks are done!", flush=True)
+
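For orientation, here is a minimal usage sketch of the new bmtool.SLURM module (not part of the diff; the JSON path, partition, memory, and run command below are hypothetical):

    from bmtool.SLURM import seedSweep, SimulationBlock, SequentialBlockRunner

    # Sweep one synaptic parameter over two sequential blocks (hypothetical values).
    editor = seedSweep('components/synaptic_models/AMPA_ExcToExc.json', 'initW')
    cases = {'baseline': 'mpirun nrniv -mpi -python run_network.py simulation_config.json'}
    blocks = [SimulationBlock(f'block{i}', time='04:00:00', partition='general',
                              nodes=1, ntasks=40, mem=80, simulation_cases=cases,
                              output_base_dir='../Run-Storage')
              for i in range(2)]

    # Each block is submitted only after the previous one reaches a state in status_list.
    runner = SequentialBlockRunner(blocks, json_editor=editor,
                                   param_values=[1.0, 2.0], check_interval=60)
    runner.submit_blocks_sequentially()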
@@ -222,7 +222,7 @@ class GaussianDropoff(DistantDependentProbability):
                      "Probability crosses 1 at distance %.3g.\n") % (pmax, d)
              if self.ptotal is not None:
                  warn += " ptotal may not be reached."
-             print(warn)
+             print(warn, flush=True)
              self.probability = lambda dist: np.fmin(probability(dist), 1.)
          else:
              self.probability = probability
@@ -335,7 +335,7 @@ class Timer(object):
          return (time.perf_counter() - self._start) * self.scale
 
      def report(self, msg='Run time'):
-         print((msg + ": %.3f " + self.unit) % self.end())
+         print((msg + ": %.3f " + self.unit) % self.end(), flush=True)
 
 
  def pr_2_rho(p0, p1, pr):
@@ -355,7 +355,7 @@ def rho_2_pr(p0, p1, rho):
      pr0, pr = pr, np.max((0., p0 + p1 - 1, np.min((p0, p1, pr))))
      rho0, rho = rho, (pr - p0 * p1) / (p0 * (1 - p0) * p1 * (1 - p1)) ** .5
      print('rho changed from %.3f to %.3f; pr changed from %.3f to %.3f'
-           % (rho0, rho, pr0, pr))
+           % (rho0, rho, pr0, pr), flush=True)
      return pr
 
 
@@ -534,7 +534,7 @@ class ReciprocalConnector(AbstractConnector):
                   pr=0., pr_arg=None, estimate_rho=True, rho=None,
                   dist_range_forward=None, dist_range_backward=None,
                   n_syn0=1, n_syn1=1, autapses=False,
-                  quick_pop_check=False, cache_data=True, verbose=True, save_report=True):
+                  quick_pop_check=False, cache_data=True, verbose=True, save_report=True, report_name='connection_report.csv'):
          args = locals()
          var_set = ('p0', 'p0_arg', 'p1', 'p1_arg',
                     'pr', 'pr_arg', 'n_syn0', 'n_syn1')
@@ -553,6 +553,7 @@ class ReciprocalConnector(AbstractConnector):
          self.cache = self.ConnectorCache(cache_data and self.estimate_rho)
          self.verbose = verbose
          self.save_report = save_report
+         self.report_name = report_name
 
          self.conn_prop = [{}, {}]
          self.stage = 0
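As a usage sketch of the new report_name argument (not part of the diff; the populations and probabilities below are made up), each connector can now write to its own CSV instead of the shared 'connection_report.csv' default:

    from bmtool.connectors import ReciprocalConnector

    # Hypothetical BMTK populations; the report goes to a per-pathway file.
    connector = ReciprocalConnector(p0=0.1, p1=0.1, estimate_rho=False,
                                    report_name='PN_ITN_report.csv')
    connector.setup_nodes(source=net.nodes(pop_name='PN'),
                          target=net.nodes(pop_name='ITN'))
    net.add_edges(**connector.edge_params())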
@@ -667,12 +668,12 @@ class ReciprocalConnector(AbstractConnector):
                  fetch = out_len > 0
                  if not fetch:
                      print("\nWarning: Cache did not work properly for "
-                           + func_name + '\n')
+                           + func_name + '\n', flush=True)
                  self.fetch_output(func_name, fetch)
                  self.iter_count = 0
              else:
                  # if output not correct, disable and use original function
-                 print("\nWarning: Cache did not work properly.\n")
+                 print("\nWarning: Cache did not work properly.\n", flush=True)
                  for func_name in self.cache_dict:
                      self.fetch_output(func_name, False)
                  self.enable = False
@@ -831,7 +832,7 @@ class ReciprocalConnector(AbstractConnector):
                  self.cache.cache_output(var, name, name in self.callable_set)
          if self.verbose and len(self.cache.cache_dict):
              print('Output of %s will be cached.'
-                   % ', '.join(self.cache.cache_dict))
+                   % ', '.join(self.cache.cache_dict), flush=True)
 
      def setup_dist_range_checker(self):
          # Checker that determines whether to consider a pair for rho estimation
@@ -867,7 +868,7 @@ class ReciprocalConnector(AbstractConnector):
          if self.verbose:
              src_str, trg_str = self.get_nodes_info()
              print("\nStart building connection between: \n "
-                   + src_str + "\n " + trg_str)
+                   + src_str + "\n " + trg_str, flush=True)
          self.initialize()
          cache = self.cache  # write mode
 
@@ -892,11 +893,11 @@ class ReciprocalConnector(AbstractConnector):
              rho = (self.pr() * n - p0p1_sum) / norm_fac_sum
              if abs(rho) > 1:
                  print("\nWarning: Estimated value of rho=%.3f "
-                       "outside the range [-1, 1]." % rho)
+                       "outside the range [-1, 1]." % rho, flush=True)
                  rho = np.clip(rho, -1, 1).item()
-                 print("Force rho to be %.0f.\n" % rho)
+                 print("Force rho to be %.0f.\n" % rho, flush=True)
              elif self.verbose:
-                 print("Estimated value of rho=%.3f" % rho)
+                 print("Estimated value of rho=%.3f" % rho, flush=True)
              self.rho = rho
          else:
              self.rho = 0
@@ -948,7 +949,7 @@ class ReciprocalConnector(AbstractConnector):
          if self.verbose:
              self.timer.report('Total time for creating connection matrix')
              if self.wrong_pr:
-                 print("Warning: Value of 'pr' outside the bounds occurred.\n")
+                 print("Warning: Value of 'pr' outside the bounds occurred.\n", flush=True)
              self.connection_number_info()
          if self.save_report:
              self.save_connection_report()
@@ -976,7 +977,7 @@ class ReciprocalConnector(AbstractConnector):
              self.stage = 0
              self.initial_all_to_all()
              if self.verbose:
-                 print("Assigning forward connections.")
+                 print("Assigning forward connections.", flush=True)
                  self.timer.start()
          return self.make_connection()
 
@@ -985,7 +986,7 @@ class ReciprocalConnector(AbstractConnector):
          if self.iter_count == 0:
              self.stage = 1
              if self.verbose:
-                 print("Assigning backward connections.")
+                 print("Assigning backward connections.", flush=True)
          return self.make_connection()
 
      def free_memory(self):
@@ -1038,14 +1039,14 @@ class ReciprocalConnector(AbstractConnector):
          n_conn, n_poss, n_pair, fraction = self.connection_number()
          conn_type = "(all, reciprocal)" if self.recurrent \
              else "(forward, backward, reciprocal)"
-         print("Numbers of " + conn_type + " connections:")
-         print("Number of connected pairs: (%s)" % arr2str(n_conn, '%d'))
-         print("Number of possible connections: (%s)" % arr2str(n_poss, '%d'))
+         print("Numbers of " + conn_type + " connections:", flush=True)
+         print("Number of connected pairs: (%s)" % arr2str(n_conn, '%d'), flush=True)
+         print("Number of possible connections: (%s)" % arr2str(n_poss, '%d'), flush=True)
          print("Fraction of connected pairs in possible ones: (%s)"
-               % arr2str(100 * fraction[0], '%.2f%%'))
-         print("Number of total pairs: %d" % n_pair)
+               % arr2str(100 * fraction[0], '%.2f%%'), flush=True)
+         print("Number of total pairs: %d" % n_pair, flush=True)
          print("Fraction of connected pairs in all pairs: (%s)\n"
-               % arr2str(100 * fraction[1], '%.2f%%'))
+               % arr2str(100 * fraction[1], '%.2f%%'), flush=True)
 
      def save_connection_report(self):
          """Save connections into a CSV file to be read from later"""
@@ -1064,15 +1065,14 @@ class ReciprocalConnector(AbstractConnector):
          # Append the data to the CSV file
          try:
              # Check if the file exists by trying to read it
-             existing_df = pd.read_csv('connection_report.csv')
+             existing_df = pd.read_csv(self.report_name)
              # If no exception is raised, append without header
-             df.to_csv('connection_report.csv', mode='a', header=False, index=False)
+             df.to_csv(self.report_name, mode='a', header=False, index=False)
          except FileNotFoundError:
              # If the file does not exist, write with header
-             df.to_csv('connection_report.csv', mode='w', header=True, index=False)
+             df.to_csv(self.report_name, mode='w', header=True, index=False)
 
 
-
  class UnidirectionConnector(AbstractConnector):
      """
      Object for building unidirectional connections in bmtk network model with
@@ -1104,13 +1104,14 @@ class UnidirectionConnector(AbstractConnector):
      This is useful in a similar manner as in ReciprocalConnector.
      """
 
-     def __init__(self, p=1., p_arg=None, n_syn=1, verbose=True, save_report=True):
+     def __init__(self, p=1., p_arg=None, n_syn=1, verbose=True, save_report=True, report_name='connection_report.csv'):
          args = locals()
          var_set = ('p', 'p_arg', 'n_syn')
          self.vars = {key: args[key] for key in var_set}
 
          self.verbose = verbose
          self.save_report = save_report
+         self.report_name = report_name
          self.conn_prop = {}
          self.iter_count = 0
 
@@ -1167,7 +1168,7 @@ class UnidirectionConnector(AbstractConnector):
          if self.verbose:
              src_str, trg_str = self.get_nodes_info()
              print("\nStart building connection \n from "
-                   + src_str + "\n to " + trg_str)
+                   + src_str + "\n to " + trg_str, flush=True)
 
          # Make random connections
          p_arg = self.p_arg(source, target)
@@ -1202,39 +1203,37 @@ class UnidirectionConnector(AbstractConnector):
 
      def connection_number_info(self):
          """Print connection numbers after connections built"""
-         print("Number of connected pairs: %d" % self.n_conn)
-         print("Number of possible connections: %d" % self.n_poss)
+         print("Number of connected pairs: %d" % self.n_conn, flush=True)
+         print("Number of possible connections: %d" % self.n_poss, flush=True)
          print("Fraction of connected pairs in possible ones: %.2f%%"
                % (100. * self.n_conn / self.n_poss) if self.n_poss else 0.)
-         print("Number of total pairs: %d" % self.n_pair)
+         print("Number of total pairs: %d" % self.n_pair, flush=True)
          print("Fraction of connected pairs in all pairs: %.2f%%\n"
-               % (100. * self.n_conn / self.n_pair))
+               % (100. * self.n_conn / self.n_pair), flush=True)
 
      def save_connection_report(self):
          """Save connections into a CSV file to be read from later"""
          src_str, trg_str = self.get_nodes_info()
-         n_pair = self.n_pair
-         fraction_0 = self.n_conn / self.n_poss if self.n_poss else 0.
-         fraction_1 = self.n_conn / self.n_pair
+         n_conn, n_poss, n_pair, fraction = self.connection_number()
 
-         # Convert fraction to percentage and prepare data for the DataFrame
+         # Prepare the report data for the DataFrame
          data = {
              "Source": [src_str],
              "Target": [trg_str],
-             "Fraction of connected pairs in possible ones (%)": [fraction_0 * 100],
-             "Fraction of connected pairs in all pairs (%)": [fraction_1 * 100]
+             "Fraction of connected pairs in possible ones (%)": [fraction[0] * 100],
+             "Fraction of connected pairs in all pairs (%)": [fraction[1] * 100]
          }
          df = pd.DataFrame(data)
 
          # Append the data to the CSV file
          try:
              # Check if the file exists by trying to read it
-             existing_df = pd.read_csv('connection_report.csv')
+             existing_df = pd.read_csv(self.report_name)
              # If no exception is raised, append without header
-             df.to_csv('connection_report.csv', mode='a', header=False, index=False)
+             df.to_csv(self.report_name, mode='a', header=False, index=False)
          except FileNotFoundError:
              # If the file does not exist, write with header
-             df.to_csv('connection_report.csv', mode='w', header=True, index=False)
+             df.to_csv(self.report_name, mode='w', header=True, index=False)
 
 
  class GapJunction(UnidirectionConnector):
@@ -1258,8 +1257,8 @@ class GapJunction(UnidirectionConnector):
      Similar to `UnidirectionConnector`.
      """
 
-     def __init__(self, p=1., p_arg=None, verbose=True, save_report=True):
-         super().__init__(p=p, p_arg=p_arg, verbose=verbose, save_report=save_report)
+     def __init__(self, p=1., p_arg=None, verbose=True, report_name='connection_report.csv'):
+         super().__init__(p=p, p_arg=p_arg, verbose=verbose, report_name=report_name)
 
      def setup_nodes(self, source=None, target=None):
          super().setup_nodes(source=source, target=target)
@@ -1275,7 +1274,7 @@ class GapJunction(UnidirectionConnector):
          self.initialize()
          if self.verbose:
              src_str, _ = self.get_nodes_info()
-             print("\nStart building gap junction \n in " + src_str)
+             print("\nStart building gap junction \n in " + src_str, flush=True)
 
          # Consider each pair only once
          nsyns = 0
@@ -1328,12 +1327,12 @@ class GapJunction(UnidirectionConnector):
          # Append the data to the CSV file
          try:
              # Check if the file exists by trying to read it
-             existing_df = pd.read_csv('connection_report.csv')
+             existing_df = pd.read_csv(self.report_name)
              # If no exception is raised, append without header
-             df.to_csv('connection_report.csv', mode='a', header=False, index=False)
+             df.to_csv(self.report_name, mode='a', header=False, index=False)
          except FileNotFoundError:
              # If the file does not exist, write with header
-             df.to_csv('connection_report.csv', mode='w', header=True, index=False)
+             df.to_csv(self.report_name, mode='w', header=True, index=False)
 
 
  class CorrelatedGapJunction(GapJunction):
@@ -1364,8 +1363,8 @@ class CorrelatedGapJunction(GapJunction):
      """
 
      def __init__(self, p_non=1., p_uni=1., p_rec=1., p_arg=None,
-                  connector=None, verbose=True, save_report=True):
-         super().__init__(p=p_non, p_arg=p_arg, verbose=verbose, save_report=save_report)
+                  connector=None, verbose=True):
+         super().__init__(p=p_non, p_arg=p_arg, verbose=verbose)
          self.vars['p_non'] = self.vars.pop('p')
          self.vars['p_uni'] = p_uni
          self.vars['p_rec'] = p_rec
@@ -1402,7 +1401,7 @@ class CorrelatedGapJunction(GapJunction):
          self.initialize()
          if self.verbose:
              src_str, _ = self.get_nodes_info()
-             print("\nStart building gap junction \n in " + src_str)
+             print("\nStart building gap junction \n in " + src_str, flush=True)
 
          # Consider each pair only once
          nsyns = 0
@@ -1512,7 +1511,7 @@ class OneToOneSequentialConnector(AbstractConnector):
 
          if self.verbose and self.idx_range[-1] == self.n_source:
              print("All " + ("source" if self.partition_source else "target")
-                   + " population partitions are filled.")
+                   + " population partitions are filled.", flush=True)
 
      def edge_params(self, target_pop_idx=-1):
          """Create the arguments for BMTK add_edges() method"""
@@ -1537,14 +1536,14 @@ class OneToOneSequentialConnector(AbstractConnector):
              self.target_count = 0
              src_str, trg_str = self.get_nodes_info()
              print("\nStart building connection " +
-                   ("to " if self.partition_source else "from ") + src_str)
+                   ("to " if self.partition_source else "from ") + src_str, flush=True)
              self.timer = Timer()
 
          if self.iter_count == self.idx_range[self.target_count]:
              # Beginning of each target population
              src_str, trg_str = self.get_nodes_info(self.target_count)
              print((" %d. " % self.target_count) +
-                   ("from " if self.partition_source else "to ") + trg_str)
+                   ("from " if self.partition_source else "to ") + trg_str, flush=True)
              self.target_count += 1
              self.timer_part = Timer()
 
@@ -1581,6 +1580,18 @@ FLUC_STDEV = 0.2 # ms
  DELAY_LOWBOUND = 0.2  # ms must be greater than h.dt
  DELAY_UPBOUND = 2.0  # ms
 
+ def syn_const_delay(source=None, target=None, dist=100,
+                     min_delay=SYN_MIN_DELAY, velocity=SYN_VELOCITY,
+                     fluc_stdev=FLUC_STDEV, delay_bound=(DELAY_LOWBOUND, DELAY_UPBOUND),
+                     connector=None):
+     """Synapse delay at a constant assumed distance, with some random fluctuation."""
+     del_fluc = fluc_stdev * rng.normal()
+     delay = dist / velocity + min_delay + del_fluc
+     delay = min(max(delay, delay_bound[0]), delay_bound[1])
+     return delay
+
+
  def syn_dist_delay_feng(source, target, min_delay=SYN_MIN_DELAY,
                          velocity=SYN_VELOCITY, fluc_stdev=FLUC_STDEV,
                          delay_bound=(DELAY_LOWBOUND, DELAY_UPBOUND),
@@ -1610,6 +1621,14 @@ def syn_section_PN(source, target, p=0.9,
      return sec_id[syn_loc], sec_x[syn_loc]
 
 
+ def syn_const_delay_feng_section_PN(source, target, p=0.9,
+                                     sec_id=(1, 2), sec_x=(0.4, 0.6), **kwargs):
+     """Assign both synapse delay and location, with a constant distance assumed"""
+     delay = syn_const_delay(source, target, **kwargs)
+     s_id, s_x = syn_section_PN(source, target, p=p, sec_id=sec_id, sec_x=sec_x)
+     return delay, s_id, s_x
+
+
  def syn_dist_delay_feng_section_PN(source, target, p=0.9,
                                     sec_id=(1, 2), sec_x=(0.4, 0.6), **kwargs):
      """Assign both synapse delay and location"""