bmtool 0.5.2__py3-none-any.whl → 0.5.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bmtool/bmplot.py +76 -1
- bmtool/connectors.py +216 -59
- bmtool/graphs.py +170 -0
- bmtool/singlecell.py +4 -4
- bmtool/util/util.py +85 -18
- {bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/METADATA +139 -3
- {bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/RECORD +11 -10
- {bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/WHEEL +1 -1
- {bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/LICENSE +0 -0
- {bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/entry_points.txt +0 -0
- {bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/top_level.txt +0 -0
bmtool/bmplot.py
CHANGED
@@ -3,7 +3,6 @@ Want to be able to take multiple plot names in and plot them all at the same tim
 https://stackoverflow.com/questions/458209/is-there-a-way-to-detach-matplotlib-plots-so-that-the-computation-can-continue
 """
 from .util import util
-
 import argparse,os,sys
 
 import numpy as np
@@ -267,6 +266,82 @@ def divergence_connection_matrix(config=None,title=None,sources=None, targets=No
     plot_connection_info(syn_info,data,source_labels,target_labels,title, save_file=save_file)
     return
 
+def gap_junction_matrix(config=None,title=None,sources=None, targets=None, sids=None,tids=None, no_prepend_pop=False,save_file=None,type='convergence'):
+    """
+    Generates connection plot displaying gap junction data.
+    config: A BMTK simulation config
+    sources: network name(s) to plot
+    targets: network name(s) to plot
+    sids: source node identifier
+    tids: target node identifier
+    no_prepend_pop: dictates if population name is displayed before sid or tid when displaying graph
+    save_file: If plot should be saved
+    type: 'convergence' or 'percent' connections
+    """
+    if not config:
+        raise Exception("config not defined")
+    if not sources or not targets:
+        raise Exception("Sources or targets not defined")
+    if type != 'convergence' and type != 'percent':
+        raise Exception("type must be 'convergence' or 'percent'")
+    sources = sources.split(",")
+    targets = targets.split(",")
+    if sids:
+        sids = sids.split(",")
+    else:
+        sids = []
+    if tids:
+        tids = tids.split(",")
+    else:
+        tids = []
+    syn_info, data, source_labels, target_labels = util.gap_junction_connections(config=config,nodes=None,edges=None,sources=sources,targets=targets,sids=sids,tids=tids,prepend_pop=not no_prepend_pop,type=type)
+
+
+    def filter_rows(syn_info, data, source_labels, target_labels):
+        new_syn_info = syn_info
+        new_data = data
+        new_source_labels = source_labels
+        new_target_labels = target_labels
+        for row in new_data:
+            row_index = -1
+            try:
+                if np.isnan(row).all():  # checks if all of a row is nan
+                    row_index = np.where(np.isnan(new_data) == np.isnan(row))[0][0]
+            except:
+                row_index = -1
+            finally:
+                if all(x == 0 for x in row):  # checks if all of a row is zeroes
+                    row_index = np.where(new_data == row)[0][0]
+                if row_index != -1:  # deletes corresponding row accordingly in all relevant variables
+                    new_syn_info = np.delete(new_syn_info, row_index, 0)
+                    new_data = np.delete(new_data, row_index, 0)
+                    new_source_labels = np.delete(new_source_labels, row_index)
+        return new_syn_info, new_data, new_source_labels, new_target_labels
+
+    def filter_rows_and_columns(syn_info, data, source_labels, target_labels):
+        syn_info, data, source_labels, target_labels = filter_rows(syn_info, data, source_labels, target_labels)
+        transposed_syn_info = np.transpose(syn_info)  # transpose everything so that columns get filtered too
+        transposed_data = np.transpose(data)
+        transposed_source_labels = target_labels
+        transposed_target_labels = source_labels
+        syn_info, data, source_labels, target_labels = filter_rows(transposed_syn_info, transposed_data, transposed_source_labels, transposed_target_labels)
+        filtered_syn_info = np.transpose(syn_info)  # transpose everything back to original order after filtering
+        filtered_data = np.transpose(data)
+        filtered_source_labels = target_labels
+        filtered_target_labels = source_labels
+        return filtered_syn_info, filtered_data, filtered_source_labels, filtered_target_labels
+
+    syn_info, data, source_labels, target_labels = filter_rows_and_columns(syn_info, data, source_labels, target_labels)
+
+    if title == None or title == "":
+        title = 'Gap Junction'
+    if type == 'convergence':
+        title += ' Syn Convergence'
+    elif type == 'percent':
+        title += ' Percent Connectivity'
+    plot_connection_info(syn_info,data,source_labels,target_labels,title, save_file=save_file)
+    return
+
 def connection_histogram(config=None,nodes=None,edges=None,sources=[],targets=[],sids=[],tids=[],prepend_pop=True,synaptic_info='0',
                          source_cell = None,target_cell = None,include_gap=True):
     """
bmtool/connectors.py
CHANGED
@@ -4,6 +4,7 @@ from scipy.special import erf
 from scipy.optimize import minimize_scalar
 from functools import partial
 import time
+import types
 
 rng = np.random.default_rng()
 
@@ -11,6 +12,13 @@ rng = np.random.default_rng()
 ############################## CONNECT CELLS #################################
 
 # Utility Functions
+def num_prop(ratio, N):
+    """Calculate numbers of total N in proportion to ratio"""
+    ratio = np.asarray(ratio)
+    p = np.cumsum(np.insert(ratio.ravel(), 0, 0))  # cumulative proportion
+    return np.diff(np.round(N / p[-1] * p).astype(int)).reshape(ratio.shape)
+
+
 def decision(prob, size=None):
     """
     Make single random decision based on input probability.
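
A quick, hedged illustration of what the new `num_prop` helper computes: it splits an integer total `N` across groups in proportion to `ratio`, using cumulative rounding so the parts always sum to `N`. The numbers below are made up; the helper is copied from the hunk above so the snippet runs standalone.

```python
import numpy as np

def num_prop(ratio, N):
    """Calculate numbers of total N in proportion to ratio (as added above)."""
    ratio = np.asarray(ratio)
    p = np.cumsum(np.insert(ratio.ravel(), 0, 0))  # cumulative proportion
    return np.diff(np.round(N / p[-1] * p).astype(int)).reshape(ratio.shape)

print(num_prop([4, 1], 100))    # [80 20]
print(num_prop([1, 1, 1], 10))  # [3 4 3] -- rounding still keeps the total at 10
```
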
@@ -76,10 +84,10 @@ class DistantDependentProbability(ProbabilityFunction):
         assert(min_dist >= 0 and min_dist < max_dist)
         self.min_dist, self.max_dist = min_dist, max_dist
 
-    def __call__(self, dist):
+    def __call__(self, dist, *arg, **kwargs):
         """Return probability for single distance input"""
         if dist >= self.min_dist and dist <= self.max_dist:
-            return self.probability(dist)
+            return self.probability(dist)
         else:
             return 0.
 
@@ -88,7 +96,10 @@ class DistantDependentProbability(ProbabilityFunction):
         dist = np.asarray(dist)
         dec = np.zeros(dist.shape, dtype=bool)
         mask = (dist >= self.min_dist) & (dist <= self.max_dist)
-
+        dist = dist[mask]
+        prob = np.empty(dist.shape)
+        prob[:] = self.probability(dist)
+        dec[mask] = decisions(prob)
         return dec
 
 
@@ -99,7 +110,7 @@ class UniformInRange(DistantDependentProbability):
         super().__init__(min_dist=min_dist, max_dist=max_dist)
         self.p = np.array(p)
         assert(self.p.size == 1)
-        assert(
+        assert(p >= 0. and p <= 1.)
 
     def probability(self, dist):
         return self.p
@@ -127,7 +138,7 @@ class GaussianDropoff(DistantDependentProbability):
         input pmax, and calculate pmax. See calc_pmax_from_ptotal() method.
     ptotal_dist_range: Distance range for calculating pmax when ptotal is
         specified. If not specified, set to range (min_dist, max_dist).
-    dist_type: spherical or cylindrical for distance metric.
+    dist_type: 'spherical' or 'cylindrical' for distance metric.
         Used when ptotal is specified.
 
     Returns:
@@ -215,6 +226,51 @@ class GaussianDropoff(DistantDependentProbability):
         self.probability = probability
 
 
+class NormalizedReciprocalRate(ProbabilityFunction):
+    """Reciprocal connection probability given normalized reciprocal rate.
+    Normalized reciprocal rate is defined as the ratio between the reciprocal
+    connection probability and the connection probability for a randomly
+    connected network where the two unidirectional connections between any pair
+    of neurons are independent. NRR = pr / (p0 * p1)
+
+    Parameters:
+        NRR: a constant or distance dependent function for normalized reciprocal
+            rate. When being a function, it should accept vectorized input.
+    Returns:
+        A callable object that returns the probability value.
+    """
+
+    def __init__(self, NRR=1.):
+        self.NRR = NRR if callable(NRR) else lambda *x: NRR
+
+    def probability(self, dist, p0, p1):
+        """Allow numpy array input and return probability in numpy array"""
+        return p0 * p1 * self.NRR(dist)
+
+    def __call__(self, dist, p0, p1, *arg, **kwargs):
+        """Return probability for single distance input"""
+        return self.probability(dist, p0, p1)
+
+    def decisions(self, dist, p0, p1, cond=None):
+        """Return bool array of decisions
+        dist: distance (scalar or array). Will be ignored if NRR is constant.
+        p0, p1: forward and backward probability (scalar or array)
+        cond: A tuple (direction, array of outcomes) representing the condition.
+            Conditional probability will be returned if specified. The condition
+            event is determined by connection direction (0 for forward, or 1 for
+            backward) and outcomes (bool array of whether connection exists).
+        """
+        dist, p0, p1 = map(np.asarray, (dist, p0, p1))
+        pr = np.empty(dist.shape)
+        pr[:] = self.probability(dist, p0, p1)
+        pr = np.clip(pr, a_min=np.fmax(p0 + p1 - 1., 0.), a_max=np.fmin(p0, p1))
+        if cond is not None:
+            mask = np.asarray(cond[1])
+            pr[mask] /= p1 if cond[0] else p0
+            pr[~mask] = 0.
+        return decisions(pr)
+
+
 # Connector Classes
 class AbstractConnector(ABC):
     """Abstract base class for connectors"""
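
To make the NRR definition above concrete, here is a small standalone check with arbitrarily chosen probabilities: with NRR = 1 the implied reciprocal probability is just the independent product p0*p1, and larger NRR values scale it up, subject to the same feasibility clipping used in `decisions()`.

```python
import numpy as np

p0, p1 = 0.10, 0.20   # forward and backward connection probabilities (made up)
for NRR in (1.0, 3.0):
    pr = p0 * p1 * NRR
    # same clipping as NormalizedReciprocalRate.decisions()
    pr = float(np.clip(pr, np.fmax(p0 + p1 - 1., 0.), np.fmin(p0, p1)))
    print(f"NRR={NRR}: pr={pr:.3f}")
# NRR=1.0 -> pr=0.020 (independent case); NRR=3.0 -> pr=0.060
```
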
@@ -231,22 +287,6 @@ class AbstractConnector(ABC):
         `connection_rule` method."""
         return NotImplemented
 
-    @staticmethod
-    def is_same_pop(source, target, quick=True):
-        """Whether two NodePool objects direct to the same population"""
-        if quick:
-            # Quick check (compare filter conditions)
-            same = (source.network_name == target.network_name and
-                    source._NodePool__properties ==
-                    target._NodePool__properties)
-        else:
-            # Strict check (compare all nodes)
-            same = (source.network_name == target.network_name and
-                    len(source) == len(target) and
-                    all([s.node_id == t.node_id
-                         for s, t in zip(source, target)]))
-        return same
-
     @staticmethod
     def constant_function(val):
         """Convert a constant to a constant function"""
@@ -255,7 +295,23 @@ class AbstractConnector(ABC):
         return constant
 
 
-# Helper
+# Helper functions
+def is_same_pop(source, target, quick=False):
+    """Check whether two NodePool objects direct to the same population"""
+    if quick:
+        # Quick check (compare filter conditions)
+        same = (source.network_name == target.network_name and
+                source._NodePool__properties ==
+                target._NodePool__properties)
+    else:
+        # Strict check (compare all nodes)
+        same = (source.network_name == target.network_name and
+                len(source) == len(target) and
+                all([s.node_id == t.node_id
+                     for s, t in zip(source, target)]))
+    return same
+
+
 class Timer(object):
     def __init__(self, unit='sec'):
         if unit == 'ms':
@@ -303,7 +359,7 @@ def rho_2_pr(p0, p1, rho):
 
 class ReciprocalConnector(AbstractConnector):
     """
-    Object for
+    Object for building connections in bmtk network model with reciprocal
     probability within a single population (or between two populations).
 
     Algorithm:
@@ -388,10 +444,17 @@ class ReciprocalConnector(AbstractConnector):
     symmetric_p1_arg: Whether p0_arg and p1_arg are identical. If this is
         set to True, argument p1_arg will be ignored. This is forced to be
         True when the population is recurrent.
-    pr, pr_arg: Probability of reciprocal connection and its input
-        when it is a function, similar to p0, p0_arg, p1, p1_arg.
-        a function when it has an explicit relation with some node
-        properties such as distance.
+    pr, pr_arg: Probability of reciprocal connection and its first input
+        argument when it is a function, similar to p0, p0_arg, p1, p1_arg.
+        It can be a function when it has an explicit relation with some node
+        properties such as distance. A function pr requires two additional
+        positional arguments p0 and p1 even if they are not used, i.e.,
+        pr(pr_arg, p0, p1), just in case pr is dependent on p0 and p1, e.g.,
+        when normalized reciprocal rate NRR = pr/(p0*p1) is given.
+        When pr_arg is a string, the same value as p1_arg will be used for
+        pr_arg if the string contains '1', e.g., '1', 'p1'. Otherwise, e.g.,
+        '', '0', 'p0', p0_arg will be used for pr_arg. Specifying this can
+        avoid recomputing pr_arg when it's given by p0_arg or p1_arg.
     estimate_rho: Whether estimate rho that result in an overall pr. This
         is forced to be False if pr is a function or if rho is specified.
         To estimate rho, all the pairs with possible connections, meaning
@@ -425,6 +488,10 @@ class ReciprocalConnector(AbstractConnector):
         into the connection matrix to reduce memory consumption.
     autapses: Whether to allow connecting a cell to itself. Default: False.
         This is ignored when the population is not recurrent.
+    quick_pop_check: Whether to use quick method to check if source and
+        target populations are the same. Default: False.
+        Quick method checks only whether filter conditions match.
+        Strict method checks whether all node id's match considering order.
     cache_data: Whether to cache the values of p0, p0_arg, p1, p1_arg
         during estimation of rho. This improves performance when
         estimate_rho is True while not creating a significant overhead in
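
A hedged sketch of how the new options documented above might be combined: a function-valued `pr` via `NormalizedReciprocalRate`, a string `pr_arg` to reuse `p0_arg`, and the new `quick_pop_check` flag. The probabilities, population name, and the `net` object (a bmtk `NetworkBuilder`) are placeholders, following the connector workflow already shown in the README.

```python
from bmtk.builder import NetworkBuilder
from bmtool.connectors import ReciprocalConnector, NormalizedReciprocalRate

net = NetworkBuilder('LA')            # hypothetical network
net.add_nodes(N=100, pop_name='PNa')  # hypothetical population

# Constant unidirectional probabilities; reciprocal pairs occur at 3x the
# independent rate. pr_arg='0' tells the connector to reuse p0_arg for pr.
connector = ReciprocalConnector(
    p0=0.05, p1=0.05,
    pr=NormalizedReciprocalRate(NRR=3.0), pr_arg='0',
    quick_pop_check=False, verbose=True,
)
connector.setup_nodes(source=net.nodes(pop_name='PNa'),
                      target=net.nodes(pop_name='PNa'))
net.add_edges(**connector.edge_params())
```
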
@@ -465,7 +532,7 @@ class ReciprocalConnector(AbstractConnector):
                  pr=0., pr_arg=None, estimate_rho=True, rho=None,
                  dist_range_forward=None, dist_range_backward=None,
                  n_syn0=1, n_syn1=1, autapses=False,
-                 cache_data=True, verbose=True):
+                 quick_pop_check=False, cache_data=True, verbose=True):
         args = locals()
         var_set = ('p0', 'p0_arg', 'p1', 'p1_arg',
                    'pr', 'pr_arg', 'n_syn0', 'n_syn1')
@@ -480,6 +547,7 @@ class ReciprocalConnector(AbstractConnector):
         self.rho = rho
 
         self.autapses = autapses
+        self.quick = quick_pop_check
         self.cache = self.ConnectorCache(cache_data and self.estimate_rho)
         self.verbose = verbose
 
@@ -493,8 +561,8 @@ class ReciprocalConnector(AbstractConnector):
         if self.stage:
             # check whether the correct populations
             if (source is None or target is None or
-                    not
-                    not
+                    not is_same_pop(source, self.target, quick=self.quick) or
+                    not is_same_pop(target, self.source, quick=self.quick)):
                 raise ValueError("Source or target population not consistent.")
             # Skip adding nodes for the backward stage.
             return
@@ -508,7 +576,7 @@ class ReciprocalConnector(AbstractConnector):
             raise ValueError("Target nodes do not exists")
 
         # Setup nodes
-        self.recurrent =
+        self.recurrent = is_same_pop(self.source, self.target, quick=self.quick)
         self.source_ids = [s.node_id for s in self.source]
         self.n_source = len(self.source_ids)
         self.source_list = list(self.source)
@@ -551,6 +619,7 @@ class ReciprocalConnector(AbstractConnector):
             self.enable = enable
             self._output = {}
            self.cache_dict = {}
+            self.set_next_it()
             self.write_mode()
 
         def cache_output(self, func, func_name, cache=True):
@@ -606,9 +675,14 @@ class ReciprocalConnector(AbstractConnector):
             self.enable = False
             self.mode = 'read'
 
-        def
+        def set_next_it(self):
             if self.enable:
-
+                def next_it():
+                    self.iter_count += 1
+            else:
+                def next_it():
+                    pass
+            self.next_it = next_it
 
     def node_2_idx_input(self, var_func, reverse=False):
         """Convert a function that accept nodes as input
@@ -703,6 +777,13 @@ class ReciprocalConnector(AbstractConnector):
 
     # *** A sequence of major methods executed during build ***
     def setup_variables(self):
+        # If pr_arg is string, use the same value as p0_arg or p1_arg
+        if isinstance(self.vars['pr_arg'], str):
+            pr_arg_func = 'p1_arg' if '1' in self.vars['pr_arg'] else 'p0_arg'
+            self.vars['pr_arg'] = self.vars[pr_arg_func]
+        else:
+            pr_arg_func = None
+
         callable_set = set()
         # Make constant variables constant functions
         for name, var in self.vars.items():
@@ -718,6 +799,21 @@ class ReciprocalConnector(AbstractConnector):
             var = self.vars[name]
             setattr(self, name, self.node_2_idx_input(var, '1' in name))
 
+        # Set up function for pr_arg if use value from p0_arg or p1_arg
+        if pr_arg_func is None:
+            self._pr_arg = self.pr_arg  # use specified pr_arg
+        else:
+            self._pr_arg_val = 0.  # storing current value from p_arg
+            p_arg = getattr(self, pr_arg_func)
+            def p_arg_4_pr(*args, **kwargs):
+                val = p_arg(*args, **kwargs)
+                self._pr_arg_val = val
+                return val
+            setattr(self, pr_arg_func, p_arg_4_pr)
+            def pr_arg(self, *arg):
+                return self._pr_arg_val
+            self._pr_arg = types.MethodType(pr_arg, self)
+
     def cache_variables(self):
         # Select cacheable attrilbutes
         cache_set = {'p0', 'p0_arg', 'p1', 'p1_arg'}
@@ -825,7 +921,7 @@ class ReciprocalConnector(AbstractConnector):
             if forward:
                 forward = decision(p0)
             if backward:
-                pr = self.pr(self.
+                pr = self.pr(self._pr_arg(i, j), p0, p1)
                 backward = decision(self.cond_backward(forward, p0, p1, pr))
 
             # Make connection
@@ -849,7 +945,7 @@ class ReciprocalConnector(AbstractConnector):
         if self.verbose:
             self.timer.report('Total time for creating connection matrix')
             if self.wrong_pr:
-                print("
+                print("Warning: Value of 'pr' outside the bounds occurred.\n")
             self.connection_number_info()
 
     def make_connection(self):
@@ -949,7 +1045,7 @@ class ReciprocalConnector(AbstractConnector):
 
 class UnidirectionConnector(AbstractConnector):
     """
-    Object for
+    Object for building unidirectional connections in bmtk network model with
     given probability within a single population (or between two populations).
 
     Parameters:
@@ -1081,9 +1177,80 @@ class UnidirectionConnector(AbstractConnector):
             % (100. * self.n_conn / self.n_pair))
 
 
-class
+class GapJunction(UnidirectionConnector):
+    """
+    Object for building gap junction connections in bmtk network model with
+    given probabilities within a single population which is uncorrelated with
+    the recurrent chemical synapses in this population.
+
+    Parameters:
+        p, p_arg: Probability of forward connection and its input argument when
+            it is a function, similar to p0, p0_arg in ReciprocalConnector. It
+            can be a constant or a deterministic function whose value must be
+            within range [0, 1]. When p is constant, the connection is
+            homogenous.
+        verbose: Whether show verbose information in console.
+
+    Returns:
+        An object that works with BMTK to build edges in a network.
+
+    Important attributes:
+        Similar to `UnidirectionConnector`.
     """
-
+
+    def __init__(self, p=1., p_arg=None, verbose=True):
+        super().__init__(p=p, p_arg=p_arg, verbose=verbose)
+
+    def setup_nodes(self, source=None, target=None):
+        super().setup_nodes(source=source, target=target)
+        if len(self.source) != len(self.target):
+            raise ValueError("Source and target must be the same for "
+                             "gap junction.")
+        self.n_source = len(self.source)
+
+    def make_connection(self, source, target, *args, **kwargs):
+        """Assign gap junction per iteration using one_to_one iterator"""
+        # Initialize in the first iteration
+        if self.iter_count == 0:
+            self.initialize()
+            if self.verbose:
+                src_str, _ = self.get_nodes_info()
+                print("\nStart building gap junction \n in " + src_str)
+
+        # Consider each pair only once
+        nsyns = 0
+        i, j = divmod(self.iter_count, self.n_source)
+        if i < j:
+            p_arg = self.p_arg(source, target)
+            p = self.p(p_arg)
+            possible = p > 0
+            self.n_poss += possible
+            if possible and decision(p):
+                nsyns = 1
+                sid, tid = source.node_id, target.node_id
+                self.add_conn_prop(sid, tid, p_arg)
+                self.add_conn_prop(tid, sid, p_arg)
+                self.n_conn += 1
+
+        self.iter_count += 1
+
+        # Detect end of iteration
+        if self.iter_count == self.n_pair:
+            if self.verbose:
+                self.connection_number_info()
+                self.timer.report('Done! \nTime for building connections')
+        return nsyns
+
+    def connection_number_info(self):
+        n_pair = self.n_pair
+        self.n_pair = (n_pair - len(self.source)) // 2
+        super().connection_number_info()
+        self.n_pair = n_pair
+
+
+class CorrelatedGapJunction(GapJunction):
+    """
+    Object for building gap junction connections in bmtk network model with
     given probabilities within a single population which could be correlated
     with the recurrent chemical synapses in this population.
 
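
A hedged sketch of how the new GapJunction connector might be used, modeled on the connector workflow shown in the README (the network name, population, and probability are placeholders; `is_gap_junction` is passed as an edge property because the utilities in util.py filter gap-junction edges on that flag).

```python
from bmtk.builder import NetworkBuilder
from bmtool.connectors import GapJunction

net = NetworkBuilder('LA')                       # hypothetical network
net.add_nodes(N=20, pop_name='PV')               # hypothetical population

connector = GapJunction(p=0.08, verbose=True)    # illustrative constant probability
connector.setup_nodes(source=net.nodes(pop_name='PV'),
                      target=net.nodes(pop_name='PV'))
# Tag the edges so util.gap_junction_connections() can select them later.
net.add_edges(is_gap_junction=True, **connector.edge_params())
net.build()
```
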
@@ -1120,13 +1287,6 @@ class CorrelatedGapJunction(UnidirectionConnector):
             conn_prop = conn_prop[0]
         self.ref_conn_prop = conn_prop
 
-    def setup_nodes(self, source=None, target=None):
-        super().setup_nodes(source=source, target=target)
-        if len(self.source) != len(self.target):
-            raise ValueError("Source and target must be the same for "
-                             "gap junction.")
-        self.n_source = len(self.source)
-
     def conn_exist(self, sid, tid):
         trg_dict = self.ref_conn_prop.get(sid)
         if trg_dict is not None and tid in trg_dict:
@@ -1153,7 +1313,7 @@ class CorrelatedGapJunction(UnidirectionConnector):
         if self.iter_count == 0:
             self.initialize()
             if self.verbose:
-                src_str,
+                src_str, _ = self.get_nodes_info()
                 print("\nStart building gap junction \n in " + src_str)
 
         # Consider each pair only once
@@ -1182,15 +1342,9 @@ class CorrelatedGapJunction(UnidirectionConnector):
                 self.timer.report('Done! \nTime for building connections')
         return nsyns
 
-    def connection_number_info(self):
-        n_pair = self.n_pair
-        self.n_pair = (n_pair - len(self.source)) // 2
-        super().connection_number_info()
-        self.n_pair = n_pair
-
 
 class OneToOneSequentialConnector(AbstractConnector):
-    """Object for
+    """Object for building one to one correspondence connections in bmtk
     network model with between two populations. One of the population can
     consist of multiple sub-populations. These sub-populations need to be added
     sequentially using setup_nodes() and edge_params() methods followed by BMTK
@@ -1337,21 +1491,24 @@ FLUC_STDEV = 0.2 # ms
 DELAY_LOWBOUND = 0.2  # ms must be greater than h.dt
 DELAY_UPBOUND = 2.0  # ms
 
-def syn_dist_delay_feng(source, target,
-
-
+def syn_dist_delay_feng(source, target, min_delay=SYN_MIN_DELAY,
+                        velocity=SYN_VELOCITY, fluc_stdev=FLUC_STDEV,
+                        delay_bound=(DELAY_LOWBOUND, DELAY_UPBOUND),
+                        connector=None):
     """Synpase delay linearly dependent on distance.
     min_delay: minimum delay (ms)
     velocity: synapse conduction velocity (micron/ms)
     fluc_stdev: standard deviation of random Gaussian fluctuation (ms)
+    delay_bound: (lower, upper) bounds of delay (ms)
+    connector: connector object from which to read distance
     """
     if connector is None:
         dist = euclid_dist(target['positions'], source['positions'])
     else:
         dist = connector.get_conn_prop(source.node_id, target.node_id)
     del_fluc = fluc_stdev * rng.normal()
-    delay = dist /
-    delay = min(max(delay,
+    delay = dist / velocity + min_delay + del_fluc
+    delay = min(max(delay, delay_bound[0]), delay_bound[1])
     return delay
 
 
@@ -1373,4 +1530,4 @@ def syn_dist_delay_feng_section_PN(source, target, p=0.9,
 
 def syn_uniform_delay_section(source, target, low=DELAY_LOWBOUND,
                               high=DELAY_UPBOUND, **kwargs):
-    return rng.uniform(low, high)
+    return rng.uniform(low, high)
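
For orientation, a standalone sketch of the delay rule the reworked `syn_dist_delay_feng` implements: distance over conduction velocity, plus a minimum delay and Gaussian jitter, clipped to `delay_bound`. The min-delay and velocity values below are placeholders; only FLUC_STDEV and the delay bounds appear in the hunk above.

```python
import numpy as np

rng = np.random.default_rng()

FLUC_STDEV = 0.2                           # ms, from the module constants above
DELAY_LOWBOUND, DELAY_UPBOUND = 0.2, 2.0   # ms, from the module constants above
MIN_DELAY = 0.8                            # ms, placeholder for SYN_MIN_DELAY
VELOCITY = 1000.0                          # micron/ms, placeholder for SYN_VELOCITY

def dist_delay(dist, min_delay=MIN_DELAY, velocity=VELOCITY,
               fluc_stdev=FLUC_STDEV,
               delay_bound=(DELAY_LOWBOUND, DELAY_UPBOUND)):
    """Distance-dependent synaptic delay with Gaussian jitter, clipped to bounds."""
    delay = dist / velocity + min_delay + fluc_stdev * rng.normal()
    return min(max(delay, delay_bound[0]), delay_bound[1])

print(dist_delay(150.0))  # delay (ms) for a cell pair ~150 microns apart
```
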
bmtool/graphs.py
ADDED
@@ -0,0 +1,170 @@
+import networkx as nx
+import plotly.graph_objects as go
+import pandas as pd
+import bmtool.util.util as u
+import pandas as pd
+
+
+def generate_graph(config,source,target):
+    """
+    returns a graph object
+    config: A BMTK simulation config
+    source: network name
+    target: network name
+    """
+    nodes,edges = u.load_nodes_edges_from_config(config)
+    nodes_source = nodes[source]
+    nodes_target = nodes[target]
+    if source != target:
+        # Concatenate the DataFrames if source and target are different nodes
+        nodes = pd.concat([nodes_source, nodes_target])
+    else:
+        nodes = nodes[source]
+    edge_to_grap = source+"_to_"+target
+    edges = edges[edge_to_grap]
+
+    # Create an empty graph
+    G = nx.Graph()
+
+    # Add nodes to the graph with their positions and labels
+    for index, node_data in nodes.iterrows():
+        G.add_node(index, pos=(node_data['pos_x'], node_data['pos_y'], node_data['pos_z']), label=node_data['pop_name'])
+
+    # Add edges to the graph
+    for _, row in edges.iterrows():
+        G.add_edge(row['source_node_id'], row['target_node_id'])
+
+    return G
+
+
+def plot_graph(Graph=None,show_edges = False,title = None):
+    """
+    Generate an interactive plot of the network
+    Graph: A Graph object
+    show_edges: Boolean to show edges in graph plot
+    title: A string for the title of the graph
+
+    """
+
+    # Extract node positions
+    node_positions = nx.get_node_attributes(Graph, 'pos')
+    node_x = [data[0] for data in node_positions.values()]
+    node_y = [data[1] for data in node_positions.values()]
+    node_z = [data[2] for data in node_positions.values()]
+
+    # Create edge traces
+    edge_x = []
+    edge_y = []
+    edge_z = []
+    for edge in Graph.edges():
+        x0, y0, z0 = node_positions[edge[0]]
+        x1, y1, z1 = node_positions[edge[1]]
+        edge_x.extend([x0, x1, None])
+        edge_y.extend([y0, y1, None])
+        edge_z.extend([z0, z1, None])
+
+    # Create edge trace
+    edge_trace = go.Scatter3d(
+        x=edge_x,
+        y=edge_y,
+        z=edge_z,
+        line=dict(width=1, color='#888'),
+        hoverinfo='none',
+        mode='lines',
+        opacity=0.2)
+
+    # Create node trace
+    node_trace = go.Scatter3d(
+        x=node_x,
+        y=node_y,
+        z=node_z,
+        mode='markers',
+        hoverinfo='text',
+        marker=dict(
+            showscale=True,
+            colorscale='YlGnBu',  # Adjust color scale here
+            reversescale=True,
+            color=[len(list(Graph.neighbors(node))) for node in Graph.nodes()],  # Assign color data here
+            size=5,  # Adjust the size of the nodes here
+            colorbar=dict(
+                thickness=15,
+                title='Node Connections',
+                xanchor='left',
+                titleside='right'
+            ),
+            line_width=2,
+            cmin=0,  # Adjust color scale range here
+            cmax=max([len(list(Graph.neighbors(node))) for node in Graph.nodes()])  # Adjust color scale range here
+        ))
+
+    # Define hover text for nodes
+    node_hover_text = [f'Node ID: {node_id}<br>Population Name: {node_data["label"]}<br># of Connections: {len(list(Graph.neighbors(node_id)))}' for node_id, node_data in Graph.nodes(data=True)]
+    node_trace.hovertext = node_hover_text
+
+    # Create figure
+    if show_edges:
+        graph_prop = [edge_trace,node_trace]
+    else:
+        graph_prop = [node_trace]
+
+    if title == None:
+        title = '3D plot'
+
+    fig = go.Figure(data=graph_prop,
+                    layout=go.Layout(
+                        title=title,
+                        titlefont_size=16,
+                        showlegend=False,
+                        hovermode='closest',
+                        margin=dict(b=20, l=5, r=5, t=40),
+                        scene=dict(
+                            xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
+                            yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
+                            zaxis=dict(showgrid=False, zeroline=False, showticklabels=False)
+                        ),
+                        width=800,
+                        height=800
+                    ))
+
+    # Show figure
+    fig.show()
+
+
+def export_node_connections_to_csv(Graph, filename):
+    """
+    Generates a csv file with node type and all connections that node receives
+    Graph: a Graph object
+    filename: A string for the name of output, must end in .csv
+    """
+    # Create an empty dictionary to store the connections for each node
+    node_connections = {}
+
+    # Iterate over each node in the graph
+    for node in Graph.nodes():
+        # Initialize a dictionary to store the connections for the current node
+        connections = {}
+        node_label = Graph.nodes[node]['label']
+
+        # Iterate over each neighbor of the current node
+        for neighbor in Graph.neighbors(node):
+            # Get the label of the neighbor node
+            neighbor_label = Graph.nodes[neighbor]['label']
+
+            # Increment the connection count for the current node and neighbor label
+            connections[f'{neighbor_label} Connections'] = connections.get(f'{neighbor_label} Connections', 0) + 1
+
+        # Add the connections information for the current node to the dictionary
+        connections['Node Label'] = node_label
+        node_connections[node] = connections
+
+    # Convert the dictionary to a DataFrame
+    df = pd.DataFrame(node_connections).fillna(0).T
+
+    # Reorder columns so that 'Node Label' is the leftmost column
+    cols = df.columns.tolist()
+    cols = ['Node Label'] + [col for col in cols if col != 'Node Label']
+    df = df[cols]
+
+    # Write the DataFrame to a CSV file
+    df.to_csv(filename)
+
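
Since `generate_graph()` returns an ordinary networkx `Graph`, the helpers above can be mixed with plain networkx calls. A hedged example that summarizes mean connections per population label (the config path and network name are placeholders taken from the README examples):

```python
import networkx as nx
from bmtool import graphs

Graph = graphs.generate_graph(config='config.json', source='LA', target='LA')

labels = nx.get_node_attributes(Graph, 'label')
degrees_by_pop = {}
for node, deg in Graph.degree():
    degrees_by_pop.setdefault(labels[node], []).append(deg)
for pop, degs in degrees_by_pop.items():
    print(pop, sum(degs) / len(degs))   # mean number of connections per cell
```
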
bmtool/singlecell.py
CHANGED
@@ -217,8 +217,8 @@ class Passive(CurrentClamp):
         self.v_final_time = self.t_vec[self.index_v_final]
 
         t_idx = slice(self.index_v_rest, self.index_v_final + 1)
-        self.v_vec_inj = self.v_vec
-        self.t_vec_inj = self.t_vec
+        self.v_vec_inj = np.array(self.v_vec)[t_idx]
+        self.t_vec_inj = np.array(self.t_vec)[t_idx] - self.v_rest_time
 
         self.v_diff = self.cell_v_final - self.v_rest
         self.r_in = self.v_diff / self.inj_amp  # MegaOhms
@@ -388,8 +388,8 @@ class ZAP(CurrentClamp):
         self.v_rest_time = self.t_vec[self.index_v_rest]
 
         t_idx = slice(self.index_v_rest, self.index_v_final + 1)
-        self.v_vec_inj = self.v_vec
-        self.t_vec_inj = self.t_vec
+        self.v_vec_inj = np.array(self.v_vec)[t_idx] - self.v_rest
+        self.t_vec_inj = np.array(self.t_vec)[t_idx] - self.v_rest_time
 
         self.cell_v_amp_max = np.abs(self.v_vec_inj).max()
         self.Z = np.fft.rfft(self.v_vec_inj) / np.fft.rfft(self.zap_vec_inj)  # MOhms
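
The change above stores only the window between the resting point and the final point, re-zeroed to the start of the window, instead of the whole recorded trace. A small numpy-only sketch of the same indexing (the arrays and indices are arbitrary stand-ins for the recorded NEURON vectors):

```python
import numpy as np

t_vec = np.arange(0.0, 10.0, 1.0)                # stand-in for the recorded time vector (ms)
v_vec = np.linspace(-70.0, -60.0, t_vec.size)    # stand-in for the recorded voltage (mV)

index_v_rest, index_v_final = 2, 7
t_idx = slice(index_v_rest, index_v_final + 1)

v_rest_time = t_vec[index_v_rest]
v_vec_inj = np.array(v_vec)[t_idx]               # voltage over the injection window
t_vec_inj = np.array(t_vec)[t_idx] - v_rest_time # time re-zeroed to the window start

print(t_vec_inj)   # [0. 1. 2. 3. 4. 5.]
```
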
bmtool/util/util.py
CHANGED
@@ -258,18 +258,20 @@ def load_nodes_from_paths(node_paths):
         for group_id in range(n_group):
             group = nodes_grp[str(group_id)]
             idx = node_group_id == group_id
+            group_node = node_id[idx]
+            group_index = node_group_index[idx]
             for prop in group:
                 if prop == 'positions':
-                    positions = group[prop][
+                    positions = group[prop][group_index]
                     for i in range(positions.shape[1]):
                         if pos_labels[i] not in nodes_df:
                             nodes_df[pos_labels[i]] = np.nan
-                        nodes_df.loc[
+                        nodes_df.loc[group_node, pos_labels[i]] = positions[:, i]
                 else:
                     # create new column with NaN if property does not exist
                     if prop not in nodes_df:
                         nodes_df[prop] = np.nan
-                    nodes_df.loc[
+                    nodes_df.loc[group_node, prop] = group[prop][group_index]
                     prop_dtype[prop] = group[prop].dtype
         # convert to original data type if possible
         for prop, dtype in prop_dtype.items():
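
The fix above pairs each group's property rows with the right node ids by masking both `node_id` and `node_group_index` with the same boolean index. A small numpy/pandas sketch of that alignment (all arrays are made up):

```python
import numpy as np
import pandas as pd

node_id = np.array([10, 11, 12, 13])
node_group_id = np.array([0, 1, 0, 1])
node_group_index = np.array([0, 0, 1, 1])       # row within each group's property arrays
group0_positions = np.array([[1., 2., 3.],
                             [4., 5., 6.]])     # rows for group 0 only

nodes_df = pd.DataFrame(index=node_id, columns=['pos_x', 'pos_y', 'pos_z'])

idx = node_group_id == 0
group_node = node_id[idx]                       # node ids belonging to group 0
group_index = node_group_index[idx]             # their rows in group 0's arrays
positions = group0_positions[group_index]
for i, label in enumerate(['pos_x', 'pos_y', 'pos_z']):
    nodes_df.loc[group_node, label] = positions[:, i]
print(nodes_df)
```
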
@@ -615,7 +617,9 @@ def connection_totals(config=None,nodes=None,edges=None,sources=[],targets=[],si
         return total
     return relation_matrix(config,nodes,edges,sources,targets,sids,tids,prepend_pop,relation_func=total_connection_relationship,synaptic_info=synaptic_info)
 
-
+
+def percent_connections(config=None,nodes=None,edges=None,sources=[],targets=[],sids=[],tids=[],prepend_pop=True,type='convergence',method=None,include_gap=True):
+
 
     def precent_func(**kwargs):
         edges = kwargs["edges"]
@@ -644,8 +648,12 @@ def percent_connections(config=None,nodes=None,edges=None,sources=[],targets=[],
         num_bi = (cons_recip.count().source_node_id - cons_recip_dedup.count().source_node_id)
         num_uni = total_cons - num_bi
 
-        num_sources = s_list.apply(pd.Series.value_counts)[source_id_type].dropna().sort_index().loc[source_id]
-        num_targets = t_list.apply(pd.Series.value_counts)[target_id_type].dropna().sort_index().loc[target_id]
+        #num_sources = s_list.apply(pd.Series.value_counts)[source_id_type].dropna().sort_index().loc[source_id]
+        #num_targets = t_list.apply(pd.Series.value_counts)[target_id_type].dropna().sort_index().loc[target_id]
+
+        num_sources = s_list[source_id_type].value_counts().sort_index().loc[source_id]
+        num_targets = t_list[target_id_type].value_counts().sort_index().loc[target_id]
+
 
         total = round(total_cons / (num_sources*num_targets) * 100,2)
         uni = round(num_uni / (num_sources*num_targets) * 100,2)
@@ -660,6 +668,7 @@ def percent_connections(config=None,nodes=None,edges=None,sources=[],targets=[],
 
     return relation_matrix(config,nodes,edges,sources,targets,sids,tids,prepend_pop,relation_func=precent_func)
 
+
 def connection_divergence(config=None,nodes=None,edges=None,sources=[],targets=[],sids=[],tids=[],prepend_pop=True,convergence=False,method='mean+std',include_gap=True):
 
     import pandas as pd
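
The commented-out lines preserved above document the pandas idiom being replaced: a later comment in this same file notes that `cons.apply(pd.Series.value_counts)...` is "no longer a valid way", so the code now counts values on the relevant column directly. A tiny illustration with made-up node data:

```python
import pandas as pd

s_list = pd.DataFrame({'pop_name': ['PV', 'PV', 'SOM', 'PV', 'SOM']})

# New approach used in util.py: call value_counts() on the id column itself.
num_sources = s_list['pop_name'].value_counts().sort_index().loc['PV']
print(num_sources)  # 3
```
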
@@ -680,41 +689,97 @@ def connection_divergence(config=None,nodes=None,edges=None,sources=[],targets=[
 
         if convergence:
             if method == 'min':
-                count = cons.
+                count = cons['target_node_id'].value_counts().min()
                 return round(count,2)
             elif method == 'max':
-                count = cons.
+                count = cons['target_node_id'].value_counts().max()
                 return round(count,2)
             elif method == 'std':
-                std = cons.
+                std = cons['target_node_id'].value_counts().std()
                 return round(std,2)
             elif method == 'mean':
                 mean = cons['target_node_id'].value_counts().mean()
                 return round(mean,2)
             elif method == 'mean+std': #default is mean + std
                 mean = cons['target_node_id'].value_counts().mean()
-                std = cons.
+                std = cons['target_node_id'].value_counts().std()
+                #std = cons.apply(pd.Series.value_counts).target_node_id.dropna().std() no longer a valid way
                 return (round(mean,2)), (round(std,2))
         else: #divergence
             if method == 'min':
-                count = cons.
+                count = cons['source_node_id'].value_counts().min()
                 return round(count,2)
             elif method == 'max':
-                count = cons.
+                count = cons['source_node_id'].value_counts().max()
                 return round(count,2)
             elif method == 'std':
-                std = cons.
+                std = cons['source_node_id'].value_counts().std()
                 return round(std,2)
             elif method == 'mean':
                 mean = cons['source_node_id'].value_counts().mean()
                 return round(mean,2)
             elif method == 'mean+std': #default is mean + std
                 mean = cons['source_node_id'].value_counts().mean()
-                std = cons.
+                std = cons['source_node_id'].value_counts().std()
                 return (round(mean,2)), (round(std,2))
 
     return relation_matrix(config,nodes,edges,sources,targets,sids,tids,prepend_pop,relation_func=total_connection_relationship)
 
+def gap_junction_connections(config=None,nodes=None,edges=None,sources=[],targets=[],sids=[],tids=[],prepend_pop=True,type='convergence'):
+    import pandas as pd
+
+
+    def total_connection_relationship(**kwargs): #reduced version of original function; only gets mean+std
+        edges = kwargs["edges"]
+        source_id_type = kwargs["sid"]
+        target_id_type = kwargs["tid"]
+        source_id = kwargs["source_id"]
+        target_id = kwargs["target_id"]
+
+        cons = edges[(edges[source_id_type] == source_id) & (edges[target_id_type]==target_id)]
+        #print(cons)
+
+        cons = cons[cons['is_gap_junction'] == True] #only gap_junctions
+        mean = cons['target_node_id'].value_counts().mean()
+        std = cons['target_node_id'].value_counts().std()
+        return (round(mean,2)), (round(std,2))
+
+    def precent_func(**kwargs): #barely different than original function; only gets gap_junctions.
+        edges = kwargs["edges"]
+        source_id_type = kwargs["sid"]
+        target_id_type = kwargs["tid"]
+        source_id = kwargs["source_id"]
+        target_id = kwargs["target_id"]
+        t_list = kwargs["target_nodes"]
+        s_list = kwargs["source_nodes"]
+
+        cons = edges[(edges[source_id_type] == source_id) & (edges[target_id_type]==target_id)]
+        #add functionality that shows only the one's with gap_junctions
+        cons = cons[cons['is_gap_junction'] == True]
+        total_cons = cons.count().source_node_id
+
+        num_sources = s_list[source_id_type].value_counts().sort_index().loc[source_id]
+        num_targets = t_list[target_id_type].value_counts().sort_index().loc[target_id]
+
+
+        total = round(total_cons / (num_sources*num_targets) * 100,2)
+        return total
+
+    if type == 'convergence':
+        return relation_matrix(config,nodes,edges,sources,targets,sids,tids,prepend_pop,relation_func=total_connection_relationship)
+    elif type == 'percent':
+        return relation_matrix(config,nodes,edges,sources,targets,sids,tids,prepend_pop,relation_func=precent_func)
+
+
+def gap_junction_percent_connections(config=None,nodes=None,edges=None,sources=[],targets=[],sids=[],tids=[],prepend_pop=True,method=None):
+    import pandas as pd
+
+
+
+
+
+
 def connection_probabilities(config=None,nodes=None,edges=None,sources=[],
     targets=[],sids=[],tids=[],prepend_pop=True,dist_X=True,dist_Y=True,dist_Z=True,num_bins=10,include_gap=True):
 
@@ -753,11 +818,11 @@ def connection_probabilities(config=None,nodes=None,edges=None,sources=[],
     def eudist(df,use_x=True,use_y=True,use_z=True):
         def _dist(x):
             if len(x) == 6:
-                return distance.euclidean((x[0],x[1],x[2]),(x[3],x[4],x[5]))
+                return distance.euclidean((x.iloc[0], x.iloc[1], x.iloc[2]), (x.iloc[3], x.iloc[4], x.iloc[5]))
             elif len(x) == 4:
-                return distance.euclidean((x[0],x[1]),(x[2],x[3]))
+                return distance.euclidean((x.iloc[0],x.iloc[1]),(x.iloc[2],x.iloc[3]))
             elif len(x) == 2:
-                return distance.euclidean((x[0]),(x[1]))
+                return distance.euclidean((x.iloc[0]),(x.iloc[1]))
             else:
                 return -1
 
@@ -835,6 +900,7 @@ def connection_graph_edge_types(config=None,nodes=None,edges=None,sources=[],tar
 
     return relation_matrix(config,nodes,edges,sources,targets,sids,tids,prepend_pop,relation_func=synapse_type_relationship,return_type=object)
 
+
 def edge_property_matrix(edge_property, config=None, nodes=None, edges=None, sources=[],targets=[],sids=[],tids=[],prepend_pop=True,report=None,time=-1,time_compare=None):
 
     var_report = None
@@ -906,10 +972,11 @@ def percent_connectivity(config=None,nodes=None,edges=None,sources=[],targets=[]
 
     return ret, source_labels, target_labels
 
-
+
 def connection_average_synapses():
     return
 
+
 def connection_divergence_average_old(config=None, nodes=None, edges=None,populations=[],convergence=False):
     """
     For each cell in source count # of connections in target and average
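
The `eudist` change switches from integer indexing (`x[0]`) to positional `.iloc[0]` access, since integer keys on a label-indexed pandas Series rely on a deprecated fallback in recent pandas. A tiny illustration; the column names below are hypothetical placeholders for the coordinate columns that `eudist()` assembles:

```python
import pandas as pd
from scipy.spatial import distance

x = pd.Series({'source_pos_x': 0., 'source_pos_y': 0., 'source_pos_z': 0.,
               'target_pos_x': 3., 'target_pos_y': 4., 'target_pos_z': 0.})

# Positional access with .iloc avoids the deprecated integer-label fallback.
d = distance.euclidean((x.iloc[0], x.iloc[1], x.iloc[2]),
                       (x.iloc[3], x.iloc[4], x.iloc[5]))
print(d)  # 5.0
```
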
{bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/METADATA
CHANGED
@@ -1,11 +1,11 @@
 Metadata-Version: 2.1
 Name: bmtool
-Version: 0.5.
+Version: 0.5.3
 Summary: BMTool
 Home-page: https://github.com/cyneuro/bmtool
 Download-URL:
-Author:
-Author-email:
+Author: Neural Engineering Laboratory at the University of Missouri
+Author-email: gregglickert@mail.missouri.edu
 License: MIT
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Education
@@ -28,6 +28,7 @@ Requires-Dist: numpy
 Requires-Dist: pandas
 Requires-Dist: questionary
 Requires-Dist: pynmodlt
+Requires-Dist: plotly
 
 # bmtool
 A collection of modules to make developing [Neuron](https://www.neuron.yale.edu/neuron/) and [BMTK](https://alleninstitute.github.io/bmtk/) models easier.
@@ -40,6 +41,7 @@ A collection of modules to make developing [Neuron](https://www.neuron.yale.edu/
 - [Single Cell](#Single-Cell-Module)
 - [Connectors](#Connectors-Module)
 - [Bmplot](#bmplot-Module)
+- [Graphs](#graphs-module)
 
 ## Getting Started
 
@@ -391,6 +393,7 @@ net.add_edges(**connector.edge_params())
 - [Percent connections](#Percent-connection-plot)
 - [Convergence connnections](#convergence-plot)
 - [Divergence connections](#divergence-plot)
+- [Gap Junction connections](#gap-junction-plot)
 - [connection histogram](#connection-histogram)
 - [probability connection](#probability-of-connection-plot)
 - [3D location](#3d-position-plot)
@@ -452,6 +455,17 @@ bmplot.divergence_connection_matrix(config='config.json',sources='LA',targets='L
 
 
 
+### Gap Junction plot
+#### While gap junctions can be included in the above plots, you can use this function to view only gap junctions. Type can be either 'convergence' or 'percent' connections to generate different plots.
+
+
+```python
+bmplot.gap_junction_matrix(config='config.json',sources='LA',targets='LA',sids='pop_name',tids='pop_name',no_prepend_pop=True,type='percent')
+```
+
+
+
+
 
 
 ### Connection histogram
@@ -776,5 +790,127 @@ bmplot.plot_basic_cell_info(config_file='config.json')
 
 
     'LA'
+## Graphs Module
+- [Generate graph](#generate-graph)
+- [Plot Graph](#plot-graph)
+- [Connection table](#generate-graph-connection-table)
+
+### Generate Graph
+
+
+```python
+from bmtool import graphs
+import networkx as nx
+
+Graph = graphs.generate_graph(config='config.json',source='LA',target='LA')
+print("Number of nodes:", Graph.number_of_nodes())
+print("Number of edges:", Graph.number_of_edges())
+print("Node labels:", set(nx.get_node_attributes(Graph, 'label').values()))
+```
+
+    Number of nodes: 2000
+    Number of edges: 84235
+    Node labels: {'SOM', 'PNc', 'PNa', 'PV'}
+
+
+### Plot Graph
+#### Generates an interactive plot showing nodes, edges and # of connections
+
+
+```python
+graphs.plot_graph(Graph)
+```
+
+
+### Generate graph connection table
+#### Generates a CSV of all cells and the number of connections each individual cell receives
+
+
+```python
+import pandas as pd
+graphs.export_node_connections_to_csv(Graph, 'node_connections.csv')
+df = pd.read_csv('node_connections.csv')
+df.head()
+```
+
+
+
+
+<div>
+<style scoped>
+    .dataframe tbody tr th:only-of-type {
+        vertical-align: middle;
+    }
+
+    .dataframe tbody tr th {
+        vertical-align: top;
+    }
+
+    .dataframe thead th {
+        text-align: right;
+    }
+</style>
+<table border="1" class="dataframe">
+  <thead>
+    <tr style="text-align: right;">
+      <th></th>
+      <th>Unnamed: 0</th>
+      <th>Node Label</th>
+      <th>PNc Connections</th>
+      <th>PV Connections</th>
+      <th>SOM Connections</th>
+      <th>PNa Connections</th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <th>0</th>
+      <td>0</td>
+      <td>PNa</td>
+      <td>15</td>
+      <td>11</td>
+      <td>9</td>
+      <td>6</td>
+    </tr>
+    <tr>
+      <th>1</th>
+      <td>1</td>
+      <td>PNa</td>
+      <td>24</td>
+      <td>25</td>
+      <td>6</td>
+      <td>21</td>
+    </tr>
+    <tr>
+      <th>2</th>
+      <td>2</td>
+      <td>PNa</td>
+      <td>27</td>
+      <td>28</td>
+      <td>12</td>
+      <td>25</td>
+    </tr>
+    <tr>
+      <th>3</th>
+      <td>3</td>
+      <td>PNa</td>
+      <td>19</td>
+      <td>27</td>
+      <td>15</td>
+      <td>35</td>
+    </tr>
+    <tr>
+      <th>4</th>
+      <td>4</td>
+      <td>PNa</td>
+      <td>25</td>
+      <td>11</td>
+      <td>8</td>
+      <td>16</td>
+    </tr>
+  </tbody>
+</table>
+</div>
 
 
{bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/RECORD
CHANGED
@@ -1,21 +1,22 @@
 bmtool/__init__.py,sha256=ZStTNkAJHJxG7Pwiy5UgCzC4KlhMS5pUNPtUJZVwL_Y,136
 bmtool/__main__.py,sha256=TmFkmDxjZ6250nYD4cgGhn-tbJeEm0u-EMz2ajAN9vE,650
-bmtool/bmplot.py,sha256=
-bmtool/connectors.py,sha256=
+bmtool/bmplot.py,sha256=MiUJ4KoN2v285en0aBdrTqrWIaZJEnb2DPzS3kYaCdM,39680
+bmtool/connectors.py,sha256=t8L25eb19vZgCduWW8fJFSrEyUQ4_zGNaENMomw0d0Q,66376
+bmtool/graphs.py,sha256=j6xEXSFCoeh3mdoyXAKEjWRk8oFbNAesAbddmzAMIs0,5752
 bmtool/manage.py,sha256=_lCU0qBQZ4jSxjzAJUd09JEetb--cud7KZgxQFbLGSY,657
 bmtool/plot_commands.py,sha256=Tqujyf0c0u8olhiHOMwgUSJXIIE1hgjv6otb25G9cA0,12298
-bmtool/singlecell.py,sha256=
+bmtool/singlecell.py,sha256=2kepmupsUs_QVPHmH9GURvmUyYCNBSvF1G1_6lFjDyM,24965
 bmtool/debug/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bmtool/debug/commands.py,sha256=AwtcR7BUUheM0NxvU1Nu234zCdpobhJv5noX8x5K2vY,583
 bmtool/debug/debug.py,sha256=xqnkzLiH3s-tS26Y5lZZL62qR2evJdi46Gud-HzxEN4,207
 bmtool/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bmtool/util/commands.py,sha256=zJF-fiLk0b8LyzHDfvewUyS7iumOxVnj33IkJDzux4M,64396
-bmtool/util/util.py,sha256=
+bmtool/util/util.py,sha256=J9fA73DnPF8Kp8CWONDFCuNza9e_3PNTd0jXuVb5iFs,56060
 bmtool/util/neuron/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bmtool/util/neuron/celltuner.py,sha256=xSRpRN6DhPFz4q5buq_W8UmsD7BbUrkzYBEbKVloYss,87194
-bmtool-0.5.
-bmtool-0.5.
-bmtool-0.5.
-bmtool-0.5.
-bmtool-0.5.
-bmtool-0.5.
+bmtool-0.5.3.dist-info/LICENSE,sha256=qrXg2jj6kz5d0EnN11hllcQt2fcWVNumx0xNbV05nyM,1068
+bmtool-0.5.3.dist-info/METADATA,sha256=zEDBcXW6HyIngsb6RXAg17wHiDJ1-tM4PlJk7M21H74,24084
+bmtool-0.5.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+bmtool-0.5.3.dist-info/entry_points.txt,sha256=0-BHZ6nUnh0twWw9SXNTiRmKjDnb1VO2DfG_-oprhAc,45
+bmtool-0.5.3.dist-info/top_level.txt,sha256=gpd2Sj-L9tWbuJEd5E8C8S8XkNm5yUE76klUYcM-eWM,7
+bmtool-0.5.3.dist-info/RECORD,,
{bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/LICENSE
File without changes
{bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/entry_points.txt
File without changes
{bmtool-0.5.2.dist-info → bmtool-0.5.3.dist-info}/top_level.txt
File without changes