morphgen-rates 0.2.0__tar.gz → 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {morphgen_rates-0.2.0/src/morphgen_rates.egg-info → morphgen_rates-0.4.0}/PKG-INFO +1 -1
  2. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/pyproject.toml +1 -1
  3. morphgen_rates-0.4.0/src/morphgen_rates/__init__.py +4 -0
  4. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/src/morphgen_rates/data.py +54 -114
  5. morphgen_rates-0.4.0/src/morphgen_rates/init_count.py +208 -0
  6. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/src/morphgen_rates/rates.py +15 -12
  7. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0/src/morphgen_rates.egg-info}/PKG-INFO +1 -1
  8. morphgen_rates-0.4.0/src/morphgen_rates.egg-info/SOURCES.txt +13 -0
  9. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/tests/test.py +1 -1
  10. morphgen_rates-0.2.0/src/morphgen_rates/__init__.py +0 -3
  11. morphgen_rates-0.2.0/src/morphgen_rates/data/mitral_bifurcations.csv +0 -10
  12. morphgen_rates-0.2.0/src/morphgen_rates/data/mitral_sholl_plot.csv +0 -29
  13. morphgen_rates-0.2.0/src/morphgen_rates/data/neocortex_apical_sholl_plot.csv +0 -29
  14. morphgen_rates-0.2.0/src/morphgen_rates/data/neocortex_bifurcations.csv +0 -10
  15. morphgen_rates-0.2.0/src/morphgen_rates/data/pyr_apical_bifurcations.csv +0 -13
  16. morphgen_rates-0.2.0/src/morphgen_rates/data/pyr_apical_sholl_plot.csv +0 -17
  17. morphgen_rates-0.2.0/src/morphgen_rates/data/sl_apical_bifurcations.csv +0 -4
  18. morphgen_rates-0.2.0/src/morphgen_rates/data/sl_apical_sholl_plot.csv +0 -11
  19. morphgen_rates-0.2.0/src/morphgen_rates/data/tufted_bifurcations.csv +0 -6
  20. morphgen_rates-0.2.0/src/morphgen_rates/data/tufted_sholl_plot.csv +0 -29
  21. morphgen_rates-0.2.0/src/morphgen_rates.egg-info/SOURCES.txt +0 -22
  22. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/LICENSE +0 -0
  23. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/README.md +0 -0
  24. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/setup.cfg +0 -0
  25. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/src/morphgen_rates.egg-info/dependency_links.txt +0 -0
  26. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/src/morphgen_rates.egg-info/requires.txt +0 -0
  27. {morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/src/morphgen_rates.egg-info/top_level.txt +0 -0
{morphgen_rates-0.2.0/src/morphgen_rates.egg-info → morphgen_rates-0.4.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: morphgen-rates
-Version: 0.2.0
+Version: 0.4.0
 Summary: Compute bifurcation and annihilation rates from morphology data
 Author-email: Francesco Cavarretta <fcavarretta@ualr.edu>
 Requires-Python: >=3.9
{morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "morphgen-rates"
-version = "0.2.0"
+version = "0.4.0"
 description = "Compute bifurcation and annihilation rates from morphology data"
 authors = [
     { name = "Francesco Cavarretta", email = "fcavarretta@ualr.edu" },
morphgen_rates-0.4.0/src/morphgen_rates/__init__.py
@@ -0,0 +1,4 @@
+from .rates import compute_rates
+from .data import get_data
+from .init_count import compute_init_number_probs
+__all__ = ["compute_rates", "get_data", "compute_init_number_probs"]
{morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/src/morphgen_rates/data.py
@@ -1,47 +1,8 @@
 import pandas as pd
 from pathlib import Path
 
-_file_list = {
-    'aPC':{
-        'SL':{
-            'apical':{
-                'fname_sholl':'sl_apical_sholl_plot',
-                'fname_bif':'sl_apical_bifurcations'
-            }
-        },
-        'PYR':{
-            'apical':{
-                'fname_sholl':'pyr_apical_sholl_plot',
-                'fname_bif':'pyr_apical_bifurcations'
-            }
-        },
-    },
-    'NEOC':{
-        'PYR':{
-            'apical':{
-                'fname_sholl':'neocortex_sholl_plot',
-                'fname_bif':'neocortex_bifurcations'
-            }
-        },
-    },
-    'OB':{
-        'MITRAL':{
-            'lateral':{
-                'fname_sholl':'mitral_sholl_plot',
-                'fname_bif':'mitral_bifurcations'
-            }
-        },
-        'TUFTED':{
-            'lateral':{
-                'fname_sholl':'tufted_sholl_plot',
-                'fname_bif':'tufted_bifurcations'
-            }
-        },
-    }
-}
-
-
-def _local_data_path(filename, ext="csv"):
+
+def _local_data_path(filename='morph_data', ext="csv"):
     """
-    Build a path like: <this_file_dir>/data/<filename>.<ext>
+    Build a path like: <this_file_dir>/<filename>.<ext>
 
@@ -58,79 +19,10 @@ def _local_data_path(filename, ext="csv"):
         Full path to the data file
     """
     work_dir = Path(__file__).resolve().parent
-    return work_dir / "data" / f"{filename}.{ext}"
+    return work_dir / f"{filename}.{ext}"
 
 
-def _get_by_path(d, path, sep="/"):
-    """
-    Retrieve a value from a nested dictionary using a path-like key string.
-
-    Example
-    -------
-    d = {"a": {"b": {"c": 123}}}
-    get_by_path(d, "a/b/c") -> 123
-
-    Parameters
-    ----------
-    d : dict
-        Nested dictionary
-    path : str
-        Path of keys, e.g. "keys1/keys2/keys3"
-    sep : str, default "/"
-        Path separator
-
-    Returns
-    -------
-    object
-        The value stored at the given path
-
-    Raises
-    ------
-    KeyError
-        If any key along the path is missing
-    TypeError
-        If a non-dict is encountered before the final key
-    """
-    cur = d
-    for k in path.split(sep):
-        if not isinstance(cur, dict):
-            raise TypeError(f"Expected dict at '{k}', got {type(cur).__name__}")
-        cur = cur[k]
-    return cur
-
-
-def _get_data(fname_sholl, fname_bif):
-    data = {}
-
-    # Load Sholl plot summary statistics (bin counts + variance) from CSV
-    if fname_sholl:
-        df_sholl = pd.read_csv(_local_data_path(fname_sholl), index_col=0)
-        # manipulate the data
-        df_sholl = df_sholl.T.describe().T[['mean', 'std']]
-        df_sholl = df_sholl[(df_sholl != 0).all(axis=1)]
-        bin_size = df_sholl.index[1] - df_sholl.index[0]
-        df_sholl = df_sholl.to_numpy()
-
-        data['sholl'] = {
-            'bin_size':bin_size,
-            'mean':df_sholl[:, 0],
-            'var':df_sholl[:, 1] ** 2,
-        }
-
-    if fname_bif:
-        # Load bifurcation summary statistics from CSV
-        df_bif = pd.read_csv(_local_data_path(fname_bif), index_col=0).to_numpy()
-
-        # Bundle inputs exactly as loaded (no preprocessing)
-        data["bifurcations"] = {
-            'mean':df_bif.mean(),
-            'var':df_bif.var()
-        }
-
-    return data
-
-
-def get_data(data_path):
+def get_data(key):
     """
     Retrieve a dataset entry using a key-path of the form
     "<brain region>/<neuron class>/<subcellular section>".
@@ -143,7 +35,7 @@ def get_data(data_path):
 
     Parameters
     ----------
-    data_path : str
+    key : str
         Dataset identifier expressed as a key path:
 
             "<brain region>/<neuron class>/<subcellular section>"
@@ -203,5 +95,51 @@ def get_data(data_path):
     >>> data["bifurcations"]["mean"]
     12.3
     """
-    return _get_data(**_get_by_path(_file_list, data_path))
+    data = {}
+
+    # split the key
+    parts = tuple(p.strip() for p in key.split("/") if p.strip())
+    if len(parts) != 2:
+        raise ValueError(f"Expected key like 'area/neuron_type', got: {key!r}")
+    area, neuron_type = parts
+
+    # load data
+    df = pd.read_csv(_local_data_path(), index_col=0)
+
+    # select specific area and neuron type
+    df = df[(df['area'] == area) & (df['neuron_type'] == neuron_type)]
 
+    # neuron name unnecessary
+    df.drop(['area', 'neuron_type', 'neuron_name'], axis=1, inplace=True)
+
+    # statistics
+    df = df.groupby('section_type').describe()
+
+    # select only a subset of columns
+    df = df.loc[:, df.columns.get_level_values(1).isin(['mean', 'std', 'min', 'max'])]
+
+    # get subsections
+    for section_type, row in df.iterrows():
+        data[section_type] = {}
+
+        # get statistics
+        for data_type in ['bifurcation_count', 'total_length']:
+            tmp = row.loc[row.index.get_level_values(0) == data_type, :]
+            tmp.index = tmp.index.droplevel(0)
+            data[section_type][data_type] = tmp.to_dict()
+
+        # count neurites at the soma
+        tmp = row.loc[row.index.get_level_values(0) == 'Count0', :]
+        tmp.index = tmp.index.droplevel(0)
+        data[section_type]['primary_count'] = tmp.to_dict()
+
+        # sholl plots
+        tmp = row.loc[row.index.get_level_values(0).str.startswith('Count'), :]
+        data[section_type]['sholl_plot'] = {
+            'bin_size':row[('bin_size', 'mean')].tolist(),
+            'mean':tmp.loc[tmp.index.get_level_values(1) == 'mean', :].tolist(),
+            'std':tmp.loc[tmp.index.get_level_values(1) == 'std', :].tolist()
+        }
+
+    return data
+
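
For orientation, a minimal sketch of how the reworked `get_data` is meant to be consumed, following the call in `tests/test.py` further below. The `'apical_dendrite'` section name comes from that test; the printed keys are inferred from the code above, and the actual section names depend on the `section_type` column of the bundled `morph_data.csv`:

    from morphgen_rates import get_data

    # One call per "<area>/<neuron type>" key; sections are discovered
    # from the 'section_type' column of the bundled CSV.
    data = get_data('aPC/PYR')

    # Each section holds summary statistics, not raw per-neuron columns.
    apical = data['apical_dendrite']          # section name as used in tests/test.py
    print(apical['bifurcation_count'])        # {'mean': ..., 'std': ..., 'min': ..., 'max': ...}
    print(apical['sholl_plot']['bin_size'])   # spatial bin size shared by all Sholl bins
    print(len(apical['sholl_plot']['mean']))  # one mean intersection count per bin
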
morphgen_rates-0.4.0/src/morphgen_rates/init_count.py
@@ -0,0 +1,208 @@
+from __future__ import annotations
+
+from typing import Dict, Optional, Sequence, Union
+
+import numpy as np
+import pyomo.environ as pyo
+
+
+def compute_init_number_probs(
+    mean_primary_dendrites: float,
+    sd_primary_dendrites: float,
+    min_primary_dendrites: int,
+    max_primary_dendrites: int,
+    *,
+    support_values: Optional[Sequence[float]] = None,
+    epsilon: float = 1e-12,
+    slack_penalty: float = 1e-1,
+    use_variance_form: bool = True,
+    use_abs_slack: bool = False,
+    solver: str = "ipopt",
+    solver_options: Optional[Dict[str, Union[str, int, float]]] = None,
+) -> np.ndarray:
+    """
+    Maximum-entropy PMF for the (discrete) number of primary dendrites.
+
+    This returns a numpy array p of length n = max_primary_dendrites + 1, where:
+      - p[i] is the probability of observing i primary dendrites
+      - p[i] = 0 for i < min_primary_dendrites or i > max_primary_dendrites
+
+    The distribution is obtained by maximizing Shannon entropy:
+        H(p) = -sum_i p[i] * log(p[i])
+
+    Subject to:
+      - Normalization: sum_{i in [min,max]} p[i] = 1
+      - Soft mean constraint (with slack):
+            sum i*p[i] - mean_primary_dendrites = slack_mean
+      - Soft dispersion constraint (with slack):
+        If use_variance_form=True (recommended):
+            sum (i-mean)^2 * p[i] - (sd_primary_dendrites^2) = slack_disp
+        If use_variance_form=False:
+            sqrt( sum (i-mean)^2 * p[i] + tiny ) - sd_primary_dendrites = slack_disp
+
+    The objective is penalized to keep slacks small:
+        maximize H(p) - slack_penalty * (slack terms)
+
+    Parameters
+    ----------
+    mean_primary_dendrites : float
+        Target mean number of primary dendrites
+    sd_primary_dendrites : float
+        Target standard deviation (>= 0)
+    min_primary_dendrites : int
+        Minimum allowed dendrite count (inclusive)
+    max_primary_dendrites : int
+        Maximum allowed dendrite count (inclusive). Also sets array length n=max+1
+
+    Keyword-only parameters
+    -----------------------
+    support_values : Sequence[float] | None
+        Optional support for indices 0..max. If None, uses support=i (integers).
+        Keep this None if you truly mean "i is the dendrite count".
+    epsilon : float
+        Lower bound on active probabilities to avoid log(0)
+    slack_penalty : float
+        Larger values enforce closer moment matching
+    use_variance_form : bool
+        Recommended True: match variance to sd^2 (smoother than sqrt constraint)
+    use_abs_slack : bool
+        If True, use L1-like slack penalty via +/- variables; otherwise squared (smooth)
+    solver : str
+        Nonlinear solver name (typically "ipopt")
+    solver_options : dict | None
+        Passed to the solver (e.g., {"max_iter": 5000})
+
+    Returns
+    -------
+    np.ndarray
+        Probability vector p with length max_primary_dendrites + 1
+
+    Raises
+    ------
+    ValueError
+        For invalid inputs
+    RuntimeError
+        If the requested solver is not available
+    """
+    if max_primary_dendrites < 0:
+        raise ValueError("max_primary_dendrites must be >= 0")
+    if sd_primary_dendrites < 0:
+        raise ValueError("sd_primary_dendrites must be nonnegative")
+    if not (0 <= min_primary_dendrites <= max_primary_dendrites):
+        raise ValueError("Require 0 <= min_primary_dendrites <= max_primary_dendrites")
+    if slack_penalty <= 0:
+        raise ValueError("slack_penalty must be positive")
+    if epsilon <= 0:
+        raise ValueError("epsilon must be positive")
+
+    n = max_primary_dendrites + 1
+    active = list(range(min_primary_dendrites, max_primary_dendrites + 1))
+
+    # Support values for each index i (default: i itself)
+    if support_values is None:
+        support_values = list(range(n))
+    if len(support_values) != n:
+        raise ValueError("support_values must have length n = max_primary_dendrites + 1")
+
+    support = {i: float(support_values[i]) for i in range(n)}
+    mu = float(mean_primary_dendrites)
+    sd = float(sd_primary_dendrites)
+    target_var = sd * sd
+
+    # -----------------------------
+    # Pyomo model
+    # -----------------------------
+    m = pyo.ConcreteModel()
+    m.A = pyo.Set(initialize=active, ordered=True)
+
+    # Decision variables for active probabilities only
+    m.p = pyo.Var(m.A, domain=pyo.NonNegativeReals, bounds=(epsilon, 1.0))
+
+    # Normalization over active set
+    m.norm = pyo.Constraint(expr=sum(m.p[i] for i in m.A) == 1.0)
+
+    # Moment expressions
+    mean_expr = sum(support[i] * m.p[i] for i in m.A)
+    var_expr = sum((support[i] - mu) ** 2 * m.p[i] for i in m.A)
+
+    # Soft constraints with slack
+    if use_abs_slack:
+        # L1 slack via +/- decomposition
+        m.s_mean_pos = pyo.Var(domain=pyo.NonNegativeReals)
+        m.s_mean_neg = pyo.Var(domain=pyo.NonNegativeReals)
+        m.s_disp_pos = pyo.Var(domain=pyo.NonNegativeReals)
+        m.s_disp_neg = pyo.Var(domain=pyo.NonNegativeReals)
+
+        m.mean_soft = pyo.Constraint(expr=mean_expr - mu == m.s_mean_pos - m.s_mean_neg)
+
+        if use_variance_form:
+            m.disp_soft = pyo.Constraint(expr=var_expr - target_var == m.s_disp_pos - m.s_disp_neg)
+        else:
+            tiny = 1e-18
+            m.disp_soft = pyo.Constraint(
+                expr=pyo.sqrt(var_expr + tiny) - sd == m.s_disp_pos - m.s_disp_neg
+            )
+
+        slack_term = (m.s_mean_pos + m.s_mean_neg) + (m.s_disp_pos + m.s_disp_neg)
+
+    else:
+        # Smooth squared slacks
+        m.s_mean = pyo.Var(domain=pyo.Reals)
+        m.s_disp = pyo.Var(domain=pyo.Reals)
+
+        m.mean_soft = pyo.Constraint(expr=mean_expr - mu == m.s_mean)
+
+        if use_variance_form:
+            m.disp_soft = pyo.Constraint(expr=var_expr - target_var == m.s_disp)
+        else:
+            tiny = 1e-18
+            m.disp_soft = pyo.Constraint(expr=pyo.sqrt(var_expr + tiny) - sd == m.s_disp)
+
+        slack_term = m.s_mean**2 + m.s_disp**2
+
+    # Entropy objective (active probs only; inactive probs are exactly 0)
+    entropy = -sum(m.p[i] * pyo.log(m.p[i]) for i in m.A)
+    m.obj = pyo.Objective(expr=entropy - float(slack_penalty) * slack_term, sense=pyo.maximize)
+
+    # Solve
+    opt = pyo.SolverFactory(solver)
+    if opt is None or not opt.available():
+        raise RuntimeError(
+            f"Solver '{solver}' is not available. Install/configure it (e.g., ipopt) "
+            "or pass a different solver name."
+        )
+    if solver_options:
+        for k, v in solver_options.items():
+            opt.options[k] = v
+
+    res = opt.solve(m, tee=False)
+
+    # -----------------------------
+    # Extract solution into numpy array
+    # -----------------------------
+    p = np.zeros(n, dtype=float)
+    for i in active:
+        p[i] = float(pyo.value(m.p[i]))
+
+    # Optional: renormalize tiny numerical drift (keeps zeros outside band)
+    s = p.sum()
+    if s > 0:
+        p[active] /= s
+
+    return p
+
+
+if __name__ == "__main__":
+    p = compute_init_number_probs(
+        mean_primary_dendrites=2.33,
+        sd_primary_dendrites=1.53,
+        min_primary_dendrites=1,
+        max_primary_dendrites=4,
+        slack_penalty=0.1,
+        use_variance_form=True,
+        use_abs_slack=False,
+        solver="ipopt",
+    )
+    print("p shape:", p.shape)
+    print("sum:", p.sum())
+    print(p)
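
A quick sanity check of the maximum-entropy construction above (a sketch, assuming `ipopt` is installed and visible to Pyomo, which the module requires anyway). On the narrow support {1, 2, 3, 4} the sd target 1.53 is not exactly attainable, so the slack variables absorb part of it and the recovered moments only approximate the targets:

    import numpy as np
    from morphgen_rates import compute_init_number_probs

    # Same targets as the module's __main__ demo.
    p = compute_init_number_probs(
        mean_primary_dendrites=2.33,
        sd_primary_dendrites=1.53,
        min_primary_dendrites=1,
        max_primary_dendrites=4,
    )

    # Recover the moments implied by the returned PMF.
    i = np.arange(p.size)
    mean = float((i * p).sum())
    sd = float(np.sqrt(((i - mean) ** 2 * p).sum()))
    print(p)          # p[0] == 0.0; p[1:] sums to 1
    print(mean, sd)   # close to, but not exactly, the soft targets
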
{morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/src/morphgen_rates/rates.py
@@ -35,23 +35,23 @@ def compute_rates(data, max_step_size):
         Input container with the following structure:
 
             data = {
-                "sholl": {
+                "sholl_plot": {
                     "bin_size": float,
                     "mean": numpy.ndarray,  # shape (K,)
-                    "var": numpy.ndarray,   # shape (K,)
+                    "std": numpy.ndarray,   # shape (K,)
                 },
-                "bifurcations": {
+                "bifurcation_count": {
                     "mean": float,
-                    "var": float,
+                    "std": float,
                 },
             }
 
         Where:
-        - `data["sholl"]["bin_size"]` is the spatial bin size used to build the Sholl plot
-        - `data["sholl"]["mean"][i]` is the mean Sholl intersection count in bin i
-        - `data["sholl"]["var"][i]` is the variance of the Sholl intersection count in bin i
-        - `data["bifurcations"]["mean"]` is the mean number of bifurcations
-        - `data["bifurcations"]["var"]` is the variance of the number of bifurcations
+        - `data["sholl_plot"]["bin_size"]` is the spatial bin size used to build the Sholl plot
+        - `data["sholl_plot"]["mean"][i]` is the mean Sholl intersection count in bin i
+        - `data["sholl_plot"]["std"][i]` is the standard deviation of the Sholl intersection count in bin i
+        - `data["bifurcation_count"]["mean"]` is the mean number of bifurcations
+        - `data["bifurcation_count"]["std"]` is the standard deviation of the number of bifurcations
 
     max_step_size : float
         Maximum advancement (in distance from the soma) allowed for a single
@@ -71,7 +71,7 @@ def compute_rates(data, max_step_size):
 
     Notes
    -----
-    - `data["sholl"]["mean"]` and `data["sholl"]["var"]` must be 1D arrays of equal length
+    - `data["sholl_plot"]["mean"]` and `data["sholl_plot"]["std"]` must be 1D arrays of equal length
     - Variances must be non-negative
     - Ensure `bin_size` and `max_step_size` use consistent spatial units
     """
@@ -86,12 +86,15 @@ def compute_rates(data, max_step_size):
 
 
     global _Mean_Penalty, _Var_Penalty
-    dx = data['sholl']['bin_size']
-    Z = data['sholl']['mean']
-    V = data['sholl']['var']
+    dx = data['sholl_plot']['bin_size']
+
+    min_zero_bin = min(data['sholl_plot']['mean'].index(0), data['sholl_plot']['std'].index(0))
+
+    Z = np.array(data['sholl_plot']['mean'][:min_zero_bin])
+    V = np.power(data['sholl_plot']['std'][:min_zero_bin], 2)
 
-    if 'bifurcations' in data:
-        n_bif = [data['bifurcations']['mean'], data['bifurcations']['var']]
+    if 'bifurcation_count' in data:
+        n_bif = [data['bifurcation_count']['mean'], data['bifurcation_count']['std'] ** 2]
     else:
         n_bif = None
 
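Putting the renames together, the intended end-to-end call after this release looks roughly like this (a sketch based on `tests/test.py` below; the `max_step_size` value is illustrative and must share spatial units with `bin_size`):

    from morphgen_rates import compute_rates, get_data

    # get_data now returns plain lists under 'mean' and 'std'; compute_rates
    # trims them at the first empty Sholl bin and squares 'std' itself.
    data = get_data('aPC/PYR')['apical_dendrite']
    rates = compute_rates(data, max_step_size=10.0)
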
{morphgen_rates-0.2.0 → morphgen_rates-0.4.0/src/morphgen_rates.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: morphgen-rates
-Version: 0.2.0
+Version: 0.4.0
 Summary: Compute bifurcation and annihilation rates from morphology data
 Author-email: Francesco Cavarretta <fcavarretta@ualr.edu>
 Requires-Python: >=3.9
morphgen_rates-0.4.0/src/morphgen_rates.egg-info/SOURCES.txt
@@ -0,0 +1,13 @@
+LICENSE
+README.md
+pyproject.toml
+src/morphgen_rates/__init__.py
+src/morphgen_rates/data.py
+src/morphgen_rates/init_count.py
+src/morphgen_rates/rates.py
+src/morphgen_rates.egg-info/PKG-INFO
+src/morphgen_rates.egg-info/SOURCES.txt
+src/morphgen_rates.egg-info/dependency_links.txt
+src/morphgen_rates.egg-info/requires.txt
+src/morphgen_rates.egg-info/top_level.txt
+tests/test.py
{morphgen_rates-0.2.0 → morphgen_rates-0.4.0}/tests/test.py
@@ -2,7 +2,7 @@ import pandas as pd
 from morphgen_rates import compute_rates, get_data
 
 # Bundle inputs exactly as loaded (no preprocessing)
-data = get_data('aPC/PYR/apical')
+data = get_data('aPC/PYR')['apical_dendrite']
 
 print(data)
 
morphgen_rates-0.2.0/src/morphgen_rates/__init__.py
@@ -1,3 +0,0 @@
-from .rates import compute_rates
-from .data import get_data
-__all__ = ["compute_rates", "get_data"]
morphgen_rates-0.2.0/src/morphgen_rates/data/mitral_bifurcations.csv
@@ -1,10 +0,0 @@
-,0
-0,19
-1,12
-2,17
-3,13
-4,11
-5,5
-6,21
-7,22
-8,20
morphgen_rates-0.2.0/src/morphgen_rates/data/mitral_sholl_plot.csv
@@ -1,29 +0,0 @@
-Distance,Count0,Count1,Count2,Count3,Count4,Count5,Count6,Count7,Count8
-0.0,7.0,4.0,7.0,4.0,3.0,9.0,5.0,5.0,5.0
-50.0,8.0,6.0,8.0,5.0,5.0,10.0,8.0,7.0,7.0
-100.0,14.0,11.0,9.0,6.0,7.0,11.0,8.0,9.0,11.0
-150.0,16.0,14.0,14.0,8.0,8.0,11.0,11.0,13.0,12.0
-200.0,16.0,11.0,14.0,10.0,8.0,12.0,12.0,14.0,14.0
-250.0,15.0,16.0,14.0,11.0,8.0,14.0,12.0,15.0,15.0
-300.0,18.0,12.0,15.0,14.0,9.0,14.0,14.0,17.0,16.0
-350.0,17.0,14.0,15.0,12.0,9.0,13.0,12.0,18.0,18.0
-400.0,17.0,12.0,17.0,12.0,9.0,13.0,11.0,19.0,18.0
-450.0,18.0,8.0,17.0,14.0,9.0,12.0,11.0,20.0,18.0
-500.0,17.0,10.0,17.0,11.0,8.0,12.0,11.0,20.0,18.0
-550.0,16.0,8.0,13.0,11.0,7.0,11.0,9.0,18.0,17.0
-600.0,15.0,7.0,12.0,7.0,7.0,9.0,9.0,16.0,15.0
-650.0,13.0,6.0,12.0,6.0,8.0,9.0,8.0,13.0,14.0
-700.0,9.0,4.0,11.0,5.0,7.0,8.0,8.0,12.0,14.0
-750.0,10.0,4.0,11.0,3.0,7.0,5.0,6.0,10.0,13.0
-800.0,6.0,4.0,8.0,2.0,7.0,2.0,5.0,10.0,10.0
-850.0,2.0,5.0,7.0,0.0,4.0,2.0,4.0,6.0,8.0
-900.0,2.0,4.0,3.0,0.0,2.0,0.0,3.0,4.0,6.0
-950.0,2.0,3.0,3.0,0.0,2.0,0.0,2.0,2.0,4.0
-1000.0,2.0,2.0,2.0,0.0,1.0,0.0,1.0,1.0,1.0
-1050.0,2.0,2.0,0.0,0.0,0.0,0.0,2.0,1.0,0.0
-1100.0,0.0,2.0,0.0,0.0,0.0,0.0,2.0,0.0,0.0
-1150.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0
-1200.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0
-1250.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0
-1300.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0
-1350.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0
morphgen_rates-0.2.0/src/morphgen_rates/data/neocortex_apical_sholl_plot.csv
@@ -1,29 +0,0 @@
-Distance,Count0,Count1,Count2,Count3,Count4,Count5,Count6,Count7,Count8
-0.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0
-50.0,3.0,4.0,4.0,4.0,4.0,2.0,10.0,4.5,1.0
-100.0,7.0,7.0,6.0,7.0,10.0,3.0,9.0,8.0,14.0
-150.0,9.0,11.0,7.0,2.0,8.0,5.0,6.0,12.0,12.0
-200.0,4.0,6.0,8.0,12.0,9.0,9.0,6.0,18.0,8.0
-250.0,1.0,3.0,5.0,5.0,5.0,4.0,1.0,13.0,6.0
-300.0,2.0,2.0,2.0,3.0,2.0,2.0,3.0,5.0,1.0
-350.0,1.0,2.0,1.0,1.0,2.0,1.0,1.0,3.0,1.0
-400.0,1.0,2.0,1.0,1.0,2.0,2.0,1.0,1.0,1.0
-450.0,2.0,2.0,1.0,1.0,2.0,3.0,3.0,1.0,1.0
-500.0,2.0,3.0,1.0,3.0,3.0,3.0,1.0,1.0,1.0
-550.0,2.0,4.0,1.0,4.0,2.0,4.0,3.0,1.0,2.0
-600.0,4.0,4.0,1.0,5.0,3.0,4.0,3.0,1.0,4.0
-650.0,7.0,4.0,1.0,3.0,3.0,5.0,4.0,3.0,6.0
-700.0,6.0,3.0,2.0,4.0,3.0,7.0,10.0,3.0,14.0
-750.0,10.0,9.0,2.0,8.0,4.0,8.0,12.0,4.0,4.0
-800.0,0.0,17.0,2.0,10.0,4.0,10.0,1.0,4.0,0.0
-850.0,0.0,3.0,2.0,4.0,9.0,11.0,0.0,4.0,0.0
-900.0,0.0,0.0,3.0,0.0,19.0,1.0,0.0,5.0,0.0
-950.0,0.0,0.0,6.0,0.0,16.0,0.0,0.0,7.0,0.0
-1000.0,0.0,0.0,7.0,0.0,0.0,0.0,0.0,10.0,0.0
-1050.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0
-1100.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
-1150.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
-1200.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
-1250.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
-1300.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
-1350.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
morphgen_rates-0.2.0/src/morphgen_rates/data/neocortex_bifurcations.csv
@@ -1,10 +0,0 @@
-,0
-0,47
-1,119
-2,43
-3,90
-4,85
-5,65
-6,63
-7,65
-8,53
morphgen_rates-0.2.0/src/morphgen_rates/data/pyr_apical_bifurcations.csv
@@ -1,13 +0,0 @@
-,0
-0,23
-1,30
-2,18
-3,21
-4,19
-5,18
-6,17
-7,15
-8,14
-9,15
-10,15
-11,21
morphgen_rates-0.2.0/src/morphgen_rates/data/pyr_apical_sholl_plot.csv
@@ -1,17 +0,0 @@
-Distance,Count0,Count1,Count2,Count3,Count4,Count5,Count6,Count7,Count8,Count9,Count10,Count11
-0.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,2.0,1.0,1.0,1.0,1.0
-50.0,4.0,1.0,6.0,3.0,3.0,5.0,2.0,2.0,2.0,2.0,2.0,3.0
-100.0,5.0,1.0,5.0,6.0,3.0,4.0,2.0,2.0,5.0,2.0,5.0,8.0
-150.0,3.0,2.0,6.0,6.0,4.0,4.0,4.0,4.0,8.0,3.0,8.0,10.0
-200.0,5.0,4.0,6.0,7.0,5.0,5.0,6.0,6.0,10.0,5.0,7.0,15.0
-250.0,7.0,4.0,9.0,11.0,6.0,5.0,6.0,6.0,12.0,7.0,10.0,15.0
-300.0,13.0,7.0,11.0,11.0,4.0,6.0,4.0,9.0,11.0,9.0,11.0,15.0
-350.0,20.0,8.0,12.0,12.0,7.0,8.0,4.0,8.0,9.0,11.0,12.0,13.0
-400.0,11.0,8.0,11.0,8.0,7.0,6.0,4.0,12.0,1.0,12.0,14.0,3.0
-450.0,0.0,16.0,12.0,5.0,10.0,7.0,6.0,12.0,0.0,9.0,13.0,1.0
-500.0,0.0,14.0,11.0,2.0,14.0,9.0,8.0,12.0,0.0,9.0,6.0,0.0
-550.0,0.0,20.0,11.0,1.0,12.0,7.0,8.0,9.0,0.0,6.0,0.0,0.0
-600.0,0.0,10.0,7.0,0.0,8.0,6.0,3.0,4.0,0.0,1.0,0.0,0.0
-650.0,0.0,3.0,0.0,0.0,2.0,2.0,0.0,0.0,0.0,1.0,0.0,0.0
-700.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
-750.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
morphgen_rates-0.2.0/src/morphgen_rates/data/sl_apical_sholl_plot.csv
@@ -1,11 +0,0 @@
-Distance,Count0,Count1,Count2
-0.0,2.0,2.0,1.0
-50.0,5.0,5.0,2.0
-100.0,8.0,7.0,2.0
-150.0,15.0,5.0,1.0
-200.0,16.0,8.0,1.0
-250.0,17.0,9.0,2.0
-300.0,11.0,10.0,2.0
-350.0,4.0,10.0,2.0
-400.0,0.0,1.0,2.0
-450.0,0.0,0.0,0.0
morphgen_rates-0.2.0/src/morphgen_rates/data/tufted_bifurcations.csv
@@ -1,6 +0,0 @@
-,0
-0,40
-1,21
-2,67
-3,49
-4,22
morphgen_rates-0.2.0/src/morphgen_rates/data/tufted_sholl_plot.csv
@@ -1,29 +0,0 @@
-Distance,Count0,Count1,Count2,Count3,Count4
-0.0,2.0,2.0,1.0,7.0,1.0
-50.0,4.0,12.0,3.0,8.0,8.0
-100.0,3.0,11.0,4.0,11.0,11.0
-150.0,6.0,10.0,4.0,10.0,10.0
-200.0,11.0,10.0,4.0,16.0,10.0
-250.0,10.0,10.0,21.0,17.0,8.0
-300.0,4.0,8.0,8.0,17.0,8.0
-350.0,4.0,8.0,6.0,12.0,8.0
-400.0,3.0,8.0,7.0,12.0,8.0
-450.0,3.0,7.0,8.0,11.0,7.0
-500.0,5.0,7.0,7.0,10.0,8.0
-550.0,3.0,10.0,9.0,11.0,7.0
-600.0,2.0,11.0,6.0,11.0,8.0
-650.0,2.0,7.0,2.0,12.0,5.0
-700.0,2.0,6.0,2.0,7.0,5.0
-750.0,2.0,4.0,3.0,4.0,5.0
-800.0,0.0,2.0,1.0,3.0,4.0
-850.0,0.0,1.0,1.0,1.0,1.0
-900.0,0.0,1.0,1.0,1.0,1.0
-950.0,0.0,1.0,0.0,1.0,1.0
-1000.0,0.0,1.0,0.0,0.0,0.0
-1050.0,0.0,0.0,0.0,0.0,0.0
-1100.0,0.0,0.0,0.0,0.0,0.0
-1150.0,0.0,0.0,0.0,0.0,0.0
-1200.0,0.0,0.0,0.0,0.0,0.0
-1250.0,0.0,0.0,0.0,0.0,0.0
-1300.0,0.0,0.0,0.0,0.0,0.0
-1350.0,0.0,0.0,0.0,0.0,0.0
morphgen_rates-0.2.0/src/morphgen_rates.egg-info/SOURCES.txt
@@ -1,22 +0,0 @@
-LICENSE
-README.md
-pyproject.toml
-src/morphgen_rates/__init__.py
-src/morphgen_rates/data.py
-src/morphgen_rates/rates.py
-src/morphgen_rates.egg-info/PKG-INFO
-src/morphgen_rates.egg-info/SOURCES.txt
-src/morphgen_rates.egg-info/dependency_links.txt
-src/morphgen_rates.egg-info/requires.txt
-src/morphgen_rates.egg-info/top_level.txt
-src/morphgen_rates/data/mitral_bifurcations.csv
-src/morphgen_rates/data/mitral_sholl_plot.csv
-src/morphgen_rates/data/neocortex_apical_sholl_plot.csv
-src/morphgen_rates/data/neocortex_bifurcations.csv
-src/morphgen_rates/data/pyr_apical_bifurcations.csv
-src/morphgen_rates/data/pyr_apical_sholl_plot.csv
-src/morphgen_rates/data/sl_apical_bifurcations.csv
-src/morphgen_rates/data/sl_apical_sholl_plot.csv
-src/morphgen_rates/data/tufted_bifurcations.csv
-src/morphgen_rates/data/tufted_sholl_plot.csv
-tests/test.py