morphgen-rates 0.4.0__py3-none-any.whl

morphgen_rates/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .rates import compute_rates
+ from .data import get_data
+ from .init_count import compute_init_number_probs
+ __all__ = ["compute_rates", "get_data", "compute_init_number_probs"]
morphgen_rates/data.py ADDED
@@ -0,0 +1,147 @@
+ import pandas as pd
+ from pathlib import Path
+
+
+ def _local_data_path(filename="morph_data", ext="csv"):
+     """
+     Build a path like: <this_file_dir>/<filename>.<ext>
+
+     Parameters
+     ----------
+     filename : str
+         Base filename (without extension)
+     ext : str, default "csv"
+         File extension (without the dot)
+
+     Returns
+     -------
+     pathlib.Path
+         Full path to the data file
+     """
+     work_dir = Path(__file__).resolve().parent
+     return work_dir / f"{filename}.{ext}"
+
+
+ def get_data(key):
+     """
+     Retrieve morphometric statistics for a dataset identified by a key of the
+     form "<brain region>/<neuron class>".
+
+     The key is split on "/" and used to select the rows of the bundled CSV
+     table matching the requested brain region (`area`) and neuron class
+     (`neuron_type`). Summary statistics (mean, std, min, max) are then
+     computed per subcellular section type (e.g. apical, basal).
+
+     Parameters
+     ----------
+     key : str
+         Dataset identifier expressed as a key path:
+
+             "<brain region>/<neuron class>"
+
+         Examples:
+           - "CTX/pyr"
+           - "HPC/pyr"
+
+     Returns
+     -------
+     dict
+         Dictionary keyed by subcellular section type. Each entry has the form:
+
+             data[section_type] = {
+                 "bifurcation_count": {"mean": float, "std": float, "min": float, "max": float},
+                 "total_length":      {"mean": float, "std": float, "min": float, "max": float},
+                 "primary_count":     {"mean": float, "std": float, "min": float, "max": float},
+                 "sholl_plot": {
+                     "bin_size": float,  # spatial bin size used to define the Sholl shells
+                     "mean": list,       # mean Sholl intersection count per radial bin
+                     "std": list,        # std of the Sholl intersection count per radial bin
+                 },
+             }
+
+         The "sholl_plot" and "bifurcation_count" entries of a section are the
+         input format expected by `compute_rates`.
+
+     Raises
+     ------
+     ValueError
+         If `key` does not consist of exactly two "/"-separated components
+
+     Notes
+     -----
+     - `key` is a lookup path into the bundled data table, not a filesystem path
+
+     Examples
+     --------
+     >>> data = get_data("CTX/pyr")
+     >>> data["apical"]["sholl_plot"]["bin_size"]
+     50.0
+     >>> data["apical"]["bifurcation_count"]["mean"]
+     12.3
+     """
+     data = {}
+
+     # split the key into brain region (area) and neuron class
+     parts = tuple(p.strip() for p in key.split("/") if p.strip())
+     if len(parts) != 2:
+         raise ValueError(f"Expected key like 'area/neuron_type', got: {key!r}")
+     area, neuron_type = parts
+
+     # load the bundled morphometry table
+     df = pd.read_csv(_local_data_path(), index_col=0)
+
+     # select the requested area and neuron type
+     df = df[(df['area'] == area) & (df['neuron_type'] == neuron_type)]
+
+     # identifier columns are no longer needed
+     df = df.drop(['area', 'neuron_type', 'neuron_name'], axis=1)
+
+     # per-section summary statistics
+     df = df.groupby('section_type').describe()
+
+     # keep only a subset of the statistics
+     df = df.loc[:, df.columns.get_level_values(1).isin(['mean', 'std', 'min', 'max'])]
+
+     # build one entry per subcellular section
+     for section_type, row in df.iterrows():
+         data[section_type] = {}
+
+         # scalar statistics
+         for data_type in ['bifurcation_count', 'total_length']:
+             tmp = row.loc[row.index.get_level_values(0) == data_type]
+             tmp.index = tmp.index.droplevel(0)
+             data[section_type][data_type] = tmp.to_dict()
+
+         # number of neurites at the soma (column 'Count0')
+         tmp = row.loc[row.index.get_level_values(0) == 'Count0']
+         tmp.index = tmp.index.droplevel(0)
+         data[section_type]['primary_count'] = tmp.to_dict()
+
+         # Sholl plots (columns 'Count0', 'Count1', ...)
+         tmp = row.loc[row.index.get_level_values(0).str.startswith('Count')]
+         data[section_type]['sholl_plot'] = {
+             'bin_size': float(row[('bin_size', 'mean')]),
+             'mean': tmp.loc[tmp.index.get_level_values(1) == 'mean'].tolist(),
+             'std': tmp.loc[tmp.index.get_level_values(1) == 'std'].tolist()
+         }
+
+     return data
+
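The nested dictionary returned by `get_data` feeds directly into the other modules of the package. Below is a minimal usage sketch; the key "CTX/pyr" and the section name "apical" are illustrative assumptions and may not exist in the shipped `morph_data.csv`:

```python
from morphgen_rates import get_data

# hypothetical key: "<brain region>/<neuron class>"
data = get_data("CTX/pyr")

# one entry per subcellular section type found in the table
for section_type, stats in data.items():
    print(section_type,
          stats["bifurcation_count"]["mean"],
          stats["sholl_plot"]["bin_size"])

# a single section entry is the input format expected by compute_rates
apical = data["apical"]  # hypothetical section name
```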
morphgen_rates/init_count.py ADDED
@@ -0,0 +1,208 @@
+ from __future__ import annotations
+
+ from typing import Dict, Optional, Sequence, Union
+
+ import numpy as np
+ import pyomo.environ as pyo
+
+
+ def compute_init_number_probs(
+     mean_primary_dendrites: float,
+     sd_primary_dendrites: float,
+     min_primary_dendrites: int,
+     max_primary_dendrites: int,
+     *,
+     support_values: Optional[Sequence[float]] = None,
+     epsilon: float = 1e-12,
+     slack_penalty: float = 1e-1,
+     use_variance_form: bool = True,
+     use_abs_slack: bool = False,
+     solver: str = "ipopt",
+     solver_options: Optional[Dict[str, Union[str, int, float]]] = None,
+ ) -> np.ndarray:
+     """
+     Maximum-entropy PMF for the (discrete) number of primary dendrites.
+
+     This returns a numpy array p of length n = max_primary_dendrites + 1, where:
+       - p[i] is the probability of observing i primary dendrites
+       - p[i] = 0 for i < min_primary_dendrites or i > max_primary_dendrites
+
+     The distribution is obtained by maximizing the Shannon entropy:
+         H(p) = -sum_i p[i] * log(p[i])
+
+     subject to:
+       - Normalization: sum_{i in [min, max]} p[i] = 1
+       - Soft mean constraint (with slack):
+             sum_i i * p[i] - mean_primary_dendrites = slack_mean
+       - Soft dispersion constraint (with slack):
+             if use_variance_form=True (recommended):
+                 sum_i (i - mean)^2 * p[i] - sd_primary_dendrites^2 = slack_disp
+             if use_variance_form=False:
+                 sqrt(sum_i (i - mean)^2 * p[i] + tiny) - sd_primary_dendrites = slack_disp
+
+     The objective is penalized to keep the slacks small:
+         maximize H(p) - slack_penalty * (slack terms)
+
+     Parameters
+     ----------
+     mean_primary_dendrites : float
+         Target mean number of primary dendrites
+     sd_primary_dendrites : float
+         Target standard deviation (>= 0)
+     min_primary_dendrites : int
+         Minimum allowed dendrite count (inclusive)
+     max_primary_dendrites : int
+         Maximum allowed dendrite count (inclusive). Also sets the array length n = max + 1
+
+     Keyword-only parameters
+     -----------------------
+     support_values : Sequence[float] | None
+         Optional support for indices 0..max. If None, uses support = i (integers).
+         Keep this None if you truly mean "i is the dendrite count".
+     epsilon : float
+         Lower bound on active probabilities to avoid log(0)
+     slack_penalty : float
+         Larger values enforce closer moment matching
+     use_variance_form : bool
+         Recommended True: match variance to sd^2 (smoother than the sqrt constraint)
+     use_abs_slack : bool
+         If True, use an L1-like slack penalty via +/- variables; otherwise squared (smooth)
+     solver : str
+         Nonlinear solver name (typically "ipopt")
+     solver_options : dict | None
+         Passed to the solver (e.g., {"max_iter": 5000})
+
+     Returns
+     -------
+     np.ndarray
+         Probability vector p with length max_primary_dendrites + 1
+
+     Raises
+     ------
+     ValueError
+         For invalid inputs
+     RuntimeError
+         If the requested solver is not available
+     """
+     if max_primary_dendrites < 0:
+         raise ValueError("max_primary_dendrites must be >= 0")
+     if sd_primary_dendrites < 0:
+         raise ValueError("sd_primary_dendrites must be nonnegative")
+     if not (0 <= min_primary_dendrites <= max_primary_dendrites):
+         raise ValueError("Require 0 <= min_primary_dendrites <= max_primary_dendrites")
+     if slack_penalty <= 0:
+         raise ValueError("slack_penalty must be positive")
+     if epsilon <= 0:
+         raise ValueError("epsilon must be positive")
+
+     n = max_primary_dendrites + 1
+     active = list(range(min_primary_dendrites, max_primary_dendrites + 1))
+
+     # Support values for each index i (default: i itself)
+     if support_values is None:
+         support_values = list(range(n))
+     if len(support_values) != n:
+         raise ValueError("support_values must have length n = max_primary_dendrites + 1")
+
+     support = {i: float(support_values[i]) for i in range(n)}
+     mu = float(mean_primary_dendrites)
+     sd = float(sd_primary_dendrites)
+     target_var = sd * sd
+
+     # -----------------------------
+     # Pyomo model
+     # -----------------------------
+     m = pyo.ConcreteModel()
+     m.A = pyo.Set(initialize=active, ordered=True)
+
+     # Decision variables for active probabilities only
+     m.p = pyo.Var(m.A, domain=pyo.NonNegativeReals, bounds=(epsilon, 1.0))
+
+     # Normalization over the active set
+     m.norm = pyo.Constraint(expr=sum(m.p[i] for i in m.A) == 1.0)
+
+     # Moment expressions
+     mean_expr = sum(support[i] * m.p[i] for i in m.A)
+     var_expr = sum((support[i] - mu) ** 2 * m.p[i] for i in m.A)
+
+     # Soft constraints with slack
+     if use_abs_slack:
+         # L1 slack via +/- decomposition
+         m.s_mean_pos = pyo.Var(domain=pyo.NonNegativeReals)
+         m.s_mean_neg = pyo.Var(domain=pyo.NonNegativeReals)
+         m.s_disp_pos = pyo.Var(domain=pyo.NonNegativeReals)
+         m.s_disp_neg = pyo.Var(domain=pyo.NonNegativeReals)
+
+         m.mean_soft = pyo.Constraint(expr=mean_expr - mu == m.s_mean_pos - m.s_mean_neg)
+
+         if use_variance_form:
+             m.disp_soft = pyo.Constraint(expr=var_expr - target_var == m.s_disp_pos - m.s_disp_neg)
+         else:
+             tiny = 1e-18
+             m.disp_soft = pyo.Constraint(
+                 expr=pyo.sqrt(var_expr + tiny) - sd == m.s_disp_pos - m.s_disp_neg
+             )
+
+         slack_term = (m.s_mean_pos + m.s_mean_neg) + (m.s_disp_pos + m.s_disp_neg)
+
+     else:
+         # Smooth squared slacks
+         m.s_mean = pyo.Var(domain=pyo.Reals)
+         m.s_disp = pyo.Var(domain=pyo.Reals)
+
+         m.mean_soft = pyo.Constraint(expr=mean_expr - mu == m.s_mean)
+
+         if use_variance_form:
+             m.disp_soft = pyo.Constraint(expr=var_expr - target_var == m.s_disp)
+         else:
+             tiny = 1e-18
+             m.disp_soft = pyo.Constraint(expr=pyo.sqrt(var_expr + tiny) - sd == m.s_disp)
+
+         slack_term = m.s_mean**2 + m.s_disp**2
+
+     # Entropy objective (active probabilities only; inactive probabilities stay exactly 0)
+     entropy = -sum(m.p[i] * pyo.log(m.p[i]) for i in m.A)
+     m.obj = pyo.Objective(expr=entropy - float(slack_penalty) * slack_term, sense=pyo.maximize)
+
+     # Solve
+     opt = pyo.SolverFactory(solver)
+     if opt is None or not opt.available():
+         raise RuntimeError(
+             f"Solver '{solver}' is not available. Install/configure it (e.g., ipopt) "
+             "or pass a different solver name."
+         )
+     if solver_options:
+         for k, v in solver_options.items():
+             opt.options[k] = v
+
+     opt.solve(m, tee=False)
+
+     # -----------------------------
+     # Extract the solution into a numpy array
+     # -----------------------------
+     p = np.zeros(n, dtype=float)
+     for i in active:
+         p[i] = float(pyo.value(m.p[i]))
+
+     # Optional: renormalize tiny numerical drift (keeps zeros outside the band)
+     s = p.sum()
+     if s > 0:
+         p[active] /= s
+
+     return p
+
+
+ if __name__ == "__main__":
+     p = compute_init_number_probs(
+         mean_primary_dendrites=2.33,
+         sd_primary_dendrites=1.53,
+         min_primary_dendrites=1,
+         max_primary_dendrites=4,
+         slack_penalty=0.1,
+         use_variance_form=True,
+         use_abs_slack=False,
+         solver="ipopt",
+     )
+     print("p shape:", p.shape)
+     print("sum:", p.sum())
+     print(p)
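As a usage sketch, the `primary_count` statistics produced by `get_data` can parameterize `compute_init_number_probs`, and the resulting PMF can then be sampled to seed synthesized neurons. The key "CTX/pyr", the section name "apical", and the sampling step are assumptions for illustration; a working ipopt installation is required:

```python
import numpy as np
from morphgen_rates import get_data, compute_init_number_probs

stats = get_data("CTX/pyr")["apical"]["primary_count"]  # hypothetical key/section

p = compute_init_number_probs(
    mean_primary_dendrites=stats["mean"],
    sd_primary_dendrites=stats["std"],
    min_primary_dendrites=int(stats["min"]),
    max_primary_dendrites=int(stats["max"]),
)

# draw initial dendrite counts for, e.g., 100 synthesized neurons
rng = np.random.default_rng(0)
counts = rng.choice(len(p), size=100, p=p)
```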
morphgen_rates/rates.py ADDED
@@ -0,0 +1,172 @@
+ from pyomo.environ import (
+     ConcreteModel, Var, NonNegativeReals, Reals, ConstraintList,
+     Objective, SolverFactory, minimize, value
+ )
+ import numpy as np
+
+ # penalty weights for the slack variables on the bifurcation-count constraints
+ _Mean_Penalty = 0
+ _Var_Penalty = 1.0
+
+
+ def mk_objective(model, kappa, Z, V):
+     # quadratic objective measuring the mismatch between the model-implied
+     # and the observed Sholl-plot variances
+
+     def getA_term(model, kappa, Z, V, m, i):
+         return model.b[m] * 2 * Z[i] ** 2 * (Z[m + 1] - Z[m]) / (Z[m + 1] * Z[m]) / kappa[m]
+
+     def getB_term(kappa, Z, V, m):
+         return (Z[m + 1] - Z[m]) / (Z[m + 1] * Z[m])
+
+     terms = []
+     for i in range(1, kappa.size + 1):
+         B = (V[0] - sum(getB_term(kappa, Z, V, m) for m in range(0, i))) * (Z[i] ** 2)
+         terms += [2 * getA_term(model, kappa, Z, V, m, i) * (B - V[i]) for m in range(0, i)]
+         terms += [getA_term(model, kappa, Z, V, m, i) * getA_term(model, kappa, Z, V, n, i) for m in range(0, i) for n in range(0, i)]
+     return sum(terms)
+
+
23
+ def compute_rates(data, max_step_size):
24
+ """
25
+ Compute bifurcation and annihilation rates from summary statistics.
26
+
27
+ The estimator expects Sholl-plot summary statistics (mean and variance per
28
+ radial bin) and summary statistics of bifurcation counts (mean and variance).
29
+ These quantities are used to infer the event rates of a branching-and-
30
+ annihilating process.
31
+
32
+ Parameters
33
+ ----------
34
+ data : dict
35
+ Input container with the following structure:
36
+
37
+ data = {
38
+ "sholl_plot": {
39
+ "bin_size": float,
40
+ "mean": numpy.ndarray, # shape (K,)
41
+ "var": numpy.ndarray, # shape (K,)
42
+ },
43
+ "bifurcation_count": {
44
+ "mean": float,
45
+ "var": float,
46
+ },
47
+ }
48
+
49
+ Where:
50
+ - `data["sholl_plot"]["bin_size"]` is the spatial bin size used to build the Sholl plot
51
+ - `data["sholl_plot"]["mean"][i]` is the mean Sholl intersection count in bin i
52
+ - `data["sholl_plot"]["var"][i]` is the variance of the Sholl intersection count in bin i
53
+ - `data["bifurcation_count"]["mean"]` is the mean number of bifurcations
54
+ - `data["bifurcation_count"]["var"]` is the variance of the number of bifurcations
55
+
56
+ max_step_size : float
57
+ Maximum advancement (in distance from the soma) allowed for a single
58
+ elongation step in the model. This value bounds the radial increment used
59
+ by the estimator and should be expressed in the same spatial units as the
60
+ Sholl binning.
61
+
62
+ Returns
63
+ -------
64
+ dict
65
+ Dictionary containing the estimated rates and any additional derived values
66
+ produced by the implementation. At minimum, the returned dictionary is
67
+ expected to include:
68
+
69
+ - "bifurcation_rate"
70
+ - "annihilation_rate"
71
+
72
+ Notes
73
+ -----
74
+ - `data["sholl_plot"]["mean"]` and `data["sholl_plot"]["var"]` must be 1D arrays of equal length
75
+ - Variances must be non-negative
76
+ - Ensure `bin_size` and `max_step_size` use consistent spatial units
77
+ """
78
+
79
+ ## Solves a QP problem using Pyomo with vector-style variable indexing.
80
+ ##
81
+ ## Minimize: 0.5*x[0]^2 + x[1]^2 + x[0]*x[1] + 3*x[0]
82
+ ## Subject to: x[0] + x[1] >= 1, x[i] >= 0
83
+ ##
84
+ ## Returns:
85
+ ## np.ndarray: [x[0], x[1], objective_value]
86
+
87
+
88
+ global _Mean_Penalty, _Var_Penalty
89
+ dx = data['sholl_plot']['bin_size']
90
+
91
+ min_zero_bin = min(data['sholl_plot']['mean'].index(0), data['sholl_plot']['std'].index(0))
92
+
93
+ Z = np.array(data['sholl_plot']['mean'][:min_zero_bin])
94
+ V = np.power(data['sholl_plot']['std'][:min_zero_bin], 2)
95
+
96
+ if 'bifurcations' in data:
97
+ n_bif = [data['bifurcation_count']['mean'], data['bifurcation_count']['std'] ** 2]
98
+ else:
99
+ n_bif = None
100
+
101
+ # get the kappa
102
+ kappa = np.log(Z[1:] / Z[:-1]) / dx
103
+
104
+ if np.any(kappa == 0):
105
+ kappa += 1e-5
106
+
107
+ model = ConcreteModel()
108
+
109
+ # Define index set and variables
110
+ model.b = Var(range(kappa.size), domain=NonNegativeReals)
111
+
112
+ # define 1 slack variables for eventual constraints of variance of bifurcations
113
+ model.s = Var(range(2), domain=Reals)
114
+
115
+ # Constraint:
116
+ model.constraints = ConstraintList()
117
+ for i in range(kappa.size):
118
+ model.constraints.add(model.b[i] >= kappa[i])
119
+ model.constraints.add((2 * model.b[i] - kappa[i]) * max_step_size <= 1)
120
+
121
+ # if we have number of bifurcations as contraings
122
+ if n_bif:
123
+ # constraint the average number of bifurcations
124
+ if n_bif[0]:
125
+ f = (Z[1:] - Z[:-1]) / kappa
126
+ model.constraints.add(sum(f[i] * model.b[i] for i in range(kappa.size)) == n_bif[0])
127
+
128
+ # constrain the variance for the number of bifurcations
129
+ if n_bif[1]:
130
+ f1 = (Z[1:] - Z[:-1]) / kappa
131
+ f2 = - Z[:-1] * (np.power(Z[1:] / Z[:-1], 2) - 2 * kappa * dx * Z[1:] / Z[:-1] - 1) / kappa ** 2 + V[:-1] * np.power((Z[1:] - Z[:-1]) / Z[:-1] / kappa, 2)
132
+ f3 = 2 * Z[:-1] * (np.power(Z[1:] / Z[:-1], 2) - 2 * kappa * dx * Z[1:] / Z[:-1] - 1) / kappa ** 3
133
+
134
+ var_terms = [ ]
135
+ for i in range(kappa.size):
136
+ var_terms.append(f1[i] * model.b[i] + f2[i] * model.b[i] ** 2 + f3[i] * model.b[i] ** 3)
137
+
138
+ for i in range(1, kappa.size):
139
+ for j in range(i + 1, kappa.size + 1):
140
+ term1 = model.b[i - 1] * (Z[i] - Z[i - 1])/ Z[i - 1] / kappa[i - 1]
141
+ term2 = model.b[j - 1] * (Z[j] - Z[j - 1]) / Z[j - 1] / kappa[j - 1]
142
+ for k in range(1, i + 1):
143
+ if k == 1:
144
+ Vprev = V[0]
145
+ term3 = (2 * model.b[k - 1] - kappa[k - 1]) / kappa[k - 1] * Z[k] * (Z[k] - Z[k - 1]) / Z[k - 1] + Vprev * np.power(Z[k] / Z[k - 1], 2)
146
+ var_terms.append(2 * term1 * term2 * Z[j - 1] / Z[i - 1] * term3)
147
+ Vprev = term3
148
+
149
+ model.constraints.add(sum(var_terms) + model.s[1] == n_bif[1])
150
+
151
+ # Objective
152
+ model.obj = Objective(
153
+ expr=mk_objective(model, kappa, Z, V) + _Mean_Penalty * model.s[0] ** 2 + _Var_Penalty * model.s[1] ** 2,
154
+ sense=minimize
155
+ )
156
+
157
+
158
+ # Solve
159
+ solver = SolverFactory('ipopt')
160
+ #solver.options['max_iter'] = max_iter
161
+ solver.solve(model, tee=False)
162
+ #print("Objective value:", value(model.obj))
163
+ #print(value(model.s))
164
+
165
+ # Extract bifurcation rates as array
166
+ b = np.array([value(model.b[i]) for i in model.b])
167
+ b[b < 0] = 0.
168
+
169
+ # calculate annihilation rates
170
+ a = - kappa + b
171
+ a[a < 0] = 0.
172
+ return { 'bifurcation_rate':b, 'annihilation_rate':a }
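A section entry from `get_data` matches the `data` layout documented above, so the two functions compose directly. A minimal end-to-end sketch, assuming a hypothetical "CTX/pyr" entry with an "apical" section in the bundled CSV, an available ipopt solver, and an arbitrary placeholder `max_step_size` expressed in the same units as the Sholl bin size:

```python
from morphgen_rates import get_data, compute_rates

section = get_data("CTX/pyr")["apical"]  # hypothetical key/section

rates = compute_rates(section, max_step_size=10.0)  # placeholder step size

print(rates["bifurcation_rate"])   # numpy array of per-bin bifurcation rates
print(rates["annihilation_rate"])  # numpy array of per-bin annihilation rates
```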
morphgen_rates-0.4.0.dist-info/METADATA ADDED
@@ -0,0 +1,45 @@
+ Metadata-Version: 2.4
+ Name: morphgen-rates
+ Version: 0.4.0
+ Summary: Compute bifurcation and annihilation rates from morphology data
+ Author-email: Francesco Cavarretta <fcavarretta@ualr.edu>
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: numpy>=1.23
+ Requires-Dist: pandas>=1.5
+ Requires-Dist: pyomo>=6.6
+ Dynamic: license-file
+
+ # morphgen-rates
+
+ Python package to compute **bifurcation** and **annihilation** rates from morphology-derived data.
+
+ ## Provenance
+
+ This package is based on code originally provided in the repository:
+
+ - https://github.com/FrancescoCavarretta/NeuronSynthesis
+
+ The original repository was developed for the following publication:
+
+ - Cavarretta F (2025) *A mathematical model for data-driven synthesis of neuron morphologies based on random walks.* Front. Appl. Math. Stat. 11:1632271. doi: 10.3389/fams.2025.1632271
+
+
+ ## Quick start
+
+ ```python
+ from morphgen_rates import compute_rates
+
+ # `data` must hold Sholl-plot and bifurcation-count statistics,
+ # e.g. one section entry of the dictionary returned by get_data()
+ rates = compute_rates(data, max_step_size=10.0)  # step size in the same units as the Sholl bins
+
+ print(rates["bifurcation_rate"])
+ print(rates["annihilation_rate"])
+ ```
+
+
+ ## License
+
+ GPL-3.0-or-later
+
morphgen_rates-0.4.0.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+ morphgen_rates/__init__.py,sha256=UE8YWsulDIfeYhGb5GHdkakUIFx4j9H3ZkoKoaDCd_0,179
+ morphgen_rates/data.py,sha256=yj_GT3ks6ukwtALfC4Bklcwu3MeTOr-2BGGo5W0ZxM0,4330
+ morphgen_rates/init_count.py,sha256=PhYlp0-CzRdf8opTKb-om3cFIKSv5M8eTcyKy1_IFMI,7283
+ morphgen_rates/rates.py,sha256=2Gn3Ew2uVJ7c_LdYJogxS-jAM9q-039y0maWi4CNpTM,6442
+ morphgen_rates-0.4.0.dist-info/licenses/LICENSE,sha256=VONsnKVXQRcWwCaHWHuwMtemIj9jNJSmpunazxlyvOk,670
+ morphgen_rates-0.4.0.dist-info/METADATA,sha256=Xb088-i11lgv8rY4jVvQ3ghYDpyliSw83yjC-kfYANw,1178
+ morphgen_rates-0.4.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ morphgen_rates-0.4.0.dist-info/top_level.txt,sha256=UYPGC2dGp9xD_4iVxVVTkKaizBA4XeDNM7OBC_DCWRk,15
+ morphgen_rates-0.4.0.dist-info/RECORD,,
morphgen_rates-0.4.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.10.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
morphgen_rates-0.4.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,16 @@
+ morphgen-rates
+
+ Copyright (C) 2025 Francesco Cavarretta
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
morphgen_rates-0.4.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ morphgen_rates