bluecellulab 2.6.60__py3-none-any.whl → 2.6.62__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of bluecellulab might be problematic.
- bluecellulab/circuit/config/definition.py +8 -0
- bluecellulab/circuit/config/sonata_simulation_config.py +31 -6
- bluecellulab/circuit/simulation_access.py +29 -7
- bluecellulab/circuit_simulation.py +40 -94
- bluecellulab/reports/__init__.py +0 -0
- bluecellulab/reports/manager.py +78 -0
- bluecellulab/reports/utils.py +156 -0
- bluecellulab/reports/writers/__init__.py +25 -0
- bluecellulab/reports/writers/base_writer.py +30 -0
- bluecellulab/reports/writers/compartment.py +196 -0
- bluecellulab/reports/writers/spikes.py +61 -0
- bluecellulab/simulation/report.py +0 -264
- bluecellulab/simulation/simulation.py +5 -6
- bluecellulab/stimulus/circuit_stimulus_definitions.py +3 -3
- {bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/METADATA +1 -1
- {bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/RECORD +20 -13
- {bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/WHEEL +0 -0
- {bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/licenses/AUTHORS.txt +0 -0
- {bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/licenses/LICENSE +0 -0
- {bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/top_level.txt +0 -0
bluecellulab/circuit/config/definition.py
@@ -68,6 +68,14 @@ class SimulationConfig(Protocol):
     def spike_location(self) -> str:
         raise NotImplementedError

+    @property
+    def tstart(self) -> Optional[float]:
+        raise NotImplementedError
+
+    @property
+    def tstop(self) -> Optional[float]:
+        raise NotImplementedError
+
     @property
     def duration(self) -> Optional[float]:
         raise NotImplementedError
bluecellulab/circuit/config/sonata_simulation_config.py
@@ -17,6 +17,7 @@ import json
 import logging
 from pathlib import Path
 from typing import Optional
+import warnings

 from bluecellulab.circuit.config.sections import Conditions, ConnectionOverrides
 from bluecellulab.stimulus.circuit_stimulus_definitions import Stimulus
@@ -88,11 +89,23 @@ class SonataSimulationConfig:

     @lru_cache(maxsize=1)
     def get_node_sets(self) -> dict[str, dict]:
-
-
-
-
-
+        circuit_filepath = self.impl.circuit.config.get("node_sets_file")
+        base_node_sets = {}
+        if circuit_filepath:
+            with open(circuit_filepath, "r") as f:
+                base_node_sets = json.load(f)
+
+        sim_filepath = self.impl.config.get("node_sets_file")
+        if sim_filepath:
+            with open(sim_filepath, "r") as f:
+                sim_node_sets = json.load(f)
+            # Overwrite/add entries
+            base_node_sets.update(sim_node_sets)
+
+        if not base_node_sets:
+            raise ValueError("No 'node_sets_file' found in simulation or circuit config.")
+
+        return base_node_sets

     @lru_cache(maxsize=1)
     def get_report_entries(self) -> dict[str, dict]:
@@ -148,9 +161,21 @@ class SonataSimulationConfig:
         return self.impl.conditions.spike_location.name

     @property
-    def
+    def tstart(self) -> Optional[float]:
+        return self.impl.config.get("run", {}).get("tstart", 0.0)
+
+    @property
+    def tstop(self) -> float:
         return self.impl.run.tstop

+    @property
+    def duration(self) -> Optional[float]:
+        warnings.warn(
+            "`duration` is deprecated. Use `tstop` instead.",
+            DeprecationWarning
+        )
+        return self.tstop
+
     @property
     def dt(self) -> float:
         return self.impl.run.dt
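The deprecation shim above can be exercised directly; a minimal sketch, assuming `cfg` is an already-constructed SonataSimulationConfig (the variable name is made up for illustration):

    import warnings

    t0 = cfg.tstart                      # "run.tstart" from the SONATA config, 0.0 if absent
    t1 = cfg.tstop                       # "run.tstop" from the SONATA config

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        legacy = cfg.duration            # deprecated alias that simply forwards to tstop
    assert legacy == t1
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)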
bluecellulab/circuit/simulation_access.py
@@ -172,16 +172,38 @@ class SonataSimulationAccess:


 def get_synapse_replay_spikes(f_name: str) -> dict:
-    """Read the .h5 file containing the spike replays.
+    """Read the .h5 file containing the spike replays.
+
+    Args:
+        f_name: Path to SONATA .h5 spike file.
+
+    Returns:
+        Dictionary mapping node_id to np.array of spike times.
+    """
+    all_spikes = []
     with h5py.File(f_name, 'r') as f:
-
-
-        node_ids = f['/spikes/All/node_ids'][:]
+        if "spikes" not in f:
+            raise ValueError("spike file is missing required 'spikes' group.")

-
-
+        for population in f["spikes"]:
+            pop_group = f["spikes"][population]
+            timestamps = pop_group["timestamps"][:]
+            node_ids = pop_group["node_ids"][:]
+
+            pop_spikes = pd.DataFrame({"t": timestamps, "node_id": node_ids})
+            pop_spikes = pop_spikes.astype({"node_id": int})
+            all_spikes.append(pop_spikes)
+
+    if not all_spikes:
+        return {}
+
+    spikes = pd.concat(all_spikes, ignore_index=True)

     if (spikes["t"] < 0).any():
         logger.warning("Found negative spike times... Clipping them to 0")
         spikes["t"].clip(lower=0., inplace=True)
-
+
+    # Group spikes by node_id and ensure spike times are sorted in ascending order.
+    # This is critical because NEURON's VecStim requires monotonically increasing times per train.
+    grouped = spikes.groupby("node_id")["t"]
+    return {k: np.sort(np.asarray(v.values)) for k, v in grouped}
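A hedged sketch of consuming the reworked reader; it relies only on the return contract shown in the hunk above (node_id mapped to a non-negative, ascending numpy array) and assumes the function is importable from bluecellulab.circuit.simulation_access, with "replay.h5" as a placeholder path:

    import numpy as np
    from bluecellulab.circuit.simulation_access import get_synapse_replay_spikes

    spikes = get_synapse_replay_spikes("replay.h5")   # placeholder file name
    for node_id, times in spikes.items():
        # Per the new implementation: clipped to >= 0 and sorted ascending,
        # which is what NEURON's VecStim expects for each spike train.
        assert (times >= 0.0).all()
        assert np.all(np.diff(times) >= 0.0)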
bluecellulab/circuit_simulation.py
@@ -17,12 +17,11 @@ simulations."""

 from __future__ import annotations
 from collections.abc import Iterable
-import os
 from pathlib import Path
 from typing import Optional
 import logging

-from
+from bluecellulab.reports.utils import configure_all_reports
 import neuron
 import numpy as np
 import pandas as pd
@@ -47,7 +46,6 @@ from bluecellulab.circuit.simulation_access import BluepySimulationAccess, Simul
 from bluecellulab.importer import load_mod_files
 from bluecellulab.rngsettings import RNGSettings
 from bluecellulab.simulation.neuron_globals import NeuronGlobals
-from bluecellulab.simulation.report import configure_all_reports, write_compartment_report, write_sonata_spikes
 from bluecellulab.stimulus.circuit_stimulus_definitions import Noise, OrnsteinUhlenbeck, RelativeOrnsteinUhlenbeck, RelativeShotNoise, ShotNoise
 import bluecellulab.stimulus.circuit_stimulus_definitions as circuit_stimulus_definitions
 from bluecellulab.exceptions import BluecellulabError
@@ -638,15 +636,32 @@ class CircuitSimulation:
         will not be exactly reproduced.
         """
         if t_stop is None:
-
-            if
+            t_stop = self.circuit_access.config.tstop
+            if t_stop is None:  # type narrowing
                 t_stop = 0.0
-        else:
-            t_stop = duration
         if dt is None:
             dt = self.circuit_access.config.dt
-
-
+
+        config_forward_skip_value = self.circuit_access.config.forward_skip  # legacy
+        config_tstart = self.circuit_access.config.tstart or 0.0  # SONATA
+        # Determine effective skip value and flag
+        if forward_skip_value is not None:
+            # User explicitly provided value → use it
+            effective_skip_value = forward_skip_value
+            effective_skip = forward_skip
+        elif config_forward_skip_value is not None:
+            # Use legacy config if available
+            effective_skip_value = config_forward_skip_value
+            effective_skip = forward_skip
+        elif config_tstart > 0.0:
+            # Use SONATA tstart *only* if no other skip value was provided
+            effective_skip_value = config_tstart
+            effective_skip = True
+        else:
+            # No skip
+            effective_skip_value = None
+            effective_skip = False
+
         if celsius is None:
             celsius = self.circuit_access.config.celsius
         NeuronGlobals.get_instance().temperature = celsius
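The precedence introduced here (explicit argument, then the legacy forward_skip config entry, then a positive SONATA tstart, then no skip) can be restated as a small standalone helper; this is an illustrative re-statement of the branch above, not an API of the package:

    def pick_forward_skip(user_value, user_flag, legacy_config_value, sonata_tstart):
        # Mirrors the ordering used above: explicit argument wins, then the
        # legacy config entry, then SONATA run.tstart, otherwise no skipping.
        if user_value is not None:
            return user_value, user_flag
        if legacy_config_value is not None:
            return legacy_config_value, user_flag
        if sonata_tstart > 0.0:
            return sonata_tstart, True
        return None, False

    assert pick_forward_skip(None, False, None, 100.0) == (100.0, True)
    assert pick_forward_skip(50.0, True, None, 100.0) == (50.0, True)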
@@ -664,15 +679,13 @@ class CircuitSimulation:
                 "simulations")

         sim.run(
-            t_stop,
+            tstop=t_stop,
             cvode=cvode,
             dt=dt,
-            forward_skip=
-            forward_skip_value=
+            forward_skip=effective_skip,
+            forward_skip_value=effective_skip_value,
             show_progress=show_progress)

-        self.write_reports()
-
     def get_mainsim_voltage_trace(
         self, cell_id: int | tuple[str, int], t_start=None, t_stop=None, t_step=None
     ) -> np.ndarray:
@@ -713,23 +726,31 @@ class CircuitSimulation:
         first_key = next(iter(self.cells))
         return self.cells[first_key].get_time()

-    def get_time_trace(self, t_step=None) -> np.ndarray:
+    def get_time_trace(self, t_start=None, t_stop=None, t_step=None) -> np.ndarray:
         """Get the time vector for the recordings, negative times removed.

         Parameters
         -----------
-
-        equals
+        t_start, t_stop: time range of interest.
+        t_step: time step (multiple of report dt; equals dt by default)

         Returns:
-
+            1D np.ndarray representing time points.
         """
         time = self.get_time()
-        time = time[
+        time = time[time >= 0.0]
+
+        if t_start is None or t_start < 0:
+            t_start = 0
+        if t_stop is None:
+            t_stop = np.inf
+
+        time = time[(time >= t_start) & (time <= t_stop)]

         if t_step is not None:
             ratio = t_step / self.dt
             time = _sample_array(time, ratio)
+
         return time

     def get_voltage_trace(
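Assuming a CircuitSimulation instance named `sim` whose main simulation has already been run (an assumption for illustration), the extended signature is used like this:

    # Full time vector, with negative (forward-skip) samples already removed.
    t_all = sim.get_time_trace()

    # Restrict to a window and subsample to a 1.0 ms grid, assuming the
    # simulation dt divides 1.0 ms evenly.
    t_win = sim.get_time_trace(t_start=100.0, t_stop=500.0, t_step=1.0)
    assert t_win[0] >= 100.0 and t_win[-1] <= 500.0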
@@ -806,78 +827,3 @@ class CircuitSimulation:
                 record_dt=cell_kwargs['record_dt'],
                 template_format=cell_kwargs['template_format'],
                 emodel_properties=cell_kwargs['emodel_properties'])
-
-    def write_reports(self):
-        """Write all reports defined in the simulation config."""
-        report_entries = self.circuit_access.config.get_report_entries()
-
-        for report_name, report_cfg in report_entries.items():
-            report_type = report_cfg.get("type", "compartment")
-            section = report_cfg.get("sections")
-
-            if report_type != "compartment":
-                raise NotImplementedError(f"Report type '{report_type}' is not supported.")
-
-            output_path = self.circuit_access.config.report_file_path(report_cfg, report_name)
-            if section == "compartment_set":
-                if report_cfg.get("cells") is not None:
-                    raise ValueError(
-                        "Report config error: 'cells' must not be set when using 'compartment_set' sections."
-                    )
-                compartment_sets = self.circuit_access.config.get_compartment_sets()
-                write_compartment_report(
-                    report_name=report_name,
-                    output_path=output_path,
-                    cells=self.cells,
-                    report_cfg=report_cfg,
-                    source_sets=compartment_sets,
-                    source_type="compartment_set",
-                    sim_dt=self.dt,
-                )
-
-            else:
-                node_sets = self.circuit_access.config.get_node_sets()
-                if report_cfg.get("compartments") not in ("center", "all"):
-                    raise ValueError(
-                        f"Unsupported 'compartments' value '{report_cfg.get('compartments')}' "
-                        "for node-based section recording (must be 'center' or 'all')."
-                    )
-                write_compartment_report(
-                    report_name=report_name,
-                    output_path=output_path,
-                    cells=self.cells,
-                    report_cfg=report_cfg,
-                    source_sets=node_sets,
-                    source_type="node_set",
-                    sim_dt=self.dt,
-                )
-
-            self.write_spike_report()
-
-    def write_spike_report(self):
-        """Collect and write in-memory recorded spike times to a SONATA HDF5
-        file, grouped by population as required by the SONATA specification."""
-        output_path = self.circuit_access.config.spikes_file_path
-
-        if os.path.exists(output_path):
-            os.remove(output_path)
-
-        # Group spikes per population
-        spikes_by_population = defaultdict(dict)
-        for gid, cell in self.cells.items():
-            pop = getattr(gid, 'population_name', None)
-            if pop is None:
-                continue
-            try:
-                cell_spikes = cell.get_recorded_spikes(location=self.spike_location, threshold=self.spike_threshold)
-                if cell_spikes is not None:
-                    spikes_by_population[pop][gid.id] = list(cell_spikes)
-            except AttributeError:
-                continue
-
-        # Ensure we at least create empty groups for all known populations
-        all_populations = set(getattr(gid, 'population_name', None) for gid in self.cells.keys())
-
-        for pop in all_populations:
-            spikes = spikes_by_population.get(pop, {})  # May be empty
-            write_sonata_spikes(output_path, spikes, pop)
bluecellulab/reports/__init__.py
File without changes (new, empty file)
bluecellulab/reports/manager.py
@@ -0,0 +1,78 @@
+# Copyright 2025 Open Brain Institute
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Dict
+from bluecellulab.reports.writers import get_writer
+from bluecellulab.reports.utils import extract_spikes_from_cells  # helper you already have / write
+
+
+class ReportManager:
+    """Orchestrates writing all requested SONATA reports."""
+
+    def __init__(self, config, sim_dt: float):
+        self.cfg = config
+        self.dt = sim_dt
+
+    def write_all(
+        self,
+        cells_or_traces: Dict,
+        spikes_by_pop: Optional[Dict[str, Dict[int, list]]] = None,
+    ):
+        """Write all configured reports (compartment and spike) in SONATA
+        format.
+
+        Parameters
+        ----------
+        cells_or_traces : dict
+            A dictionary mapping (population, gid) to either:
+            - Cell objects with recorded data (used in single-process simulations), or
+            - Precomputed trace dictionaries, e.g., {"voltage": ndarray}, typically gathered across ranks in parallel runs.
+
+        spikes_by_pop : dict, optional
+            A precomputed dictionary of spike times by population.
+            If not provided, spike times are extracted from `cells_or_traces`.
+
+        Notes
+        -----
+        In parallel simulations, you must gather all traces and spikes to rank 0 and pass them here.
+        """
+        self._write_voltage_reports(cells_or_traces)
+        self._write_spike_report(spikes_by_pop or extract_spikes_from_cells(cells_or_traces, location=self.cfg.spike_location, threshold=self.cfg.spike_threshold))
+
+    def _write_voltage_reports(self, cells_or_traces):
+        for name, rcfg in self.cfg.get_report_entries().items():
+            if rcfg.get("type") != "compartment":
+                continue
+
+            section = rcfg.get("sections")
+            if section == "compartment_set":
+                if rcfg.get("cells") is not None:
+                    raise ValueError("'cells' may not be set with 'compartment_set'")
+                src_sets, src_type = self.cfg.get_compartment_sets(), "compartment_set"
+            else:
+                if rcfg.get("compartments") not in ("center", "all"):
+                    raise ValueError("invalid 'compartments' value")
+                src_sets, src_type = self.cfg.get_node_sets(), "node_set"
+
+            rcfg["_source_sets"] = src_sets
+            rcfg["_source_type"] = src_type
+
+            out_path = self.cfg.report_file_path(rcfg, name)
+            writer = get_writer("compartment")(rcfg, out_path, self.dt)
+            writer.write(cells_or_traces, self.cfg.tstart)
+
+    def _write_spike_report(self, spikes_by_pop):
+        out_path = self.cfg.spikes_file_path
+        writer = get_writer("spikes")({}, out_path, self.dt)
+        writer.write(spikes_by_pop)
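A usage sketch of the new manager; `sim_config` and `cells` stand in for a loaded simulation config object and the {(population, gid): Cell} mapping kept by CircuitSimulation (both names are assumptions, not identifiers from the diff):

    from bluecellulab.reports.manager import ReportManager

    manager = ReportManager(sim_config, sim_dt=0.025)

    # Single-process run: pass the live Cell objects; spike times are then
    # extracted via extract_spikes_from_cells() using the config's
    # spike_location and spike_threshold.
    manager.write_all(cells)

    # Parallel run: gather plain traces and spikes on rank 0 first, e.g.
    #   traces_by_gid = {("popA", 1): {"voltage": v_array}, ...}
    #   spikes_by_pop = {"popA": {1: [12.3, 45.6]}}
    # and call manager.write_all(traces_by_gid, spikes_by_pop=spikes_by_pop).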
bluecellulab/reports/utils.py
@@ -0,0 +1,156 @@
+# Copyright 2025 Open Brain Institute
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Report class of bluecellulab."""
+
+from collections import defaultdict
+import logging
+from typing import Dict, Any, List
+
+from bluecellulab.tools import resolve_segments, resolve_source_nodes
+
+logger = logging.getLogger(__name__)
+
+
+def _configure_recording(cell, report_cfg, source, source_type, report_name):
+    """Configure recording of a variable on a single cell.
+
+    This function sets up the recording of the specified variable (e.g., membrane voltage)
+    in the target cell, for each resolved segment.
+
+    Parameters
+    ----------
+    cell : Any
+        The cell object on which to configure recordings.
+
+    report_cfg : dict
+        The configuration dictionary for this report.
+
+    source : dict
+        The source definition specifying nodes or compartments.
+
+    source_type : str
+        Either "node_set" or "compartment_set".
+
+    report_name : str
+        The name of the report (used in logging).
+    """
+    variable = report_cfg.get("variable_name", "v")
+
+    node_id = cell.cell_id
+    compartment_nodes = source.get("compartment_set") if source_type == "compartment_set" else None
+
+    targets = resolve_segments(cell, report_cfg, node_id, compartment_nodes, source_type)
+    for sec, sec_name, seg in targets:
+        try:
+            cell.add_variable_recording(variable=variable, section=sec, segx=seg)
+        except AttributeError:
+            logger.warning(f"Recording for variable '{variable}' is not implemented in Cell.")
+            return
+        except Exception as e:
+            logger.warning(
+                f"Failed to record '{variable}' at {sec_name}({seg}) on GID {node_id} for report '{report_name}': {e}"
+            )
+
+
+def configure_all_reports(cells, simulation_config):
+    """Configure recordings for all reports defined in the simulation
+    configuration.
+
+    This iterates through all report entries, resolves source nodes or compartments,
+    and configures the corresponding recordings on each cell.
+
+    Parameters
+    ----------
+    cells : dict
+        Mapping from (population, gid) → Cell object.
+
+    simulation_config : Any
+        Simulation configuration object providing report entries,
+        node sets, and compartment sets.
+    """
+    report_entries = simulation_config.get_report_entries()
+
+    for report_name, report_cfg in report_entries.items():
+        report_type = report_cfg.get("type", "compartment")
+        section = report_cfg.get("sections", "soma")
+
+        if report_type != "compartment":
+            raise NotImplementedError(f"Report type '{report_type}' is not supported.")
+
+        if section == "compartment_set":
+            source_type = "compartment_set"
+            source_sets = simulation_config.get_compartment_sets()
+            source_name = report_cfg.get("compartments")
+            if not source_name:
+                logger.warning(f"Report '{report_name}' does not specify a node set in 'compartments' for {source_type}.")
+                continue
+        else:
+            source_type = "node_set"
+            source_sets = simulation_config.get_node_sets()
+            source_name = report_cfg.get("cells")
+            if not source_name:
+                logger.warning(f"Report '{report_name}' does not specify a node set in 'cells' for {source_type}.")
+                continue
+
+        source = source_sets.get(source_name)
+        if not source:
+            logger.warning(f"{source_type.title()} '{source_name}' not found for report '{report_name}', skipping recording.")
+            continue
+
+        population = source["population"]
+        node_ids, _ = resolve_source_nodes(source, source_type, cells, population)
+
+        for node_id in node_ids:
+            cell = cells.get((population, node_id))
+            if not cell:
+                continue
+            _configure_recording(cell, report_cfg, source, source_type, report_name)
+
+
+def extract_spikes_from_cells(
+    cells: Dict[Any, Any],
+    location: str = "soma",
+    threshold: float = -20.0,
+) -> Dict[str, Dict[int, list]]:
+    """Extract spike times from recorded cells, grouped by population.
+
+    Parameters
+    ----------
+    cells : dict
+        Mapping from (population, gid) → Cell object, or similar.
+
+    location : str
+        Recording location passed to Cell.get_recorded_spikes().
+
+    threshold : float
+        Voltage threshold (mV) used for spike detection.
+
+    Returns
+    -------
+    spikes_by_population : dict
+        {population → {gid_int → [spike_times_ms]}}
+    """
+    spikes_by_pop: defaultdict[str, Dict[int, List[float]]] = defaultdict(dict)
+
+    for key, cell in cells.items():
+        if isinstance(key, tuple):
+            pop, gid = key
+        else:
+            raise ValueError(f"Cell key {key} is not a (population, gid) tuple.")
+
+        times = cell.get_recorded_spikes(location=location, threshold=threshold)
+        if times is not None and len(times) > 0:
+            spikes_by_pop[pop][gid] = list(times)
+
+    return dict(spikes_by_pop)
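The grouping contract of extract_spikes_from_cells can be checked with a minimal stub cell; the stub below is purely illustrative and not part of bluecellulab:

    from bluecellulab.reports.utils import extract_spikes_from_cells

    class _StubCell:
        # Illustrative stand-in exposing only get_recorded_spikes().
        def __init__(self, times):
            self._times = times

        def get_recorded_spikes(self, location="soma", threshold=-20.0):
            return self._times

    cells = {
        ("popA", 1): _StubCell([10.0, 25.5]),
        ("popA", 2): _StubCell([]),   # no spikes, so omitted from the result
        ("popB", 7): _StubCell([3.2]),
    }
    spikes = extract_spikes_from_cells(cells)
    assert spikes == {"popA": {1: [10.0, 25.5]}, "popB": {7: [3.2]}}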
bluecellulab/reports/writers/__init__.py
@@ -0,0 +1,25 @@
+# Copyright 2025 Open Brain Institute
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .compartment import CompartmentReportWriter
+from .spikes import SpikeReportWriter
+
+REGISTRY = {
+    "compartment": CompartmentReportWriter,
+    "spikes": SpikeReportWriter,
+}
+
+
+def get_writer(report_type):
+    return REGISTRY[report_type]
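The registry keeps writer selection data-driven; a brief sketch of resolving and instantiating a writer (the config dict and output path below are made-up values):

    from bluecellulab.reports.writers import get_writer

    WriterCls = get_writer("compartment")            # CompartmentReportWriter
    writer = WriterCls({"variable_name": "v"}, "output/voltage.h5", 0.025)

    # Unknown report types fail fast with a KeyError.
    try:
        get_writer("lfp")
    except KeyError:
        pass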
bluecellulab/reports/writers/base_writer.py
@@ -0,0 +1,30 @@
+# Copyright 2025 Open Brain Institute
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Dict, Any
+
+
+class BaseReportWriter(ABC):
+    """Abstract interface for every report writer."""
+
+    def __init__(self, report_cfg: Dict[str, Any], output_path: Path, sim_dt: float):
+        self.cfg = report_cfg
+        self.output_path = Path(output_path)
+        self.sim_dt = sim_dt
+
+    @abstractmethod
+    def write(self, data: Dict):
+        """Write one report to disk."""
bluecellulab/reports/writers/compartment.py
@@ -0,0 +1,196 @@
+# Copyright 2025 Open Brain Institute
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+import numpy as np
+import h5py
+from typing import Dict, List
+from .base_writer import BaseReportWriter
+from bluecellulab.reports.utils import (
+    resolve_source_nodes,
+    resolve_segments,
+)
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class CompartmentReportWriter(BaseReportWriter):
+    """Writes SONATA compartment (voltage) reports."""
+
+    def write(self, cells: Dict, tstart=0):
+        report_name = self.cfg.get("name", "unnamed")
+        # section = self.cfg.get("sections")
+        variable = self.cfg.get("variable_name", "v")
+
+        source_sets = self.cfg["_source_sets"]
+        source_type = self.cfg["_source_type"]
+        src_name = self.cfg.get("cells") if source_type == "node_set" else self.cfg.get("compartments")
+        src = source_sets.get(src_name)
+        if not src:
+            logger.warning(f"{source_type.title()} '{src_name}' not found – skipping '{report_name}'.")
+            return
+
+        population = src["population"]
+        node_ids, comp_nodes = resolve_source_nodes(src, source_type, cells, population)
+
+        data_matrix: List[np.ndarray] = []
+        node_id_list: List[int] = []
+        idx_ptr: List[int] = [0]
+        elem_ids: List[int] = []
+
+        for nid in node_ids:
+            cell = cells.get((population, nid)) or cells.get(f"{population}_{nid}")
+            if cell is None:
+                continue
+
+            if isinstance(cell, dict):
+                # No section/segment structure to resolve for traces
+                trace = np.asarray(cell["voltage"], dtype=np.float32)
+                data_matrix.append(trace)
+                node_id_list.append(nid)
+                elem_ids.append(len(elem_ids))
+                idx_ptr.append(idx_ptr[-1] + 1)
+                continue
+
+            targets = resolve_segments(cell, self.cfg, nid, comp_nodes, source_type)
+            for sec, sec_name, seg in targets:
+                try:
+                    if hasattr(cell, "get_variable_recording"):
+                        trace = cell.get_variable_recording(variable=variable, section=sec, segx=seg)
+                    else:
+                        trace = np.asarray(cell["voltage"], dtype=np.float32)
+                    data_matrix.append(trace)
+                    node_id_list.append(nid)
+                    elem_ids.append(len(elem_ids))
+                    idx_ptr.append(idx_ptr[-1] + 1)
+                except Exception as e:
+                    logger.warning(f"Failed recording {nid}:{sec_name}@{seg}: {e}")
+
+        if not data_matrix:
+            logger.warning(f"No data for report '{report_name}'.")
+            return
+
+        self._write_sonata_report_file(
+            self.output_path,
+            population,
+            data_matrix,
+            node_id_list,
+            idx_ptr,
+            elem_ids,
+            self.cfg,
+            self.sim_dt,
+            tstart
+        )
+
+    def _write_sonata_report_file(
+        self,
+        output_path,
+        population,
+        data_matrix,
+        recorded_node_ids,
+        index_pointers,
+        element_ids,
+        report_cfg,
+        sim_dt,
+        tstart
+    ):
+        """Write a SONATA HDF5 report file containing time series data.
+
+        This function downsamples the data if needed, prepares metadata arrays,
+        and writes the report in SONATA format to the specified HDF5 file.
+
+        Parameters
+        ----------
+        output_path : str or Path
+            Destination path of the report file.
+
+        population : str
+            Name of the population being recorded.
+
+        data_matrix : list of ndarray
+            List of arrays containing recorded time series per element.
+
+        recorded_node_ids : list of int
+            Node IDs corresponding to the recorded traces.
+
+        index_pointers : list of int
+            Index pointers mapping node IDs to data.
+
+        element_ids : list of int
+            Element IDs (e.g., segment IDs) corresponding to each trace.
+
+        report_cfg : dict
+            Report configuration specifying time window and variable name.
+
+        sim_dt : float
+            Simulation timestep (ms).
+
+        tstart : float
+            Simulation start time (ms).
+        """
+        start_time = float(report_cfg.get("start_time", 0.0))
+        end_time = float(report_cfg.get("end_time", 0.0))
+        dt_report = float(report_cfg.get("dt", sim_dt))
+
+        # Clamp dt_report if finer than simuldation dt
+        if dt_report < sim_dt:
+            logger.warning(
+                f"Requested report dt={dt_report} ms is finer than simulation dt={sim_dt} ms. "
+                f"Clamping report dt to {sim_dt} ms."
+            )
+            dt_report = sim_dt
+
+        step = int(round(dt_report / sim_dt))
+        if not np.isclose(step * sim_dt, dt_report, atol=1e-9):
+            raise ValueError(
+                f"dt_report={dt_report} is not an integer multiple of dt_data={sim_dt}"
+            )
+
+        # Downsample the data if needed
+        # Compute start and end indices in the original data
+        start_index = int(round((start_time - tstart) / sim_dt))
+        end_index = int(round((end_time - tstart) / sim_dt)) + 1  # inclusive
+
+        # Now slice and downsample
+        data_matrix_downsampled = [
+            trace[start_index:end_index:step] for trace in data_matrix
+        ]
+        data_array = np.stack(data_matrix_downsampled, axis=1).astype(np.float32)
+
+        # Prepare metadata arrays
+        node_ids_arr = np.array(recorded_node_ids, dtype=np.uint64)
+        index_ptr_arr = np.array(index_pointers, dtype=np.uint64)
+        element_ids_arr = np.array(element_ids, dtype=np.uint32)
+        time_array = np.array([start_time, end_time, dt_report], dtype=np.float64)
+
+        # Ensure output directory exists
+        output_path = Path(output_path)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Write to HDF5
+        with h5py.File(output_path, "w") as f:
+            grp = f.require_group(f"/report/{population}")
+            data_ds = grp.create_dataset("data", data=data_array.astype(np.float32))
+
+            variable = report_cfg.get("variable_name", "v")
+            if variable == "v":
+                data_ds.attrs["units"] = "mV"
+
+            mapping = grp.require_group("mapping")
+            mapping.create_dataset("node_ids", data=node_ids_arr)
+            mapping.create_dataset("index_pointers", data=index_ptr_arr)
+            mapping.create_dataset("element_ids", data=element_ids_arr)
+            time_ds = mapping.create_dataset("time", data=time_array)
+            time_ds.attrs["units"] = "ms"
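The slicing arithmetic in _write_sonata_report_file is easy to sanity-check in isolation; a worked example with illustrative numbers (sim_dt = 0.025 ms, a 0 to 10 ms report window at 0.1 ms, simulation started at tstart = 0 ms):

    import numpy as np

    sim_dt, dt_report = 0.025, 0.1
    start_time, end_time, tstart = 0.0, 10.0, 0.0

    step = int(round(dt_report / sim_dt))                      # 4: keep every 4th sample
    start_index = int(round((start_time - tstart) / sim_dt))   # 0
    end_index = int(round((end_time - tstart) / sim_dt)) + 1   # 401, inclusive end

    n_samples = 401                                            # 0 .. 10 ms at 0.025 ms
    trace = np.arange(n_samples, dtype=np.float32) * sim_dt    # one recorded trace
    downsampled = trace[start_index:end_index:step]
    assert len(downsampled) == 101                             # 0.0, 0.1, ..., 10.0 ms
    assert np.isclose(downsampled[1], 0.1)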
bluecellulab/reports/writers/spikes.py
@@ -0,0 +1,61 @@
+# Copyright 2025 Open Brain Institute
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List
+from bluecellulab.reports.writers.base_writer import BaseReportWriter
+import logging
+import numpy as np
+import h5py
+
+logger = logging.getLogger(__name__)
+
+
+class SpikeReportWriter(BaseReportWriter):
+    """Writes SONATA spike report from pop→gid→times mapping."""
+
+    def write(self, spikes_by_pop: Dict[str, Dict[int, list]]):
+        if self.output_path.exists():
+            self.output_path.unlink()
+
+        self.output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        for pop, gid_map in spikes_by_pop.items():
+            all_node_ids: List[int] = []
+            all_timestamps: List[float] = []
+            for node_id, times in gid_map.items():
+                all_node_ids.extend([node_id] * len(times))
+                all_timestamps.extend(times)
+
+            if not all_timestamps:
+                logger.warning(f"No spikes to write for population '{pop}'.")
+
+            # Sort by time for consistency
+            sorted_indices = np.argsort(all_timestamps)
+            node_ids_sorted = np.array(all_node_ids, dtype=np.uint64)[sorted_indices]
+            timestamps_sorted = np.array(all_timestamps, dtype=np.float64)[sorted_indices]
+
+            with h5py.File(self.output_path, 'a') as f:
+                spikes_group = f.require_group("spikes")
+                if pop in spikes_group:
+                    logger.warning(f"Overwriting existing group for population '{pop}' in {self.output_path}.")
+                    del spikes_group[pop]
+
+                group = spikes_group.create_group(pop)
+                sorting_enum = h5py.enum_dtype({'none': 0, 'by_id': 1, 'by_time': 2}, basetype='u1')
+                group.attrs.create("sorting", 2, dtype=sorting_enum)  # 2 = by_time
+
+                timestamps_ds = group.create_dataset("timestamps", data=timestamps_sorted)
+                group.create_dataset("node_ids", data=node_ids_sorted)
+
+                timestamps_ds.attrs["units"] = "ms"  # SONATA-required
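Reading the resulting file back needs only h5py and follows the SONATA layout written above; "out_spikes.h5" and the population name are placeholders:

    import h5py
    import numpy as np

    with h5py.File("out_spikes.h5", "r") as f:
        pop = f["spikes"]["popA"]                     # placeholder population name
        times = pop["timestamps"][:]                  # milliseconds, sorted by time
        node_ids = pop["node_ids"][:]
        assert pop["timestamps"].attrs["units"] == "ms"
        assert np.all(np.diff(times) >= 0.0)          # "sorting" attribute is by_time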
bluecellulab/simulation/report.py
@@ -1,264 +0,0 @@
-# Copyright 2025 Open Brain Institute
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Report class of bluecellulab."""
-
-import logging
-from pathlib import Path
-import h5py
-from typing import List
-import numpy as np
-import os
-
-from bluecellulab.tools import resolve_segments, resolve_source_nodes
-from bluecellulab.cell.cell_dict import CellDict
-
-logger = logging.getLogger(__name__)
-
-
-def _configure_recording(cell, report_cfg, source, source_type, report_name):
-    variable = report_cfg.get("variable_name", "v")
-
-    node_id = cell.cell_id
-    compartment_nodes = source.get("compartment_set") if source_type == "compartment_set" else None
-
-    targets = resolve_segments(cell, report_cfg, node_id, compartment_nodes, source_type)
-    for sec, sec_name, seg in targets:
-        try:
-            cell.add_variable_recording(variable=variable, section=sec, segx=seg)
-        except AttributeError:
-            logger.warning(f"Recording for variable '{variable}' is not implemented in Cell.")
-            return
-        except Exception as e:
-            logger.warning(
-                f"Failed to record '{variable}' at {sec_name}({seg}) on GID {node_id} for report '{report_name}': {e}"
-            )
-
-
-def configure_all_reports(cells, simulation_config):
-    report_entries = simulation_config.get_report_entries()
-
-    for report_name, report_cfg in report_entries.items():
-        report_type = report_cfg.get("type", "compartment")
-        section = report_cfg.get("sections", "soma")
-
-        if report_type != "compartment":
-            raise NotImplementedError(f"Report type '{report_type}' is not supported.")
-
-        if section == "compartment_set":
-            source_type = "compartment_set"
-            source_sets = simulation_config.get_compartment_sets()
-            source_name = report_cfg.get("compartments")
-            if not source_name:
-                logger.warning(f"Report '{report_name}' does not specify a node set in 'compartments' for {source_type}.")
-                continue
-        else:
-            source_type = "node_set"
-            source_sets = simulation_config.get_node_sets()
-            source_name = report_cfg.get("cells")
-            if not source_name:
-                logger.warning(f"Report '{report_name}' does not specify a node set in 'cells' for {source_type}.")
-                continue
-
-        source = source_sets.get(source_name)
-        if not source:
-            logger.warning(f"{source_type.title()} '{source_name}' not found for report '{report_name}', skipping recording.")
-            continue
-
-        population = source["population"]
-        node_ids, _ = resolve_source_nodes(source, source_type, cells, population)
-
-        for node_id in node_ids:
-            cell = cells.get((population, node_id))
-            if not cell:
-                continue
-            _configure_recording(cell, report_cfg, source, source_type, report_name)
-
-
-def write_compartment_report(
-    report_name: str,
-    output_path: str,
-    cells: CellDict,
-    report_cfg: dict,
-    source_sets: dict,
-    source_type: str,
-    sim_dt: float
-):
-    """Write a SONATA-compatible compartment report to an HDF5 file.
-
-    This function collects time series data (e.g., membrane voltage, ion currents)
-    from a group of cells defined by either a node set or a compartment set, and
-    writes the data to a SONATA-style report file.
-
-    Args:
-        output_path (str): Path to the output HDF5 file.
-        cells (CellDict): Mapping of (population, node_id) to cell objects that
-            provide access to pre-recorded variable traces.
-        report_cfg (dict): Configuration for the report. Must include:
-            - "variable_name": Name of the variable to report (e.g., "v", "ica", "ina").
-            - "start_time", "end_time", "dt": Timing parameters.
-            - "cells" or "compartments": Name of the node or compartment set.
-        source_sets (dict): Dictionary of either node sets or compartment sets.
-        source_type (str): Either "node_set" or "compartment_set".
-        sim_dt (float): Simulation time step used for the recorded data.
-
-    Raises:
-        ValueError: If the specified source set is not found.
-
-    Notes:
-        - Currently supports only variables explicitly handled in Cell.get_variable_recording().
-        - Cells without recordings for the requested variable will be skipped.
-    """
-    source_name = report_cfg.get("cells") if source_type == "node_set" else report_cfg.get("compartments")
-    source = source_sets.get(source_name)
-    if not source:
-        logger.warning(f"{source_type.title()} '{source_name}' not found for report '{report_name}', skipping write.")
-        return
-
-    population = source["population"]
-
-    node_ids, compartment_nodes = resolve_source_nodes(source, source_type, cells, population)
-
-    data_matrix: List[np.ndarray] = []
-    recorded_node_ids: List[int] = []
-    index_pointers: List[int] = [0]
-    element_ids: List[int] = []
-
-    for node_id in node_ids:
-        try:
-            cell = cells[(population, node_id)]
-        except KeyError:
-            continue
-        if not cell:
-            continue
-
-        targets = resolve_segments(cell, report_cfg, node_id, compartment_nodes, source_type)
-        for sec, sec_name, seg in targets:
-            try:
-                variable = report_cfg.get("variable_name", "v")
-                trace = cell.get_variable_recording(variable=variable, section=sec, segx=seg)
-                data_matrix.append(trace)
-                recorded_node_ids.append(node_id)
-                element_ids.append(len(element_ids))
-                index_pointers.append(index_pointers[-1] + 1)
-            except Exception as e:
-                logger.warning(f"Failed recording: GID {node_id} sec {sec_name} seg {seg}: {e}")
-
-    if not data_matrix:
-        logger.warning(f"No data recorded for report '{source_name}'. Skipping write.")
-        return
-
-    write_sonata_report_file(
-        output_path, population, data_matrix, recorded_node_ids, index_pointers, element_ids, report_cfg, sim_dt
-    )
-
-
-def write_sonata_report_file(
-    output_path,
-    population,
-    data_matrix,
-    recorded_node_ids,
-    index_pointers,
-    element_ids,
-    report_cfg,
-    sim_dt
-):
-    start_time = float(report_cfg.get("start_time", 0.0))
-    end_time = float(report_cfg.get("end_time", 0.0))
-    dt_report = float(report_cfg.get("dt", sim_dt))
-
-    # Clamp dt_report if finer than simuldation dt
-    if dt_report < sim_dt:
-        logger.warning(
-            f"Requested report dt={dt_report} ms is finer than simulation dt={sim_dt} ms. "
-            f"Clamping report dt to {sim_dt} ms."
-        )
-        dt_report = sim_dt
-
-    step = int(round(dt_report / sim_dt))
-    if not np.isclose(step * sim_dt, dt_report, atol=1e-9):
-        raise ValueError(
-            f"dt_report={dt_report} is not an integer multiple of dt_data={sim_dt}"
-        )
-
-    # Downsample the data if needed
-    # Compute start and end indices in the original data
-    start_index = int(round(start_time / sim_dt))
-    end_index = int(round(end_time / sim_dt)) + 1  # inclusive
-
-    # Now slice and downsample
-    data_matrix_downsampled = [
-        trace[start_index:end_index:step] for trace in data_matrix
-    ]
-    data_array = np.stack(data_matrix_downsampled, axis=1).astype(np.float32)
-
-    # Prepare metadata arrays
-    node_ids_arr = np.array(recorded_node_ids, dtype=np.uint64)
-    index_ptr_arr = np.array(index_pointers, dtype=np.uint64)
-    element_ids_arr = np.array(element_ids, dtype=np.uint32)
-    time_array = np.array([start_time, end_time, dt_report], dtype=np.float64)
-
-    # Ensure output directory exists
-    output_path = Path(output_path)
-    output_path.parent.mkdir(parents=True, exist_ok=True)
-
-    # Write to HDF5
-    with h5py.File(output_path, "w") as f:
-        grp = f.require_group(f"/report/{population}")
-        data_ds = grp.create_dataset("data", data=data_array.astype(np.float32))
-
-        variable = report_cfg.get("variable_name", "v")
-        if variable == "v":
-            data_ds.attrs["units"] = "mV"
-
-        mapping = grp.require_group("mapping")
-        mapping.create_dataset("node_ids", data=node_ids_arr)
-        mapping.create_dataset("index_pointers", data=index_ptr_arr)
-        mapping.create_dataset("element_ids", data=element_ids_arr)
-        time_ds = mapping.create_dataset("time", data=time_array)
-        time_ds.attrs["units"] = "ms"
-
-
-def write_sonata_spikes(f_name: str, spikes_dict: dict[int, np.ndarray], population: str):
-    """Write a SONATA spike group to a spike file from {node_id: [t1, t2,
-    ...]}."""
-    all_node_ids: List[int] = []
-    all_timestamps: List[float] = []
-
-    for node_id, times in spikes_dict.items():
-        all_node_ids.extend([node_id] * len(times))
-        all_timestamps.extend(times)
-
-    if not all_timestamps:
-        logger.warning(f"No spikes to write for population '{population}'.")
-
-    # Sort by time for consistency
-    sorted_indices = np.argsort(all_timestamps)
-    node_ids_sorted = np.array(all_node_ids, dtype=np.uint64)[sorted_indices]
-    timestamps_sorted = np.array(all_timestamps, dtype=np.float64)[sorted_indices]
-
-    os.makedirs(os.path.dirname(f_name), exist_ok=True)
-    with h5py.File(f_name, 'a') as f:  # 'a' to allow multiple writes
-        spikes_group = f.require_group("spikes")
-        if population in spikes_group:
-            logger.warning(f"Overwriting existing group for population '{population}' in {f_name}.")
-            del spikes_group[population]
-
-        group = spikes_group.create_group(population)
-        sorting_enum = h5py.enum_dtype({'none': 0, 'by_id': 1, 'by_time': 2}, basetype='u1')
-        group.attrs.create("sorting", 2, dtype=sorting_enum)  # 2 = by_time
-
-        timestamps_ds = group.create_dataset("timestamps", data=timestamps_sorted)
-        group.create_dataset("node_ids", data=node_ids_sorted)
-
-        timestamps_ds.attrs["units"] = "ms"  # SONATA-required
bluecellulab/simulation/simulation.py
@@ -88,7 +88,7 @@ class Simulation:

     def run(
         self,
-
+        tstop: float,
         cvode=True,
         cvode_minstep=None,
         cvode_maxstep=None,
@@ -107,10 +107,10 @@ class Simulation:
             show_progress = bluecellulab.VERBOSE_LEVEL > 1

         if show_progress:
-            self.progress_dt =
+            self.progress_dt = tstop / 100
             self.init_progress_callback()

-        neuron.h.tstop =
+        neuron.h.tstop = tstop

         cvode_old_status = neuron.h.cvode_active()
         if cvode:
@@ -138,10 +138,9 @@ class Simulation:
         # initialized heavily influence the random number generator
         # e.g. finitialize() + step() != run()

-        logger.debug(f'Running a simulation until {maxtime} ms ...')
-
         self.init_callbacks()

+        logger.debug(f'Running a simulation until {tstop} ms ...')
         neuron.h.stdinit()

         if forward_skip:
@@ -152,7 +151,7 @@ class Simulation:
             for _ in range(0, 10):
                 neuron.h.fadvance()
             neuron.h.dt = save_dt
-            neuron.h.t =
+            neuron.h.t = forward_skip_value

         if self.pc is not None:
             for cell in self.cells:
bluecellulab/stimulus/circuit_stimulus_definitions.py
@@ -243,7 +243,7 @@ class Stimulus:
                 delay=stimulus_entry["delay"],
                 duration=stimulus_entry["duration"],
                 amp_start=stimulus_entry["amp_start"],
-                amp_end=stimulus_entry
+                amp_end=stimulus_entry.get("amp_end", stimulus_entry["amp_start"]),
             )
         elif pattern == Pattern.RELATIVE_LINEAR:
             return RelativeLinear(
@@ -251,7 +251,7 @@ class Stimulus:
                 delay=stimulus_entry["delay"],
                 duration=stimulus_entry["duration"],
                 percent_start=stimulus_entry["percent_start"],
-                percent_end=stimulus_entry
+                percent_end=stimulus_entry.get("percent_end", stimulus_entry["percent_start"]),
             )
         elif pattern == Pattern.SYNAPSE_REPLAY:
             return SynapseReplay(
@@ -285,7 +285,7 @@ class Stimulus:
                 decay_time=stimulus_entry["decay_time"],
                 mean_percent=stimulus_entry["mean_percent"],
                 sd_percent=stimulus_entry["sd_percent"],
-                relative_skew=stimulus_entry.get("
+                relative_skew=stimulus_entry.get("relative_skew", 0.5),
                 seed=stimulus_entry.get("random_seed", None),
                 mode=ClampMode(stimulus_entry.get("input_type", "current_clamp").lower()),
                 reversal=stimulus_entry.get("reversal", 0.0)
{bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/RECORD
@@ -1,5 +1,5 @@
 bluecellulab/__init__.py,sha256=1d_CKIJLIpon7o13h3lBnV_-33obZEPwa9KDTjlFPD8,880
-bluecellulab/circuit_simulation.py,sha256=
+bluecellulab/circuit_simulation.py,sha256=CryoBSUGc9z6nIAyNzCsyppeSlc_URJX6hZoZPgQI2Q,36807
 bluecellulab/connection.py,sha256=-xT0mU7ppeHI_qjCKj17TtxXVVcUDgBsaMKt9ODmcEU,4640
 bluecellulab/dendrogram.py,sha256=FjS6RZ6xcp5zJoY5d5qv_edqPM13tL2-UANgbZuDBjY,6427
 bluecellulab/exceptions.py,sha256=1lKD92VIyD8cUggAI1SLxeKzj_09Ik_TlHCzPLCvDHg,2379
@@ -38,7 +38,7 @@ bluecellulab/circuit/__init__.py,sha256=Khpa13nzNvDlDS2JduyoFTukEduEkWCc5ML_JwGp
 bluecellulab/circuit/format.py,sha256=90gWOXg6HK0R9a4WFSnnRH8XezxmzOGk5dRpJHbvbbU,1674
 bluecellulab/circuit/iotools.py,sha256=Q65xYDaiensMtrulC3OLsS2_hcWr_Kje0nXFrAizMMo,1589
 bluecellulab/circuit/node_id.py,sha256=FdoFAGq0_sCyQySOuNI0chdbVr3L8R0w2Y1em5MyIDA,1265
-bluecellulab/circuit/simulation_access.py,sha256=
+bluecellulab/circuit/simulation_access.py,sha256=8LX5nbKw_Hu7AR3pGLdTFGwehvhaj-r8Mh1q3dVoiVg,7745
 bluecellulab/circuit/synapse_properties.py,sha256=TvUMiXZAAeYo1zKkus3z1EUvrE9QCIQ3Ze-jSnPSJWY,6374
 bluecellulab/circuit/validate.py,sha256=wntnr7oIDyasqD1nM-kqz1NpfWDxBGhx0Ep3e5hHXIw,3593
 bluecellulab/circuit/circuit_access/__init__.py,sha256=sgp6m5kP-pq60V1IFGUiSUR1OW01zdxXNNUJmPA8anI,201
@@ -47,30 +47,37 @@ bluecellulab/circuit/circuit_access/definition.py,sha256=_sUU0DkesGOFW82kS1G9vki
 bluecellulab/circuit/circuit_access/sonata_circuit_access.py,sha256=tADHxVZw4VgZAz2z4NKMUwc0rd_EO40EZggw5fDhnF4,10411
 bluecellulab/circuit/config/__init__.py,sha256=aaoJXRKBJzpxxREo9NxKc-_CCPmVeuR1mcViRXcLrC4,215
 bluecellulab/circuit/config/bluepy_simulation_config.py,sha256=V3eqOzskX7VrMDpl-nMQVEhDg8QWgRmRduyJBii5sgI,6974
-bluecellulab/circuit/config/definition.py,sha256=
+bluecellulab/circuit/config/definition.py,sha256=cotKRDHOjzZKNgNSrZ29voU-66W8jaGt5yuge1hyv18,2953
 bluecellulab/circuit/config/sections.py,sha256=QRnU44-OFvHxcF1LX4bAEP9dk3I6UKsuPNBbWkdfmRE,7151
-bluecellulab/circuit/config/sonata_simulation_config.py,sha256=
+bluecellulab/circuit/config/sonata_simulation_config.py,sha256=J1DFqNxIJKAxD1KU_oJV9tigMdpT_Not0Q8SF5q2EeQ,7271
 bluecellulab/hoc/Cell.hoc,sha256=z77qRQG_-afj-RLX0xN6V-K6Duq3bR7vmlDrGWPdh4E,16435
 bluecellulab/hoc/RNGSettings.hoc,sha256=okJBdqlPXET8BrpG1Q31GdaxHfpe3CE0L5P7MAhfQTE,1227
 bluecellulab/hoc/TDistFunc.hoc,sha256=WKX-anvL83xGuGPH9g1oIORB17UM4Pi3-iIXzKO-pUQ,2663
 bluecellulab/hoc/TStim.hoc,sha256=noBJbM_ZqF6T6MEgBeowNzz21I9QeYZ5brGgUvCSm4k,8473
 bluecellulab/hoc/fileUtils.hoc,sha256=LSM7BgyjYVqo2DGSOKvg4W8IIusbsL45JVYK0vgwitU,2539
+bluecellulab/reports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+bluecellulab/reports/manager.py,sha256=hy54B7AXa46bgjKJ_Ar65_GYIu9C8Zg2fgxIlxSweRE,3284
+bluecellulab/reports/utils.py,sha256=yZPtO0HXs6HPDVOZQbeXlPajnSOqE2Qmx8VufQFJFT0,5557
+bluecellulab/reports/writers/__init__.py,sha256=Q8Y2GC83jseH5QY9IlEQsvaUQIH-BzgqhVjGRWD5j90,826
+bluecellulab/reports/writers/base_writer.py,sha256=P5ramFD2oFIroEBCH1pAscbkfcBIVYFIBg4pVpSP2IU,1042
+bluecellulab/reports/writers/compartment.py,sha256=EwFc2NwqhGEwGShEFOIrpYWRgH4b7qZCwsPvpUxeboU,7212
+bluecellulab/reports/writers/spikes.py,sha256=ycMuoT6f-UbAbC47X9gkG44OQYeAGBQMB2RdJAq3Ykg,2558
 bluecellulab/simulation/__init__.py,sha256=P2ebt0SFw-08J3ihN-LeRn95HeF79tzA-Q0ReLm32dM,214
 bluecellulab/simulation/neuron_globals.py,sha256=iBjhg0-1YMP5LsVdtUDt24PEypkCL6mlyzEBZqoS8xo,4508
 bluecellulab/simulation/parallel.py,sha256=oQ_oV2EKr8gP4yF2Dq8q9MiblDyi89_wBgLzQkLV_U0,1514
-bluecellulab/simulation/report.py,sha256=
-bluecellulab/simulation/simulation.py,sha256=
+bluecellulab/simulation/report.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+bluecellulab/simulation/simulation.py,sha256=OfrzeHvLmOV2ptYcGie_fVGrtkDfizLpE6ZyQWVxnIE,6492
 bluecellulab/stimulus/__init__.py,sha256=DgIgVaSyR-URf3JZzvO6j-tjCerzvktuK-ep8pjMRPQ,37
-bluecellulab/stimulus/circuit_stimulus_definitions.py,sha256=
+bluecellulab/stimulus/circuit_stimulus_definitions.py,sha256=h_SqjJ-L3yNxiEO8I6Cy7i-lhfZXjrdCc92dl8bjOog,17229
 bluecellulab/stimulus/factory.py,sha256=4fvVFFjOGHSqBidLe_W1zQozfMEeePXWO6yYCs30-SM,30780
 bluecellulab/stimulus/stimulus.py,sha256=a_hKJUtZmIgjiFjbJf6RzUPokELqn0IHCgIWGw5XLm8,30322
 bluecellulab/synapse/__init__.py,sha256=RW8XoAMXOvK7OG1nHl_q8jSEKLj9ZN4oWf2nY9HAwuk,192
 bluecellulab/synapse/synapse_factory.py,sha256=NHwRMYMrnRVm_sHmyKTJ1bdoNmWZNU4UPOGu7FCi-PE,6987
 bluecellulab/synapse/synapse_types.py,sha256=zs_yBvGTH4QrbQF3nEViidyq1WM_ZcTSFdjUxB3khW0,16871
 bluecellulab/validation/validation.py,sha256=vLaOAyCO1qwwQ5Ud9X9NDX3iSsh_2S5tUA9xRmoDHiA,18465
-bluecellulab-2.6.
-bluecellulab-2.6.
-bluecellulab-2.6.
-bluecellulab-2.6.
-bluecellulab-2.6.
-bluecellulab-2.6.
+bluecellulab-2.6.62.dist-info/licenses/AUTHORS.txt,sha256=EDs3H-2HXBojbma10psixk3C2rFiOCTIREi2ZAbXYNQ,179
+bluecellulab-2.6.62.dist-info/licenses/LICENSE,sha256=dAMAR2Sud4Nead1wGFleKiwTZfkTNZbzmuGfcTKb3kg,11335
+bluecellulab-2.6.62.dist-info/METADATA,sha256=08Q8N5dafN3_qazVN4W0H7Lbg8unaGFAywz-HBFh8FU,8259
+bluecellulab-2.6.62.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+bluecellulab-2.6.62.dist-info/top_level.txt,sha256=VSyEP8w9l3pXdRkyP_goeMwiNA8KWwitfAqUkveJkdQ,13
+bluecellulab-2.6.62.dist-info/RECORD,,
{bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/WHEEL
File without changes
{bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/licenses/AUTHORS.txt
File without changes
{bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/licenses/LICENSE
File without changes
{bluecellulab-2.6.60.dist-info → bluecellulab-2.6.62.dist-info}/top_level.txt
File without changes