figpack 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release.
This version of figpack might be problematic.
- figpack/__init__.py +1 -5
- figpack/cli.py +8 -7
- figpack/core/_bundle_utils.py +2 -0
- figpack/core/_show_view.py +4 -8
- figpack/core/_upload_bundle.py +13 -11
- figpack/core/figpack_view.py +1 -0
- figpack/figpack-gui-dist/assets/{index-Dw14QqeQ.js → index-DeyVLaXh.js} +91 -91
- figpack/figpack-gui-dist/index.html +1 -1
- figpack/spike_sorting/views/AutocorrelogramItem.py +1 -0
- figpack/spike_sorting/views/Autocorrelograms.py +39 -9
- figpack/spike_sorting/views/CrossCorrelogramItem.py +1 -0
- figpack/spike_sorting/views/CrossCorrelograms.py +45 -7
- figpack/spike_sorting/views/UnitsTable.py +5 -3
- figpack/spike_sorting/views/UnitsTableRow.py +1 -1
- figpack/spike_sorting/views/__init__.py +2 -2
- figpack/views/Box.py +3 -1
- figpack/views/Image.py +4 -2
- figpack/views/LayoutItem.py +1 -0
- figpack/views/Markdown.py +1 -0
- figpack/views/MatplotlibFigure.py +4 -2
- figpack/views/MultiChannelTimeseries.py +226 -0
- figpack/views/PlotlyFigure.py +5 -3
- figpack/views/Splitter.py +3 -1
- figpack/views/TabLayout.py +3 -1
- figpack/views/TabLayoutItem.py +1 -0
- figpack/views/TimeseriesGraph.py +3 -2
- figpack/views/__init__.py +7 -6
- {figpack-0.1.4.dist-info → figpack-0.1.5.dist-info}/METADATA +48 -7
- figpack-0.1.5.dist-info/RECORD +39 -0
- figpack-0.1.4.dist-info/RECORD +0 -38
- {figpack-0.1.4.dist-info → figpack-0.1.5.dist-info}/WHEEL +0 -0
- {figpack-0.1.4.dist-info → figpack-0.1.5.dist-info}/entry_points.txt +0 -0
- {figpack-0.1.4.dist-info → figpack-0.1.5.dist-info}/licenses/LICENSE +0 -0
- {figpack-0.1.4.dist-info → figpack-0.1.5.dist-info}/top_level.txt +0 -0
figpack/figpack-gui-dist/index.html
CHANGED
@@ -5,7 +5,7 @@
     <link rel="icon" type="image/png" href="./assets/neurosift-logo-CLsuwLMO.png" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>figpack figure</title>
-    <script type="module" crossorigin src="./assets/index-Dw14QqeQ.js"></script>
+    <script type="module" crossorigin src="./assets/index-DeyVLaXh.js"></script>
     <link rel="stylesheet" crossorigin href="./assets/index-BDa2iJW9.css">
   </head>
   <body>
figpack/spike_sorting/views/Autocorrelograms.py
CHANGED
@@ -2,9 +2,11 @@
 Autocorrelograms view for figpack - displays multiple autocorrelograms
 """

-import zarr
-import numpy as np
 from typing import List, Optional
+
+import numpy as np
+import zarr
+
 from ...core.figpack_view import FigpackView
 from .AutocorrelogramItem import AutocorrelogramItem

@@ -18,17 +20,49 @@ class Autocorrelograms(FigpackView):
         self,
         *,
         autocorrelograms: List[AutocorrelogramItem],
-        height: Optional[int] = 400,
     ):
         """
         Initialize an Autocorrelograms view

         Args:
             autocorrelograms: List of AutocorrelogramItem objects
-            height: Height of the view in pixels
         """
         self.autocorrelograms = autocorrelograms
-
+
+    def from_sorting(sorting):
+        import spikeinterface as si
+        import spikeinterface.widgets as sw
+
+        assert isinstance(sorting, si.BaseSorting), "Input must be a BaseSorting object"
+        W = sw.plot_autocorrelograms(sorting)
+        return Autocorrelograms.from_spikeinterface_widget(W)
+
+    def from_spikeinterface_widget(W):
+        from spikeinterface.widgets.base import to_attr
+        from spikeinterface.widgets.utils_sortingview import make_serializable
+
+        from .AutocorrelogramItem import AutocorrelogramItem
+
+        data_plot = W.data_plot
+
+        dp = to_attr(data_plot)
+
+        unit_ids = make_serializable(dp.unit_ids)
+
+        ac_items = []
+        for i in range(len(unit_ids)):
+            for j in range(i, len(unit_ids)):
+                if i == j:
+                    ac_items.append(
+                        AutocorrelogramItem(
+                            unit_id=unit_ids[i],
+                            bin_edges_sec=(dp.bins / 1000.0).astype("float32"),
+                            bin_counts=dp.correlograms[i, j].astype("int32"),
+                        )
+                    )
+
+        view = Autocorrelograms(autocorrelograms=ac_items)
+        return view

     def _write_to_zarr_group(self, group: zarr.Group) -> None:
         """
@@ -40,10 +74,6 @@ class Autocorrelograms(FigpackView):
         # Set the view type
         group.attrs["view_type"] = "Autocorrelograms"

-        # Set view properties
-        if self.height is not None:
-            group.attrs["height"] = self.height
-
         # Store the number of autocorrelograms
         group.attrs["num_autocorrelograms"] = len(self.autocorrelograms)

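Usage note: the new from_sorting / from_spikeinterface_widget helpers build this view directly from a spikeinterface sorting. A minimal sketch, assuming spikeinterface is installed; the generate_sorting toy data and the show() call are illustrative assumptions, not part of this diff:

import spikeinterface as si
from figpack.spike_sorting.views import Autocorrelograms

# Toy sorting purely for illustration; any si.BaseSorting object works
sorting = si.generate_sorting(num_units=5, durations=[60.0], sampling_frequency=30000.0)

# Computes autocorrelograms via spikeinterface.widgets and wraps them in a figpack view
view = Autocorrelograms.from_sorting(sorting)
# view.show(title="Autocorrelograms")  # display entry point from figpack's core, if available in your version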
figpack/spike_sorting/views/CrossCorrelograms.py
CHANGED
@@ -2,9 +2,11 @@
 CrossCorrelograms view for figpack - displays multiple cross-correlograms
 """

-import zarr
-import numpy as np
 from typing import List, Optional
+
+import numpy as np
+import zarr
+
 from ...core.figpack_view import FigpackView
 from .CrossCorrelogramItem import CrossCorrelogramItem

@@ -19,7 +21,6 @@ class CrossCorrelograms(FigpackView):
         *,
         cross_correlograms: List[CrossCorrelogramItem],
         hide_unit_selector: Optional[bool] = False,
-        height: Optional[int] = 500,
     ):
         """
         Initialize a CrossCorrelograms view
@@ -27,11 +28,50 @@ class CrossCorrelograms(FigpackView):
         Args:
             cross_correlograms: List of CrossCorrelogramItem objects
             hide_unit_selector: Whether to hide the unit selector widget
-            height: Height of the view in pixels
         """
         self.cross_correlograms = cross_correlograms
         self.hide_unit_selector = hide_unit_selector
-
+
+    def from_sorting(sorting):
+        import spikeinterface as si
+        import spikeinterface.widgets as sw
+
+        assert isinstance(sorting, si.BaseSorting), "Input must be a BaseSorting object"
+        W = sw.CrossCorrelogramsWidget(sorting)
+        return CrossCorrelograms.from_spikeinterface_widget(W)
+
+    def from_spikeinterface_widget(W):
+        from spikeinterface.widgets.base import to_attr
+        from spikeinterface.widgets.utils_sortingview import make_serializable
+
+        from .CrossCorrelogramItem import CrossCorrelogramItem
+
+        data_plot = W.data_plot
+
+        dp = to_attr(data_plot)
+
+        unit_ids = make_serializable(dp.unit_ids)
+
+        if dp.similarity is not None:
+            similarity = dp.similarity
+        else:
+            similarity = np.ones((len(unit_ids), len(unit_ids)))
+
+        cc_items = []
+        for i in range(len(unit_ids)):
+            for j in range(i, len(unit_ids)):
+                if similarity[i, j] >= dp.min_similarity_for_correlograms:
+                    cc_items.append(
+                        CrossCorrelogramItem(
+                            unit_id1=unit_ids[i],
+                            unit_id2=unit_ids[j],
+                            bin_edges_sec=(dp.bins / 1000.0).astype("float32"),
+                            bin_counts=dp.correlograms[i, j].astype("int32"),
+                        )
+                    )
+
+        view = CrossCorrelograms(cross_correlograms=cc_items, hide_unit_selector=False)
+        return view

     def _write_to_zarr_group(self, group: zarr.Group) -> None:
         """
@@ -44,8 +84,6 @@ class CrossCorrelograms(FigpackView):
         group.attrs["view_type"] = "CrossCorrelograms"

         # Set view properties
-        if self.height is not None:
-            group.attrs["height"] = self.height
         if self.hide_unit_selector is not None:
             group.attrs["hide_unit_selector"] = self.hide_unit_selector

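For comparison, the same view can also be assembled from precomputed histograms; a minimal sketch using only the CrossCorrelogramItem fields that appear in this diff (the data is synthetic):

import numpy as np
from figpack.spike_sorting.views import CrossCorrelogramItem, CrossCorrelograms

# 100 bins of 1 ms spanning -50 ms to +50 ms, stored in seconds
bin_edges_sec = np.linspace(-0.05, 0.05, 101).astype("float32")

items = [
    CrossCorrelogramItem(
        unit_id1=1,
        unit_id2=2,
        bin_edges_sec=bin_edges_sec,
        bin_counts=np.random.poisson(5, size=100).astype("int32"),  # synthetic counts
    )
]
view = CrossCorrelograms(cross_correlograms=items, hide_unit_selector=False)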
figpack/spike_sorting/views/UnitsTable.py
CHANGED
@@ -2,13 +2,15 @@
 UnitsTable view for figpack - displays a table of units with their properties
 """

-import zarr
-import numpy as np
 from typing import List, Optional
+
+import numpy as np
+import zarr
+
 from ...core.figpack_view import FigpackView
+from .UnitSimilarityScore import UnitSimilarityScore
 from .UnitsTableColumn import UnitsTableColumn
 from .UnitsTableRow import UnitsTableRow
-from .UnitSimilarityScore import UnitSimilarityScore


 class UnitsTable(FigpackView):
figpack/spike_sorting/views/__init__.py
CHANGED
@@ -6,10 +6,10 @@ from .AutocorrelogramItem import AutocorrelogramItem
 from .Autocorrelograms import Autocorrelograms
 from .CrossCorrelogramItem import CrossCorrelogramItem
 from .CrossCorrelograms import CrossCorrelograms
-from .UnitsTableColumn import UnitsTableColumn
-from .UnitsTableRow import UnitsTableRow
 from .UnitSimilarityScore import UnitSimilarityScore
 from .UnitsTable import UnitsTable
+from .UnitsTableColumn import UnitsTableColumn
+from .UnitsTableRow import UnitsTableRow

 __all__ = [
     "AutocorrelogramItem",
figpack/views/Box.py
CHANGED
@@ -2,8 +2,10 @@
 Box view for figpack - a layout container that handles other views
 """

+from typing import Any, Dict, List, Literal, Optional
+
 import zarr
-
+
 from ..core.figpack_view import FigpackView
 from .LayoutItem import LayoutItem

figpack/views/Image.py
CHANGED
figpack/views/LayoutItem.py
CHANGED
figpack/views/Markdown.py
CHANGED
figpack/views/MultiChannelTimeseries.py
ADDED
@@ -0,0 +1,226 @@
+"""
+Multi-channel timeseries visualization component
+"""
+
+import math
+from typing import List, Optional, Union
+
+import numpy as np
+import zarr
+
+from ..core.figpack_view import FigpackView
+
+
+class MultiChannelTimeseries(FigpackView):
+    """
+    A multi-channel timeseries visualization component
+    """
+
+    def __init__(
+        self,
+        *,
+        start_time_sec: float,
+        sampling_frequency_hz: float,
+        data: np.ndarray,
+        channel_ids: Optional[List[Union[str, int]]] = None,
+    ):
+        """
+        Initialize a MultiChannelTimeseries view
+
+        Args:
+            start_time_sec: Starting time in seconds
+            sampling_frequency_hz: Sampling rate in Hz
+            data: N×M numpy array where N is timepoints and M is channels
+            channel_ids: Optional list of channel identifiers
+        """
+        assert data.ndim == 2, "Data must be a 2D array (timepoints × channels)"
+        assert sampling_frequency_hz > 0, "Sampling frequency must be positive"
+
+        self.start_time_sec = start_time_sec
+        self.sampling_frequency_hz = sampling_frequency_hz
+        self.data = data.astype(np.float32)  # Ensure float32 for efficiency
+
+        n_timepoints, n_channels = data.shape
+
+        # Set channel IDs
+        if channel_ids is None:
+            self.channel_ids = [f"ch_{i}" for i in range(n_channels)]
+        else:
+            assert len(channel_ids) == n_channels, (
+                f"Number of channel_ids ({len(channel_ids)}) must match "
+                f"number of channels ({n_channels})"
+            )
+            self.channel_ids = [str(ch_id) for ch_id in channel_ids]
+
+        # Prepare downsampled arrays for efficient rendering
+        self.downsampled_data = self._compute_downsampled_data()
+
+    def _compute_downsampled_data(self) -> dict:
+        """
+        Compute downsampled arrays at power-of-4 factors using a vectorized
+        min/max pyramid with NaN padding for partial bins.
+
+        Returns:
+            dict: {factor: (ceil(N/factor), 2, M) float32 array}, where the second
+                axis stores [min, max] per bin per channel.
+        """
+        data = self.data  # (N, M), float32
+        n_timepoints, n_channels = data.shape
+        downsampled = {}
+
+        if n_timepoints < 4:
+            # No level with factor >= 4 fits the stop condition (factor < N)
+            return downsampled
+
+        def _first_level_from_raw(x: np.ndarray) -> np.ndarray:
+            """Build the factor=4 level directly from the raw data."""
+            N, M = x.shape
+            n_bins = math.ceil(N / 4)
+            pad = n_bins * 4 - N
+            # Pad time axis with NaNs so min/max ignore the padded tail
+            x_pad = np.pad(
+                x, ((0, pad), (0, 0)), mode="constant", constant_values=np.nan
+            )
+            blk = x_pad.reshape(n_bins, 4, M)  # (B, 4, M)
+            mins = np.nanmin(blk, axis=1)  # (B, M)
+            maxs = np.nanmax(blk, axis=1)  # (B, M)
+            out = np.empty((n_bins, 2, M), dtype=np.float32)
+            out[:, 0, :] = mins
+            out[:, 1, :] = maxs
+            return out
+
+        def _downsample4_bins(level_minmax: np.ndarray) -> np.ndarray:
+            """
+            Build the next pyramid level from the previous one by grouping every 4
+            bins. Input is (B, 2, M) -> Output is (ceil(B/4), 2, M).
+            """
+            B, two, M = level_minmax.shape
+            assert two == 2
+            n_bins_next = math.ceil(B / 4)
+            pad = n_bins_next * 4 - B
+            lvl_pad = np.pad(
+                level_minmax,
+                ((0, pad), (0, 0), (0, 0)),
+                mode="constant",
+                constant_values=np.nan,
+            )
+            blk = lvl_pad.reshape(n_bins_next, 4, 2, M)  # (B', 4, 2, M)
+
+            # Next mins from mins; next maxs from maxs
+            mins = np.nanmin(blk[:, :, 0, :], axis=1)  # (B', M)
+            maxs = np.nanmax(blk[:, :, 1, :], axis=1)  # (B', M)
+
+            out = np.empty((n_bins_next, 2, M), dtype=np.float32)
+            out[:, 0, :] = mins
+            out[:, 1, :] = maxs
+            return out
+
+        # Level 1: factor = 4 from raw data
+        factor = 4
+        level = _first_level_from_raw(data)
+        downsampled[factor] = level
+
+        # Higher levels: factor *= 4 each time, built from previous level
+        factor *= 4  # -> 16
+        while factor < n_timepoints / 1000:
+            level = _downsample4_bins(level)
+            downsampled[factor] = level
+            factor *= 4
+
+        return downsampled
+
+    def _calculate_optimal_chunk_size(
+        self, shape: tuple, target_size_mb: float = 5.0
+    ) -> tuple:
+        """
+        Calculate optimal chunk size for Zarr storage targeting ~5MB per chunk
+
+        Args:
+            shape: Array shape (n_timepoints, ..., n_channels)
+            target_size_mb: Target chunk size in MB
+
+        Returns:
+            Tuple of chunk dimensions
+        """
+        # Calculate bytes per element (float32 = 4 bytes)
+        bytes_per_element = 4
+        target_size_bytes = target_size_mb * 1024 * 1024
+
+        if len(shape) == 2:  # Original data: (n_timepoints, n_channels)
+            n_timepoints, n_channels = shape
+            elements_per_timepoint = n_channels
+        elif len(shape) == 3:  # Downsampled data: (n_timepoints, 2, n_channels)
+            n_timepoints, _, n_channels = shape
+            elements_per_timepoint = 2 * n_channels
+        else:
+            raise ValueError(f"Unsupported shape: {shape}")
+
+        # Calculate chunk size in timepoints
+        max_timepoints_per_chunk = target_size_bytes // (
+            elements_per_timepoint * bytes_per_element
+        )
+
+        # Round down to nearest power of 2 for efficiency
+        chunk_timepoints = 2 ** int(math.log2(max_timepoints_per_chunk))
+        chunk_timepoints = max(chunk_timepoints, 1)  # At least 1
+        chunk_timepoints = min(chunk_timepoints, n_timepoints)  # At most n_timepoints
+
+        if len(shape) == 2:
+            return (chunk_timepoints, n_channels)
+        else:  # len(shape) == 3
+            return (chunk_timepoints, 2, n_channels)
+
+    def _write_to_zarr_group(self, group: zarr.Group) -> None:
+        """
+        Write the multi-channel timeseries data to a Zarr group
+
+        Args:
+            group: Zarr group to write data into
+        """
+        group.attrs["view_type"] = "MultiChannelTimeseries"
+
+        # Store metadata
+        group.attrs["start_time_sec"] = self.start_time_sec
+        group.attrs["sampling_frequency_hz"] = self.sampling_frequency_hz
+        group.attrs["channel_ids"] = self.channel_ids
+
+        n_timepoints, n_channels = self.data.shape
+        group.attrs["n_timepoints"] = n_timepoints
+        group.attrs["n_channels"] = n_channels
+
+        # Store original data with optimal chunking
+        original_chunks = self._calculate_optimal_chunk_size(self.data.shape)
+        group.create_dataset(
+            "data",
+            data=self.data,
+            chunks=original_chunks,
+            compression="blosc",
+            compression_opts={"cname": "lz4", "clevel": 5, "shuffle": 1},
+        )
+
+        # Store downsampled data arrays
+        downsample_factors = list(self.downsampled_data.keys())
+        group.attrs["downsample_factors"] = downsample_factors
+
+        for factor, downsampled_array in self.downsampled_data.items():
+            dataset_name = f"data_ds_{factor}"
+
+            # Calculate optimal chunks for this downsampled array
+            ds_chunks = self._calculate_optimal_chunk_size(downsampled_array.shape)
+
+            group.create_dataset(
+                dataset_name,
+                data=downsampled_array,
+                chunks=ds_chunks,
+                compression="blosc",
+                compression_opts={"cname": "lz4", "clevel": 5, "shuffle": 1},
+            )
+
+        print(
+            f"Stored MultiChannelTimeseries with {len(downsample_factors)} downsampled levels:"
+        )
+        print(f"  Original: {self.data.shape} (chunks: {original_chunks})")
+        for factor in downsample_factors:
+            ds_shape = self.downsampled_data[factor].shape
+            ds_chunks = self._calculate_optimal_chunk_size(ds_shape)
+            print(f"  Factor {factor}: {ds_shape} (chunks: {ds_chunks})")
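Usage sketch for the new MultiChannelTimeseries view; the data below is synthetic, and the commented show() call is an illustrative assumption rather than something shown in this diff:

import numpy as np
from figpack.views import MultiChannelTimeseries

# 10 seconds of 4-channel noise sampled at 1 kHz
data = np.random.randn(10_000, 4).astype(np.float32)

view = MultiChannelTimeseries(
    start_time_sec=0.0,
    sampling_frequency_hz=1000.0,
    data=data,
    channel_ids=["ch1", "ch2", "ch3", "ch4"],
)
# Only the factor-4 min/max level is precomputed here: higher levels stop once
# factor >= n_timepoints / 1000 (10_000 / 1000 = 10, so factor 16 is skipped).
# view.show(title="MultiChannelTimeseries")  # display entry point, if available in your version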
figpack/views/PlotlyFigure.py
CHANGED
@@ -2,11 +2,13 @@
 PlotlyFigure view for figpack - displays plotly figures
 """

-import zarr
 import json
+from datetime import date, datetime
+from typing import Any, Dict, Union
+
 import numpy as np
-
-
+import zarr
+
 from ..core.figpack_view import FigpackView

figpack/views/Splitter.py
CHANGED
figpack/views/TabLayout.py
CHANGED
@@ -2,8 +2,10 @@
 TabLayout view for figpack - a tabbed layout container that handles other views
 """

+from typing import Any, Dict, List, Optional
+
 import zarr
-
+
 from ..core.figpack_view import FigpackView
 from .TabLayoutItem import TabLayoutItem

figpack/views/TabLayoutItem.py
CHANGED
figpack/views/TimeseriesGraph.py
CHANGED
@@ -2,10 +2,11 @@
 Views module for figpack - contains visualization components
 """

-import numpy as np
-from typing import Optional, List, Dict, Any
+from typing import Any, Dict, List, Optional

+import numpy as np
 import zarr
+
 from ..core.figpack_view import FigpackView

figpack/views/__init__.py
CHANGED
@@ -1,10 +1,11 @@
-from .TimeseriesGraph import TimeseriesGraph
 from .Box import Box
-from .Splitter import Splitter
-from .TabLayout import TabLayout
+from .Image import Image
 from .LayoutItem import LayoutItem
-from .TabLayoutItem import TabLayoutItem
 from .Markdown import Markdown
-from .PlotlyFigure import PlotlyFigure
 from .MatplotlibFigure import MatplotlibFigure
-from .Image import Image
+from .MultiChannelTimeseries import MultiChannelTimeseries
+from .PlotlyFigure import PlotlyFigure
+from .Splitter import Splitter
+from .TabLayout import TabLayout
+from .TabLayoutItem import TabLayoutItem
+from .TimeseriesGraph import TimeseriesGraph