figpack 0.2.11__py3-none-any.whl → 0.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of figpack might be problematic.
- figpack/__init__.py +1 -1
- figpack/figpack-figure-dist/assets/{index-DvunBzII.js → index-htsKDix1.js} +57 -57
- figpack/figpack-figure-dist/index.html +1 -1
- figpack/views/Box.py +4 -0
- figpack/views/PlotlyExtension/plotly_view.js +106 -0
- figpack/views/TimeseriesGraph.py +279 -1
- {figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/METADATA +1 -1
- {figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/RECORD +12 -11
- {figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/WHEEL +0 -0
- {figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/entry_points.txt +0 -0
- {figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/licenses/LICENSE +0 -0
- {figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/top_level.txt +0 -0
figpack/figpack-figure-dist/index.html
CHANGED

@@ -5,7 +5,7 @@
     <link rel="icon" type="image/png" href="./assets/neurosift-logo-CLsuwLMO.png" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>figpack figure</title>
-    <script type="module" crossorigin src="./assets/index-DvunBzII.js"></script>
+    <script type="module" crossorigin src="./assets/index-htsKDix1.js"></script>
     <link rel="stylesheet" crossorigin href="./assets/index-D9a3K6eW.css">
   </head>
   <body>
figpack/views/Box.py
CHANGED

@@ -21,6 +21,7 @@ class Box(FigpackView):
         direction: Literal["horizontal", "vertical"] = "vertical",
         show_titles: bool = True,
         items: List[LayoutItem],
+        title: Optional[str] = None,
     ):
         """
         Initialize a Box layout view
@@ -29,6 +30,7 @@ class Box(FigpackView):
             direction: Layout direction - "horizontal" or "vertical"
             show_titles: Whether to show titles for layout items
             items: List of LayoutItem objects containing the child views
+            title: Optional title to display at the top of the box

         Raises:
             ValueError: If direction is not "horizontal" or "vertical"
@@ -39,6 +41,7 @@ class Box(FigpackView):
         self.direction = direction
         self.show_titles = show_titles
         self.items = items
+        self.title = title

     def _write_to_zarr_group(self, group: zarr.Group) -> None:
         """
@@ -53,6 +56,7 @@ class Box(FigpackView):
         # Set layout properties
         group.attrs["direction"] = self.direction
         group.attrs["show_titles"] = self.show_titles
+        group.attrs["title"] = self.title

         # Create a list to store item metadata
         items_metadata = []
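Note on the Box change above: the constructor gains an optional `title` that is written to the group attrs alongside `direction` and `show_titles`. A minimal usage sketch, assuming `Box`, `LayoutItem`, and `TimeseriesGraph` are importable from `figpack.views` and that `LayoutItem` takes a child view plus a `title` (those details are not shown in this diff):

```python
# Sketch only: the import path, LayoutItem signature, and child view are
# assumptions; the piece confirmed by the diff is Box(title=...).
from figpack.views import Box, LayoutItem, TimeseriesGraph

child = TimeseriesGraph()  # any FigpackView child would do here

box = Box(
    direction="vertical",
    show_titles=True,
    items=[LayoutItem(child, title="Traces")],
    title="Session overview",  # new optional box-level title in 0.2.13
)
```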
figpack/views/PlotlyExtension/plotly_view.js
ADDED

@@ -0,0 +1,106 @@
+/**
+ * Plotly Extension for figpack
+ * Provides interactive graph visualization using Plotly library
+ */
+
+const loadFigureData = async (zarrGroup) => {
+  // Get the figure data from the zarr array
+  const data = await zarrGroup.file.getDatasetData(
+    joinPath(zarrGroup.path, "figure_data"),
+    {},
+  );
+  if (!data || data.length === 0) {
+    throw new Error("Empty figure data");
+  }
+
+  // Convert the uint8 array back to string
+  const uint8Array = new Uint8Array(data);
+  const decoder = new TextDecoder("utf-8");
+  const jsonString = decoder.decode(uint8Array);
+
+  // Parse the JSON string
+  const parsedData = JSON.parse(jsonString);
+
+  return parsedData;
+};
+
+(function() {
+  window.figpackExtensions = window.figpackExtensions || {};
+
+  window.figpackExtensions['figpack_plotly'] = {
+    render: async function(container, zarrGroup, width, height, onResize) {
+      container.innerHTML = '';
+
+      try {
+        const figureData = await loadFigureData(zarrGroup);
+
+        const makePlot = () => {
+          window.Plotly.newPlot(
+            container,
+            figureData.data || [],
+            {
+              ...figureData.layout,
+              width: width,
+              height: height,
+              margin: { l: 50, r: 50, t: 50, b: 50 },
+            },
+            {
+              responsive: true,
+              displayModeBar: true,
+              displaylogo: false,
+            },
+          )
+        };
+
+        makePlot();
+
+        // Handle resize events
+        onResize((newWidth, newHeight) => {
+          window.Plotly.relayout(container, {width: newWidth, height: newHeight});
+        });
+
+        return {
+          destroy: () => {
+            window.Plotly.purge(container);
+          }
+        };
+
+      } catch (error) {
+        console.error('Error rendering plotly figure:', error);
+        this.renderError(container, width, height, error.message);
+        return { destroy: () => {} };
+      }
+    },
+
+    renderError: function(container, width, height, message) {
+      container.innerHTML = `
+        <div style="
+          width: ${width}px;
+          height: ${height}px;
+          display: flex;
+          align-items: center;
+          justify-content: center;
+          background-color: #f8f9fa;
+          border: 1px solid #dee2e6;
+          color: #6c757d;
+          font-family: system-ui, -apple-system, sans-serif;
+          font-size: 14px;
+          text-align: center;
+          padding: 20px;
+          box-sizing: border-box;
+        ">
+          <div>
+            <div style="margin-bottom: 10px; font-weight: 500;">Force Graph Error</div>
+            <div style="font-size: 12px;">${message}</div>
+          </div>
+        </div>
+      `;
+    }
+  };
+})();
+
+const joinPath = function(p1, p2) {
+  if (p1.endsWith('/')) p1 = p1.slice(0, -1);
+  if (p2.startsWith('/')) p2 = p2.slice(1);
+  return p1 + '/' + p2;
+};
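The reader above expects the `figure_data` dataset to hold UTF-8 JSON bytes with `data` and `layout` keys, which it decodes via `TextDecoder` and `JSON.parse`. The writer side lives in `PlotlyExtension.py` and is not part of this diff; the snippet below is only a sketch of how such a payload could be produced with the public Plotly API:

```python
# Hypothetical writer-side sketch (the real code in PlotlyExtension.py is not
# shown in this diff): serialize a Plotly figure into the uint8 JSON payload
# that loadFigureData() decodes on the JavaScript side.
import numpy as np
import plotly.graph_objects as go


def encode_figure_data(fig: go.Figure) -> np.ndarray:
    json_str = fig.to_json()                   # '{"data": [...], "layout": {...}}'
    raw = json_str.encode("utf-8")             # UTF-8 bytes
    return np.frombuffer(raw, dtype=np.uint8)  # suitable for a uint8 zarr dataset


# e.g. zarr_group.create_dataset("figure_data", data=encode_figure_data(fig))
```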
figpack/views/TimeseriesGraph.py
CHANGED

@@ -2,7 +2,8 @@
 Views module for figpack - contains visualization components
 """

-
+import math
+from typing import Any, Dict, List, Optional, Union

 import numpy as np
 import zarr
@@ -118,6 +119,41 @@ class TimeseriesGraph(FigpackView):
             )
         )

+    def add_uniform_series(
+        self,
+        *,
+        name: str,
+        start_time_sec: float,
+        sampling_frequency_hz: float,
+        data: np.ndarray,
+        channel_names: Optional[List[str]] = None,
+        colors: Optional[List[str]] = None,
+        width: float = 1.0,
+    ) -> None:
+        """
+        Add a uniform timeseries to the graph with optional multi-channel support
+
+        Args:
+            name: Base name of the series for legend
+            start_time_sec: Starting time in seconds
+            sampling_frequency_hz: Sampling rate in Hz
+            data: 1D array (single channel) or 2D array (timepoints × channels)
+            channel_names: Optional list of channel names
+            colors: Optional list of colors for each channel
+            width: Line width
+        """
+        self._series.append(
+            TGUniformSeries(
+                name=name,
+                start_time_sec=start_time_sec,
+                sampling_frequency_hz=sampling_frequency_hz,
+                data=data,
+                channel_names=channel_names,
+                colors=colors,
+                width=width,
+            )
+        )
+
     def _write_to_zarr_group(self, group: zarr.Group) -> None:
         """
         Write the graph data to a Zarr group
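A short usage sketch of the `add_uniform_series` method added in the hunk above (the `TimeseriesGraph()` constructor call and import path are assumptions, not shown here):

```python
# Assumed usage of the new add_uniform_series API; only the keyword arguments
# match the diff, the surrounding setup is illustrative.
import numpy as np
from figpack.views import TimeseriesGraph

graph = TimeseriesGraph()

# 30,000 timepoints x 3 channels of float32 data sampled at 1 kHz
data = np.random.randn(30_000, 3).astype(np.float32)

graph.add_uniform_series(
    name="lfp",
    start_time_sec=0.0,
    sampling_frequency_hz=1000.0,
    data=data,                            # 1D or 2D (timepoints x channels)
    channel_names=["ch0", "ch1", "ch2"],  # optional
    colors=["blue", "red", "green"],      # optional
    width=1.0,
)
```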
@@ -133,6 +169,8 @@ class TimeseriesGraph(FigpackView):
                 series._write_to_zarr_group(series_group)
             elif isinstance(series, TGIntervalSeries):
                 series._write_to_zarr_group(series_group)
+            elif isinstance(series, TGUniformSeries):
+                series._write_to_zarr_group(series_group)
             else:
                 raise ValueError(f"Unknown series type: {type(series)}")

@@ -253,3 +291,243 @@ class TGIntervalSeries:
         group.attrs["series_type"] = "interval"
         group.attrs["color"] = self.color
         group.attrs["alpha"] = self.alpha
+
+
+class TGUniformSeries:
+    def __init__(
+        self,
+        *,
+        name: str,
+        start_time_sec: float,
+        sampling_frequency_hz: float,
+        data: np.ndarray,
+        channel_names: Optional[List[str]] = None,
+        colors: Optional[List[str]] = None,
+        width: float = 1.0,
+    ):
+        assert sampling_frequency_hz > 0, "Sampling frequency must be positive"
+
+        # Handle both 1D and 2D data
+        if data.ndim == 1:
+            # Convert 1D to 2D with single channel
+            data = data.reshape(-1, 1)
+        elif data.ndim == 2:
+            # Already 2D, use as-is
+            pass
+        else:
+            raise ValueError("Data must be 1D or 2D array")
+
+        n_timepoints, n_channels = data.shape
+
+        self.name = name
+        self.start_time_sec = start_time_sec
+        self.sampling_frequency_hz = sampling_frequency_hz
+        self.data = data.astype(np.float32)  # Ensure float32 for efficiency
+
+        # Set channel names
+        if channel_names is None:
+            if n_channels == 1:
+                self.channel_names = [name]
+            else:
+                self.channel_names = [f"{name}_ch_{i}" for i in range(n_channels)]
+        else:
+            assert len(channel_names) == n_channels, (
+                f"Number of channel_names ({len(channel_names)}) must match "
+                f"number of channels ({n_channels})"
+            )
+            self.channel_names = [str(ch_name) for ch_name in channel_names]
+
+        # Set colors
+        if colors is None:
+            # Default colors for multiple channels
+            default_colors = [
+                "blue",
+                "red",
+                "green",
+                "orange",
+                "purple",
+                "brown",
+                "pink",
+                "gray",
+            ]
+            self.colors = [
+                default_colors[i % len(default_colors)] for i in range(n_channels)
+            ]
+        else:
+            assert len(colors) == n_channels, (
+                f"Number of colors ({len(colors)}) must match "
+                f"number of channels ({n_channels})"
+            )
+            self.colors = colors
+
+        self.width = width
+
+        # Prepare downsampled arrays for efficient rendering
+        self.downsampled_data = self._compute_downsampled_data()
+
+    def _compute_downsampled_data(self) -> dict:
+        """
+        Compute downsampled arrays at power-of-4 factors using a vectorized
+        min/max pyramid with NaN padding for partial bins.
+
+        Returns:
+            dict: {factor: (ceil(N/factor), 2, M) float32 array}, where the second
+                axis stores [min, max] per bin per channel.
+        """
+        data = self.data  # (N, M), float32
+        n_timepoints, n_channels = data.shape
+        downsampled = {}
+
+        if n_timepoints < 4:
+            # No level with factor >= 4 fits the stop condition (factor < N)
+            return downsampled
+
+        def _first_level_from_raw(x: np.ndarray) -> np.ndarray:
+            """Build the factor=4 level directly from the raw data."""
+            N, M = x.shape
+            n_bins = math.ceil(N / 4)
+            pad = n_bins * 4 - N
+            # Pad time axis with NaNs so min/max ignore the padded tail
+            x_pad = np.pad(
+                x, ((0, pad), (0, 0)), mode="constant", constant_values=np.nan
+            )
+            blk = x_pad.reshape(n_bins, 4, M)  # (B, 4, M)
+            mins = np.nanmin(blk, axis=1)  # (B, M)
+            maxs = np.nanmax(blk, axis=1)  # (B, M)
+            out = np.empty((n_bins, 2, M), dtype=np.float32)
+            out[:, 0, :] = mins
+            out[:, 1, :] = maxs
+            return out
+
+        def _downsample4_bins(level_minmax: np.ndarray) -> np.ndarray:
+            """
+            Build the next pyramid level from the previous one by grouping every 4
+            bins. Input is (B, 2, M) -> Output is (ceil(B/4), 2, M).
+            """
+            B, two, M = level_minmax.shape
+            assert two == 2
+            n_bins_next = math.ceil(B / 4)
+            pad = n_bins_next * 4 - B
+            lvl_pad = np.pad(
+                level_minmax,
+                ((0, pad), (0, 0), (0, 0)),
+                mode="constant",
+                constant_values=np.nan,
+            )
+            blk = lvl_pad.reshape(n_bins_next, 4, 2, M)  # (B', 4, 2, M)
+
+            # Next mins from mins; next maxs from maxs
+            mins = np.nanmin(blk[:, :, 0, :], axis=1)  # (B', M)
+            maxs = np.nanmax(blk[:, :, 1, :], axis=1)  # (B', M)
+
+            out = np.empty((n_bins_next, 2, M), dtype=np.float32)
+            out[:, 0, :] = mins
+            out[:, 1, :] = maxs
+            return out
+
+        # Level 1: factor = 4 from raw data
+        factor = 4
+        level = _first_level_from_raw(data)
+        downsampled[factor] = level
+
+        # Higher levels: factor *= 4 each time, built from previous level
+        factor *= 4  # -> 16
+        while factor < n_timepoints / 1000:
+            level = _downsample4_bins(level)
+            downsampled[factor] = level
+            factor *= 4
+
+        return downsampled
+
+    def _calculate_optimal_chunk_size(
+        self, shape: tuple, target_size_mb: float = 5.0
+    ) -> tuple:
+        """
+        Calculate optimal chunk size for Zarr storage targeting ~5MB per chunk
+
+        Args:
+            shape: Array shape (n_timepoints, ..., n_channels)
+            target_size_mb: Target chunk size in MB
+
+        Returns:
+            Tuple of chunk dimensions
+        """
+        # Calculate bytes per element (float32 = 4 bytes)
+        bytes_per_element = 4
+        target_size_bytes = target_size_mb * 1024 * 1024
+
+        if len(shape) == 2:  # Original data: (n_timepoints, n_channels)
+            n_timepoints, n_channels = shape
+            elements_per_timepoint = n_channels
+        elif len(shape) == 3:  # Downsampled data: (n_timepoints, 2, n_channels)
+            n_timepoints, _, n_channels = shape
+            elements_per_timepoint = 2 * n_channels
+        else:
+            raise ValueError(f"Unsupported shape: {shape}")
+
+        # Calculate chunk size in timepoints
+        max_timepoints_per_chunk = target_size_bytes // (
+            elements_per_timepoint * bytes_per_element
+        )
+
+        # Find next lower power of 2
+        chunk_timepoints = 2 ** math.floor(math.log2(max_timepoints_per_chunk))
+        chunk_timepoints = max(chunk_timepoints, 1)  # At least 1
+        chunk_timepoints = min(chunk_timepoints, n_timepoints)  # At most n_timepoints
+
+        # If n_timepoints is less than our calculated size, round down to next power of 2
+        if chunk_timepoints > n_timepoints:
+            chunk_timepoints = 2 ** math.floor(math.log2(n_timepoints))
+
+        if len(shape) == 2:
+            return (chunk_timepoints, n_channels)
+        else:  # len(shape) == 3
+            return (chunk_timepoints, 2, n_channels)
+
+    def _write_to_zarr_group(self, group: zarr.Group) -> None:
+        """
+        Write the uniform series data to a Zarr group
+
+        Args:
+            group: Zarr group to write data into
+        """
+        group.attrs["series_type"] = "uniform"
+
+        # Store metadata
+        group.attrs["start_time_sec"] = self.start_time_sec
+        group.attrs["sampling_frequency_hz"] = self.sampling_frequency_hz
+        group.attrs["channel_names"] = self.channel_names
+        group.attrs["colors"] = self.colors
+        group.attrs["width"] = self.width
+
+        n_timepoints, n_channels = self.data.shape
+        group.attrs["n_timepoints"] = n_timepoints
+        group.attrs["n_channels"] = n_channels
+
+        # Store original data with optimal chunking
+        original_chunks = self._calculate_optimal_chunk_size(self.data.shape)
+        group.create_dataset(
+            "data",
+            data=self.data,
+            chunks=original_chunks,
+            compression="blosc",
+            compression_opts={"cname": "lz4", "clevel": 5, "shuffle": 1},
+        )
+
+        # Store downsampled data arrays
+        downsample_factors = list(self.downsampled_data.keys())
+        group.attrs["downsample_factors"] = downsample_factors
+
+        for factor, downsampled_array in self.downsampled_data.items():
+            dataset_name = f"data_ds_{factor}"
+
+            # Calculate optimal chunks for this downsampled array
+            ds_chunks = self._calculate_optimal_chunk_size(downsampled_array.shape)
+
+            group.create_dataset(
+                dataset_name,
+                data=downsampled_array,
+                chunks=ds_chunks,
+                compression="blosc",
+                compression_opts={"cname": "lz4", "clevel": 5, "shuffle": 1},
+            )
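To make the storage logic above concrete: for a hypothetical 1,000,000-timepoint, 4-channel float32 series, `_compute_downsampled_data` builds min/max levels at factors 4, 16, 64, and 256 (the loop stops once the factor reaches n_timepoints / 1000), and `_calculate_optimal_chunk_size` targets ~5 MB per chunk, i.e. 5 * 1024 * 1024 / (4 channels * 4 bytes) = 327,680 timepoints, rounded down to the power of two 262,144. A standalone restatement of that arithmetic:

```python
# Standalone restatement of the pyramid-factor and chunk-size arithmetic above,
# for a hypothetical (1_000_000, 4) float32 array; this mirrors the private
# helpers in the diff rather than calling them.
import math

n_timepoints, n_channels = 1_000_000, 4

# Downsample factors: 4 is always built, then *4 while factor < N / 1000
factors = [4]
factor = 16
while factor < n_timepoints / 1000:
    factors.append(factor)
    factor *= 4
print(factors)  # [4, 16, 64, 256]

# Chunk shape for the raw (N, M) float32 array, targeting ~5 MB per chunk
target_bytes = 5.0 * 1024 * 1024
max_timepoints = target_bytes // (n_channels * 4)              # 327680.0
chunk_timepoints = 2 ** math.floor(math.log2(max_timepoints))  # 262144
chunk_timepoints = min(max(chunk_timepoints, 1), n_timepoints)
print((chunk_timepoints, n_channels))  # (262144, 4)
```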
{figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-figpack/__init__.py,sha256=
+figpack/__init__.py,sha256=Dx3Hx8Y0cLdDNiad3EXUGYb18ZXDSUJ_Zm1-ptbsO8k,360
 figpack/cli.py,sha256=xWF7J2BxUqOLvPu-Kje7Q6oGukTroXsLq8WN8vJgyw0,8321
 figpack/core/__init__.py,sha256=V4wVdyBJ80mi9Rz8HjDSQNkqhqYB6sq4vWH3xQ10kaE,232
 figpack/core/_bundle_utils.py,sha256=16hgTExPLkJCtGjVUCLlnbs_qgns6v01egVr3CEnUXE,6082
@@ -11,9 +11,9 @@ figpack/core/config.py,sha256=oOR7SlP192vuFhYlS-h14HnG-kd_3gaz0vshXch2RNc,173
 figpack/core/extension_view.py,sha256=flFj8X4XuDzzOyeOe3SBddbr_t4wbg1V3vFzJoEVAB4,1995
 figpack/core/figpack_extension.py,sha256=EJHZpe7GsQMUnSvxcYf8374-f6n85F_k1IEelFMRFP8,4332
 figpack/core/figpack_view.py,sha256=X_EtpWTloENfRnDEJcBxXBPditaObv5BWMzO-_stAho,6297
-figpack/figpack-figure-dist/index.html,sha256=
+figpack/figpack-figure-dist/index.html,sha256=43TC_xwYZKyIVAPEXf_i-aBzbVOy2RGF1XcrY_qMcrI,486
 figpack/figpack-figure-dist/assets/index-D9a3K6eW.css,sha256=ki61XkOz_TBJnU9Qyk5EgBzh2-_ilZQui2i8DHSarEo,5584
-figpack/figpack-figure-dist/assets/index-
+figpack/figpack-figure-dist/assets/index-htsKDix1.js,sha256=0JVZ2euRidkKZCsLe_DgSLQ88sZwY8mUfmFYaz7-aJQ,1624444
 figpack/figpack-figure-dist/assets/neurosift-logo-CLsuwLMO.png,sha256=g5m-TwrGh5f6-9rXtWV-znH4B0nHgc__0GWclRDLUHs,9307
 figpack/franklab/__init__.py,sha256=HkehqGImJE_sE2vbPDo-HbgtEYaMICb9-230xTYvRTU,56
 figpack/franklab/views/TrackAnimation.py,sha256=3Jv1Ri4FIwTyqNahinqhHsBH1Bv_iZrEGx12w6diJ2M,5636
@@ -34,7 +34,7 @@ figpack/spike_sorting/views/UnitsTable.py,sha256=M3y1IDJzSnvOaM1-QOyJOVcUcdTkVvx
 figpack/spike_sorting/views/UnitsTableColumn.py,sha256=zBnuoeILTuiVLDvtcOxqa37E5WlbR12rlwNJUeWXxY4,847
 figpack/spike_sorting/views/UnitsTableRow.py,sha256=rEb2hMTA_pl2fTW1nOvnGir0ysfNx4uww3aekZzfWjk,720
 figpack/spike_sorting/views/__init__.py,sha256=2caaV9yxi97aHoYfUWUYyyIXQSZJRtRVqmPOW9ZeG1I,1159
-figpack/views/Box.py,sha256=
+figpack/views/Box.py,sha256=KEItT39xpw0Nl_4NU3WpPmi67wQKVSgiMyB4zAFGf4w,2409
 figpack/views/DataFrame.py,sha256=VFP-EM_Wnc1G3uimVVMJe08KKWCAZe7DvmYf5e07uTk,3653
 figpack/views/Gallery.py,sha256=sHlZbaqxcktasmNsJnuxe8WmgUQ6iurG50JiChKSMbQ,3314
 figpack/views/GalleryItem.py,sha256=b_upJno5P3ANSulbG-h3t6Xj56tPGJ7iVxqyiZu3zaQ,1244
@@ -47,13 +47,14 @@ figpack/views/Spectrogram.py,sha256=GfTNinkuMFu4dxn35MvSB4Perz84sx4LMcjUrOviz6c,
 figpack/views/Splitter.py,sha256=x9jLCTlIvDy5p9ymVd0X48KDccyD6bJANhXyFgKEmtE,2007
 figpack/views/TabLayout.py,sha256=5g3nmL95PfqgI0naqZXHMwLVo2ebDlGX01Hy9044bUw,1898
 figpack/views/TabLayoutItem.py,sha256=xmHA0JsW_6naJze4_mQuP_Fy0Nm17p2N7w_AsmVRp8k,880
-figpack/views/TimeseriesGraph.py,sha256=
+figpack/views/TimeseriesGraph.py,sha256=lFE6FIXXXPC0Ww9GWOvLVxfwXxcNVCybwEoKkiWhe34,17461
 figpack/views/__init__.py,sha256=nyd3Ot2x702W4j9oBN0lK6i0DZzg9Ai41XoYCm5DeQ8,546
 figpack/views/PlotlyExtension/PlotlyExtension.py,sha256=_-GiqzHFb9uF1O6RNKEkd85SX8z9z5kFOh8tO_9aAbk,3778
 figpack/views/PlotlyExtension/__init__.py,sha256=80Wy1mDMWyagjuR99ECxJePIYpRQ6TSyHkB0uZoBZ_0,70
-figpack
-figpack-0.2.
-figpack-0.2.
-figpack-0.2.
-figpack-0.2.
-figpack-0.2.
+figpack/views/PlotlyExtension/plotly_view.js,sha256=gCsZS0IaYGTN5a3DC2c4NmzoOxZi1xGfCAYI6WSoFpM,3596
+figpack-0.2.13.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+figpack-0.2.13.dist-info/METADATA,sha256=8TibM4nTAUoW2yQ143TtaPaVV09eP3Z07HmjLhcNmZ4,3617
+figpack-0.2.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+figpack-0.2.13.dist-info/entry_points.txt,sha256=l6d3siH2LxXa8qJGbjAqpIZtI5AkMSyDeoRDCzdrUto,45
+figpack-0.2.13.dist-info/top_level.txt,sha256=lMKGaC5xWmAYBx9Ac1iMokm42KFnJFjmkP2ldyvOo-c,8
+figpack-0.2.13.dist-info/RECORD,,
{figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/WHEEL
File without changes
{figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/entry_points.txt
File without changes
{figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/licenses/LICENSE
File without changes
{figpack-0.2.11.dist-info → figpack-0.2.13.dist-info}/top_level.txt
File without changes