swcgeom 0.20.0__cp312-cp312-macosx_15_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of swcgeom might be problematic. Click here for more details.
- swcgeom/__init__.py +21 -0
- swcgeom/analysis/__init__.py +13 -0
- swcgeom/analysis/feature_extractor.py +454 -0
- swcgeom/analysis/features.py +218 -0
- swcgeom/analysis/lmeasure.py +750 -0
- swcgeom/analysis/sholl.py +201 -0
- swcgeom/analysis/trunk.py +183 -0
- swcgeom/analysis/visualization.py +191 -0
- swcgeom/analysis/visualization3d.py +81 -0
- swcgeom/analysis/volume.py +143 -0
- swcgeom/core/__init__.py +19 -0
- swcgeom/core/branch.py +129 -0
- swcgeom/core/branch_tree.py +65 -0
- swcgeom/core/compartment.py +107 -0
- swcgeom/core/node.py +130 -0
- swcgeom/core/path.py +155 -0
- swcgeom/core/population.py +394 -0
- swcgeom/core/swc.py +247 -0
- swcgeom/core/swc_utils/__init__.py +19 -0
- swcgeom/core/swc_utils/assembler.py +35 -0
- swcgeom/core/swc_utils/base.py +180 -0
- swcgeom/core/swc_utils/checker.py +112 -0
- swcgeom/core/swc_utils/io.py +335 -0
- swcgeom/core/swc_utils/normalizer.py +163 -0
- swcgeom/core/swc_utils/subtree.py +70 -0
- swcgeom/core/tree.py +387 -0
- swcgeom/core/tree_utils.py +277 -0
- swcgeom/core/tree_utils_impl.py +58 -0
- swcgeom/images/__init__.py +9 -0
- swcgeom/images/augmentation.py +149 -0
- swcgeom/images/contrast.py +87 -0
- swcgeom/images/folder.py +217 -0
- swcgeom/images/io.py +578 -0
- swcgeom/images/loaders/__init__.py +8 -0
- swcgeom/images/loaders/pbd.cpython-312-darwin.so +0 -0
- swcgeom/images/loaders/pbd.pyx +523 -0
- swcgeom/images/loaders/raw.cpython-312-darwin.so +0 -0
- swcgeom/images/loaders/raw.pyx +183 -0
- swcgeom/transforms/__init__.py +20 -0
- swcgeom/transforms/base.py +136 -0
- swcgeom/transforms/branch.py +223 -0
- swcgeom/transforms/branch_tree.py +74 -0
- swcgeom/transforms/geometry.py +270 -0
- swcgeom/transforms/image_preprocess.py +107 -0
- swcgeom/transforms/image_stack.py +219 -0
- swcgeom/transforms/images.py +206 -0
- swcgeom/transforms/mst.py +183 -0
- swcgeom/transforms/neurolucida_asc.py +498 -0
- swcgeom/transforms/path.py +56 -0
- swcgeom/transforms/population.py +36 -0
- swcgeom/transforms/tree.py +265 -0
- swcgeom/transforms/tree_assembler.py +160 -0
- swcgeom/utils/__init__.py +18 -0
- swcgeom/utils/debug.py +23 -0
- swcgeom/utils/download.py +119 -0
- swcgeom/utils/dsu.py +58 -0
- swcgeom/utils/ellipse.py +131 -0
- swcgeom/utils/file.py +90 -0
- swcgeom/utils/neuromorpho.py +581 -0
- swcgeom/utils/numpy_helper.py +70 -0
- swcgeom/utils/plotter_2d.py +134 -0
- swcgeom/utils/plotter_3d.py +35 -0
- swcgeom/utils/renderer.py +145 -0
- swcgeom/utils/sdf.py +324 -0
- swcgeom/utils/solid_geometry.py +154 -0
- swcgeom/utils/transforms.py +367 -0
- swcgeom/utils/volumetric_object.py +483 -0
- swcgeom-0.20.0.dist-info/METADATA +86 -0
- swcgeom-0.20.0.dist-info/RECORD +72 -0
- swcgeom-0.20.0.dist-info/WHEEL +5 -0
- swcgeom-0.20.0.dist-info/licenses/LICENSE +201 -0
- swcgeom-0.20.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,523 @@
|
|
|
1
|
+
# -----------------------------------------------------------------------------
|
|
2
|
+
# This file is adapted from the v3d-py-helper project:
|
|
3
|
+
# https://github.com/SEU-ALLEN-codebase/v3d-py-helper
|
|
4
|
+
#
|
|
5
|
+
# Original license: MIT License
|
|
6
|
+
# Copyright (c) Zuohan Zhao
|
|
7
|
+
#
|
|
8
|
+
# Vaa3D in Python Made Easy
|
|
9
|
+
# Python library for Vaa3D functions.
|
|
10
|
+
#
|
|
11
|
+
# The original project is distributed via PyPI under the name `v3d-py-helper`,
|
|
12
|
+
# with its latest release (v0.4.1) not supporting Python 3.13.
|
|
13
|
+
#
|
|
14
|
+
# As of Python 3.13 (released on October 7, 2024), this package fails to build
|
|
15
|
+
# from source due to missing dependencies (e.g., libtiff), and no prebuilt wheels
|
|
16
|
+
# are available on PyPI for Python 3.13. An issue has been raised, but the project
|
|
17
|
+
# appears to be unmaintained at this time, and the author has not responded.
|
|
18
|
+
#
|
|
19
|
+
# To ensure continued compatibility and usability of Vaa3D features under Python 3.13+,
|
|
20
|
+
# we have copied and minimally adapted necessary source files into this project,
|
|
21
|
+
# preserving license and attribution in accordance with the MIT License.
|
|
22
|
+
#
|
|
23
|
+
# Please consult the original repository for full documentation:
|
|
24
|
+
# https://SEU-ALLEN-codebase.github.io/v3d-py-helper
|
|
25
|
+
#
|
|
26
|
+
# If the upstream project resumes maintenance and releases official support
|
|
27
|
+
# for Python 3.13+, this bundled version may be deprecated in favor of the
|
|
28
|
+
# canonical package.
|
|
29
|
+
# -----------------------------------------------------------------------------
|
|
30
|
+
|
|
31
|
+
import struct
|
|
32
|
+
import os
|
|
33
|
+
import cython
|
|
34
|
+
cimport cython
|
|
35
|
+
import numpy as np
|
|
36
|
+
cimport numpy as np
|
|
37
|
+
import sys
|
|
38
|
+
|
|
39
|
+
from cpython.bytearray cimport PyByteArray_AsString
|
|
40
|
+
from libc.stdio cimport FILE, fopen, fread, fclose, fwrite
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
# Compile-time constants for the Vaa3D "v3dpbd" (packed-bit-difference) codec.
DEF FORMAT_KEY = b"v3d_volume_pkbitdf_encod"  # 24-byte magic string at the start of every v3dpbd file
DEF HEADER_SIZE = 43  # magic (24) + endianness byte (1) + int16 datatype (2) + 4 x int32 dims (16)
DEF COMPRESSION_ENLARGEMENT = 2  # compression buffer is sized 2x the raw data as worst-case headroom
DEF LITTLE = b'L'  # endianness byte marking little-endian data in the header
DEF REPEAT_MAX_LEN = 255 - 222  # max run length of a 16-bit "repeat" opcode (opcodes 223..255)

# Per-mode tables for 16-bit difference coding; mode index 0/1/2 selects
# 3-, 4- or 5-bit packed deltas respectively.
cdef unsigned char[3] MAX_LEN = [79 - 31, 182 - 79, 222 - 182]  # max #deltas one opcode can carry per mode
cdef double[3] MAX_EFF = [16. / 3., 16. / 4., 16. / 5.]  # efficiency ceiling: 16 data bits per 3/4/5-bit delta
cdef char[3][2] ran = [[-3, 4], [-7, 8], [-15, 16]]  # [min, max] signed delta representable per mode
cdef unsigned char[3] shift_bits = [3, 4, 5]  # bits consumed per delta in each mode
cdef unsigned char[3] gap = [31, 79, 182]  # opcode bias: delta count = opcode - gap[mode]
cdef unsigned char[3] mask = [0b00000111, 0b0001111, 0b00011111]  # delta extraction masks (7, 15, 31), matching shift_bits
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
cdef class PBD:
    """
    Supporting most biological image LOSSLESS compression with sound SNR, where blocks of the signal are close in intensity.
    It supports 16bit and 8bit compression, can compress up to 25% or even lower. Note that the 16bit used by other sources
    is incomplete and can only reach 50%. This package gives you a choice of a full blood one that can make 25%, but the
    full blood output might not be loaded by other programs currently.

    The compression/decompression are optimized and can even be faster than Vaa3D.

    modified from v3d_external/v3d_main/neuron_annotator/utility/ImageLoaderBasic.cpp

    by Zuohan Zhao, Southeast University

    2022/6/23
    """
    cdef:
        # total_read_bytes: bytes of the compressed stream read from disk so far.
        # compression_pos / decompression_pos: current cursor into each buffer.
        # read_step_size_bytes: max chunk size per fread during load().
        long long total_read_bytes, compression_pos, decompression_pos, read_step_size_bytes
        # Raw compressed bytes and the decoded pixel bytes, respectively.
        bytearray compression_buffer, decompression_buffer
        # endian_switch: True when file endianness differs from the host's.
        # pbd16_full_blood: allow the extended ("full blood") 16-bit encoding.
        bint endian_switch, pbd16_full_blood
        # One-byte host endianness tag: b'L' or b'B' (first letter of sys.byteorder).
        bytes endian_sys

    def __init__(self, pbd16_full_blood=True, read_step_size_bytes = 1024 * 20000):
        """
        :param pbd16_full_blood: allow the full-blood saving of 16bit images for a better
            compression ratio. Note other programs may not be able to load such files.
            Defaults to on.
        :param read_step_size_bytes: number of bytes per buffered read during loading,
            default 20000KB.
        """
        # b'L' or b'B' depending on the host byte order.
        self.endian_sys = sys.byteorder[0].upper().encode('ascii')
        self.endian_switch = False
        self.decompression_pos = self.compression_pos = self.total_read_bytes = 0
        self.compression_buffer = self.decompression_buffer = bytearray()
        self.pbd16_full_blood = pbd16_full_blood
        self.read_step_size_bytes = read_step_size_bytes

    @cython.boundscheck(False)
    @cython.wraparound(False)
    cdef long long decompress_pbd8(self, long long look_ahead):
        """Decode 8-bit PBD opcodes from compression_buffer[compression_pos:look_ahead]
        into decompression_buffer, returning the new decompressed byte position.

        Opcode ranges:
          0..32    literal run: copy (code + 1) raw bytes.
          33..127  difference run: (code - 32) 2-bit deltas packed 4 per byte,
                   each applied to the previous output byte (delta 3 encodes -1).
          128..255 repeat run: (code - 127) copies of the next byte.
        """
        cdef:
            unsigned char* decomp = <unsigned char*>PyByteArray_AsString(self.decompression_buffer)
            unsigned char* comp = <unsigned char*>PyByteArray_AsString(self.compression_buffer)
            long long cp = self.compression_pos, dp = self.decompression_pos
            unsigned char count, shift, carry
            char delta
        while cp < look_ahead:
            count = comp[cp]
            cp += 1
            if count < 33:
                # Literal run of count+1 raw bytes.
                count += 1
                for shift in range(count):
                    decomp[dp + shift] = comp[cp + shift]
                cp += count
                dp += count
            elif count < 128:
                # Difference run: 2-bit deltas, 4 packed per carry byte.
                count -= 32
                shift = 0
                while count > 0:
                    if shift == 0:
                        carry = comp[cp]
                        cp += 1
                    # `<<` binds tighter than `&`: extract bits [shift, shift+2).
                    delta = (carry & 0b00000011 << shift) >> shift
                    if delta == 3:
                        delta = -1
                    decomp[dp] = decomp[dp - 1] + delta
                    dp += 1
                    count -= 1
                    shift = shift + 2 & 7
            else:
                # Repeat run: count-127 copies of a single byte.
                count -= 127
                for shift in range(count):
                    decomp[dp + shift] = comp[cp]
                dp += count
                cp += 1
        return dp

    @cython.boundscheck(False)
    @cython.wraparound(False)
    cdef long long decompress_pbd16(self, long long look_ahead):
        """Decode 16-bit PBD opcodes from compression_buffer[compression_pos:look_ahead]
        into decompression_buffer, returning the new decompressed byte position.

        Opcode ranges:
          0..31    literal run: (code + 1) 16-bit values (byte-swapped if endian_switch).
          32..79   difference run, mode 0: 3-bit deltas.
          80..182  difference run, mode 1: 4-bit deltas.
          183..222 difference run, mode 2: 5-bit deltas.
          223..255 repeat run: (code - 222) copies of the next 16-bit value.
        """
        cdef:
            unsigned char * decomp = <unsigned char *> PyByteArray_AsString(self.decompression_buffer)
            unsigned char * comp = <unsigned char *> PyByteArray_AsString(self.compression_buffer)
            long long cp = self.compression_pos, dp = self.decompression_pos
            unsigned char count, i
            char delta, shift
            unsigned short carry
            unsigned short* ptr

        while cp < look_ahead:
            count = comp[cp]
            cp += 1
            if count < 32:
                # Literal run: count+1 16-bit values -> (count+1)*2 bytes.
                count = count + 1 << 1
                if self.endian_switch:
                    for i in range(0, count, 2):
                        decomp[dp + i] = comp[cp + i + 1]
                        decomp[dp + i + 1] = comp[cp + i]
                else:
                    for i in range(0, count, 2):
                        decomp[dp + i] = comp[cp + i]
                        decomp[dp + i + 1] = comp[cp + i + 1]
                cp += count
                dp += count
                continue
            elif count < 80:
                i = 0
            elif count < 183:
                i = 1
            elif count < 223:
                i = 2
            else:
                # Repeat run: count-222 copies of one 16-bit value.
                count = count - 222 << 1
                if self.endian_switch:
                    for i in range(0, count, 2):
                        decomp[dp + i] = comp[cp + 1]
                        decomp[dp + i + 1] = comp[cp]
                else:
                    for i in range(0, count, 2):
                        decomp[dp + i] = comp[cp]
                        decomp[dp + i + 1] = comp[cp + 1]
                dp += count
                cp += 2
                continue
            # Difference run in mode i: unpack `count` deltas of shift_bits[i] bits
            # from a big-endian bit stream accumulated in `carry`.
            count -= gap[i]
            shift = 0
            ptr = <unsigned short *> &decomp[dp]
            while count > 0:
                shift -= shift_bits[i]
                if shift < 0:
                    # NOTE(review): on the first pass `carry` is read before being
                    # assigned; its stale bits appear to never reach the masked
                    # window below, but this is worth confirming upstream.
                    carry = carry << 8 | comp[cp]
                    cp += 1
                    shift += 8
                delta = carry >> shift & mask[i]
                # Values above ran[i][1] encode negatives: delta' = ran[i][1] - delta.
                if delta > ran[i][1]:
                    delta = ran[i][1] - delta
                ptr[0] = ptr[-1] + delta
                ptr += 1
                dp += 2
                count -= 1
        return dp

    @cython.boundscheck(False)
    @cython.wraparound(False)
    cdef void update_compression_buffer8(self):
        """Advance decoding of the 8-bit stream: scan forward from compression_pos to
        the last opcode whose operand bytes are fully buffered, then decompress up to
        that point and update both cursors. Safe to call after each partial fread."""
        cdef:
            long long look_ahead = self.compression_pos
            unsigned char lav, compressed_diff_entries
        while look_ahead < self.total_read_bytes:
            lav = self.compression_buffer[look_ahead]
            if lav < 33:
                # Literal: opcode + (lav+1) raw bytes.
                if look_ahead + lav + 1 < self.total_read_bytes:
                    look_ahead += lav + 2
                else:
                    break
            elif lav < 128:
                # Difference: ceil((lav-32)/4) packed bytes follow the opcode.
                compressed_diff_entries = (lav - 33) // 4 + 1
                if look_ahead + compressed_diff_entries < self.total_read_bytes:
                    look_ahead += compressed_diff_entries + 1
                else:
                    break
            else:
                # Repeat: opcode + 1 value byte.
                if look_ahead + 1 < self.total_read_bytes:
                    look_ahead += 2
                else:
                    break
        self.decompression_pos = self.decompress_pbd8(look_ahead)
        self.compression_pos = look_ahead

    @cython.boundscheck(False)
    @cython.wraparound(False)
    cdef void update_compression_buffer16(self):
        """Advance decoding of the 16-bit stream: scan forward from compression_pos to
        the last opcode whose operand bytes are fully buffered, then decompress up to
        that point and update both cursors. Safe to call after each partial fread."""
        cdef:
            long long look_ahead = self.compression_pos
            unsigned char lav, compressed_diff_bytes
        while look_ahead < self.total_read_bytes:
            lav = self.compression_buffer[look_ahead]
            if lav < 32:
                # Literal: opcode + (lav+1) 16-bit values.
                if look_ahead + (lav + 1) * 2 < self.total_read_bytes:
                    look_ahead += (lav + 1) * 2 + 1
                else:
                    break
            elif lav < 80:
                # Mode 0: ceil((lav-31)*3 / 8) packed bytes.
                compressed_diff_bytes = ((lav - 31) * 3 - 1) // 8 + 1
                if look_ahead + compressed_diff_bytes < self.total_read_bytes:
                    look_ahead += compressed_diff_bytes + 1
                else:
                    break
            elif lav < 183:
                # Mode 1: ceil((lav-79)*4 / 8) packed bytes.
                compressed_diff_bytes = ((lav - 79) * 4 - 1) // 8 + 1
                if look_ahead + compressed_diff_bytes < self.total_read_bytes:
                    look_ahead += compressed_diff_bytes + 1
                else:
                    break
            elif lav < 223:
                # Mode 2: ceil((lav-182)*5 / 8) packed bytes.
                compressed_diff_bytes = ((lav - 182) * 5 - 1) // 8 + 1
                if look_ahead + compressed_diff_bytes < self.total_read_bytes:
                    look_ahead += compressed_diff_bytes + 1
                else:
                    break
            else:
                # Repeat: opcode + one 16-bit value.
                if look_ahead + 2 < self.total_read_bytes:
                    look_ahead += 3
                else:
                    break
        self.decompression_pos = self.decompress_pbd16(look_ahead)
        self.compression_pos = look_ahead

    @cython.boundscheck(False)
    @cython.wraparound(False)
    @cython.cdivision(True)
    cpdef np.ndarray load(self, path: str | os.PathLike):
        """Load a v3dpbd file and return the decoded image.

        :param path: output image path of v3dpbd.
        :return: a 4D numpy array of either uint8 or uint16.
        """
        file_size = os.path.getsize(path)
        assert file_size >= HEADER_SIZE, "File size smaller than header size."
        cdef:
            short datatype
            long long current_read_bytes, channel_len, remaining_bytes
            const unsigned char[:] p = str(path).encode('utf-8')
            FILE* f = fopen(<const char*>&p[0], <char*>'rb')
        if f is NULL:
            raise Exception("Fail to open file for reading.")
        try:
            # Parse the 43-byte header: magic, endianness, datatype, 4 dims.
            header = bytearray(HEADER_SIZE)
            fread(PyByteArray_AsString(header), HEADER_SIZE, 1, f)
            assert header.find(FORMAT_KEY) == 0, "Format key loading failed."
            header = header[len(FORMAT_KEY):]
            endian = '<' if header[:1] == LITTLE else '>'
            self.endian_switch = header[:1] != self.endian_sys
            datatype = struct.unpack(f'{endian}h', header[1:3])[0]
            assert datatype in [1, 2], "Datatype can only be 1 or 2."
            # sz is (X, Y, Z, C) as stored; the returned array is reshaped sz[::-1].
            sz = struct.unpack(f'{endian}iiii', header[3:])
            channel_len = sz[0] * sz[1] * sz[2]
            remaining_bytes = file_size - HEADER_SIZE
            self.total_read_bytes = 0
            self.compression_buffer = bytearray(remaining_bytes)
            self.decompression_buffer = bytearray(channel_len * sz[3] * datatype)
            self.compression_pos = self.decompression_pos = 0
            # Stream the compressed payload in chunks, decompressing as bytes arrive.
            # The third min() term keeps each read within the current channel.
            while remaining_bytes > 0:
                current_read_bytes = min(remaining_bytes, self.read_step_size_bytes,
                                         (self.total_read_bytes // channel_len + 1) *
                                         channel_len - self.total_read_bytes)
                fread(PyByteArray_AsString(self.compression_buffer) + self.total_read_bytes, current_read_bytes, 1, f)
                self.total_read_bytes += current_read_bytes
                remaining_bytes -= current_read_bytes
                if datatype == 1:
                    self.update_compression_buffer8()
                elif datatype == 2:
                    self.update_compression_buffer16()
                else:
                    raise Exception("Invalid datatype")
            return np.frombuffer(self.decompression_buffer, f'{endian}u{datatype}').reshape(sz[::-1])
        finally:
            fclose(f)

    @cython.boundscheck(False)
    @cython.wraparound(False)
    cdef long long compress_pbd8(self):
        """Compress decompression_buffer[:decompression_pos] as an 8-bit PBD stream
        into compression_buffer, returning the number of compressed bytes written.

        For each position it measures the efficiency of a repeat run vs a 2-bit
        difference run and falls back to extending a literal run when neither pays off.
        """
        cdef:
            unsigned char cur_val, prior_val, retest
            unsigned char[96] dbuffer  # staged 2-bit deltas for a candidate difference run
            unsigned char * decomp = <unsigned char *> PyByteArray_AsString(self.decompression_buffer)
            unsigned char * comp = <unsigned char *> PyByteArray_AsString(self.compression_buffer)
            short delta
            long long active_literal_index = -1, cur_dp, cp = 0, dp = 0
            double re_efficiency, df_efficiency
        assert self.decompression_pos > 0, "The buffer to save is empty."
        while dp < self.decompression_pos:
            if cp >= self.compression_pos:
                raise Exception("compression running out of space, try enlarging the compression buffer.")
            # Count how long the current value repeats (run length, max 128).
            retest = 1
            cur_val = decomp[dp]
            cur_dp = dp + 1
            while cur_dp < self.decompression_pos and retest < 128 and decomp[cur_dp] == cur_val:
                retest += 1
                cur_dp += 1
            re_efficiency = retest / 2.  # a repeat run costs 2 bytes

            if re_efficiency < 4:
                # Repeat not clearly best: probe a difference run (deltas in -1..2).
                df_efficiency = 0.
                cur_dp = dp
                if dp > 0:
                    prior_val = decomp[dp - 1]
                    for cur_dp in range(dp, dp + min(self.decompression_pos - dp, 95)):
                        delta = decomp[cur_dp] - prior_val
                        if delta > 2 or delta < -1:
                            break
                        prior_val = decomp[cur_dp]
                        if delta == -1:
                            delta = 3  # -1 is encoded as the 2-bit value 3
                        dbuffer[cur_dp - dp] = delta
                    else:
                        # Loop completed without break: include the last element.
                        cur_dp += 1
                    # entries / (packed bytes + opcode + slack).
                    df_efficiency = (cur_dp - dp) / ((cur_dp - dp) / 4. + 2)
            if re_efficiency >= 4. or re_efficiency > df_efficiency and re_efficiency > 1.:
                # Emit repeat run: opcode (retest+127) then the repeated byte.
                comp[cp] = retest + 127
                cp += 1
                comp[cp] = cur_val
                cp += 1
                active_literal_index = -1
                dp += retest
            elif df_efficiency > 1.:
                # Emit difference run: opcode (count+32) then 4 deltas per byte.
                comp[cp] = cur_dp - dp + 32
                cp += 1
                for delta in range(0, cur_dp - dp, 4):
                    comp[cp] = dbuffer[delta+3] << 6 | dbuffer[delta+2] << 4 | dbuffer[delta+1] << 2 | dbuffer[delta]
                    cp += 1
                active_literal_index = -1
                dp = cur_dp
            else:
                # Append to (or start) a literal run; its opcode counts extra bytes.
                if active_literal_index < 0 or comp[active_literal_index] >= 32:
                    comp[cp] = 0
                    active_literal_index = cp
                    cp += 1
                else:
                    comp[active_literal_index] += 1
                comp[cp] = cur_val
                cp += 1
                dp += 1
        return cp

    @cython.boundscheck(False)
    @cython.wraparound(False)
    @cython.cdivision(True)
    cdef long long compress_pbd16(self):
        """Compress decompression_buffer[:decompression_pos] as a 16-bit PBD stream
        into compression_buffer, returning the number of compressed bytes written.

        Like compress_pbd8 but with three difference modes (3/4/5-bit deltas). When
        pbd16_full_blood is set, all three modes are probed and the best one chosen;
        otherwise only mode 0 is used (compatible with other readers).
        """
        cdef:
            unsigned char * decomp = <unsigned char *> PyByteArray_AsString(self.decompression_buffer)
            unsigned char * comp = <unsigned char *> PyByteArray_AsString(self.compression_buffer)
            unsigned char retest, carry, i = 0
            char shift
            unsigned char* pb
            unsigned short* pcp2
            unsigned short cur_val, prior_val
            const unsigned short* decomp2 = <unsigned short*>&decomp[0]  # view buffer as 16-bit samples
            long long decomp_len = self.decompression_pos // 2, active_literal_index = -1, dp2 = 0, cp = 0, cur_dp2
            int delta
            double re_efficiency
            double[3] df_efficiency
            unsigned char[3][256] dbuffer  # staged deltas per candidate mode
            long long[3] dc                # end position reached per candidate mode
        assert self.decompression_pos > 0, "The buffer to save is empty."
        while dp2 < decomp_len:
            if cp >= self.compression_pos:
                raise Exception("compression running out of space, try enlarging the compression buffer.")
            # Count how long the current 16-bit value repeats.
            retest = 1
            cur_val = decomp2[dp2]
            cur_dp2 = dp2 + 1
            while cur_dp2 < decomp_len and retest < REPEAT_MAX_LEN and decomp2[cur_dp2] == cur_val:
                retest += 1
                cur_dp2 += 1
            re_efficiency = retest / 3.  # a repeat run costs 3 bytes

            if re_efficiency < MAX_EFF[0]:
                # Probe difference runs in each mode; keep per-mode efficiency/extent.
                df_efficiency[0] = df_efficiency[1] = df_efficiency[2] = 0.
                dc[0] = dc[1] = dc[2] = 0
                if dp2 > 0:
                    for i in range(3):
                        prior_val = decomp2[dp2 - 1]
                        cur_dp2 = dp2
                        for cur_dp2 in range(dp2, dp2 + min(decomp_len - dp2, MAX_LEN[i])):
                            delta = decomp2[cur_dp2] - prior_val
                            if delta > ran[i][1] or delta < ran[i][0]:
                                break
                            prior_val = decomp2[cur_dp2]
                            # Negative deltas encode as ran[i][1] - delta (above the positive range).
                            dbuffer[i][cur_dp2 - dp2] = ran[i][1] - delta if delta < 0 else delta
                        else:
                            cur_dp2 += 1
                        df_efficiency[i] = (cur_dp2 - dp2) / ((cur_dp2 - dp2) * (3 + i) / 16. + 1)
                        dc[i] = cur_dp2
                        if not self.pbd16_full_blood:
                            break  # compatibility mode: only mode 0
                    else:
                        # All three modes probed: pick the most efficient.
                        if df_efficiency[1] > df_efficiency[0]:
                            if df_efficiency[2] > df_efficiency[1]:
                                i = 2
                            else:
                                i = 1
                        elif df_efficiency[2] > df_efficiency[0]:
                            i = 2
            if re_efficiency >= MAX_EFF[0] or re_efficiency > df_efficiency[i] and re_efficiency > 1.:
                # Emit repeat run: opcode (retest+222) then the 16-bit value.
                comp[cp] = retest + 222
                cp += 1
                pcp2 = <unsigned short*>&comp[cp]
                pcp2[0] = cur_val
                cp += 2
                dp2 += retest
                active_literal_index = -1
            elif df_efficiency[i] > 1.:
                # Emit difference run: opcode (count + gap[i]) then big-endian bit-packed deltas.
                comp[cp] = dc[i] - dp2 + gap[i]
                cp += 1
                carry = 0
                shift = 8
                for cur_dp2 in range(dc[i] - dp2):
                    shift -= shift_bits[i]
                    if shift > 0:
                        carry |= dbuffer[i][cur_dp2] << shift
                    else:
                        # Byte full: emit it and start the next with the overflow bits.
                        carry |= dbuffer[i][cur_dp2] >> -shift
                        comp[cp] = carry
                        cp += 1
                        shift += 8
                        carry = dbuffer[i][cur_dp2] << shift
                else:
                    # Flush a partially filled final byte.
                    if shift != 8:
                        comp[cp] = carry
                        cp += 1
                active_literal_index = -1
                dp2 = dc[i]
            else:
                # Append to (or start) a literal run of 16-bit values.
                if active_literal_index < 0 or comp[active_literal_index] >= 31:
                    comp[cp] = 0
                    active_literal_index = cp
                    cp += 1
                else:
                    comp[active_literal_index] += 1
                pcp2 = <unsigned short*>&comp[cp]
                pcp2[0] = cur_val
                cp += 2
                dp2 += 1
        return cp

    @cython.boundscheck(False)
    @cython.wraparound(False)
    cpdef void save(self, path: str | os.PathLike, np.ndarray img):
        """Compress and write an image as a v3dpbd file.

        :param path: output image path of v3dpbd.
        :param img: 4D numpy array (C,Z,Y,X) of either uint8 or uint16.
        """
        assert img.ndim == 4, "The image has to be 4D"
        assert img.dtype in [np.uint8, np.uint16], "The pixel type has to be uint8 or uint16"
        cdef:
            bytearray header
            int[4] sz = [img.shape[0], img.shape[1], img.shape[2], img.shape[3]]
            int[:] size = sz
            const unsigned char[:] p = str(path).encode('utf-8')
            long long compression_size, channel_len
            short datatype
            FILE * f = fopen(<const char *> &p[0], <char *> 'wb')
        if f is NULL:
            raise Exception("Fail to open file for writing.")
        try:
            # Files are written in host byte order.
            endian = '<' if self.endian_sys == LITTLE else '>'
            if img.dtype == np.uint8:
                datatype = 1
            elif img.dtype == np.uint16:
                datatype = 2
            else:
                raise Exception("Unsupported datatype.")
            # Header stores dims reversed, i.e. (X, Y, Z, C).
            header = bytearray(FORMAT_KEY + self.endian_sys + struct.pack(f'{endian}hiiii', datatype, *size[::-1]))
            assert fwrite(PyByteArray_AsString(header), HEADER_SIZE, 1, f) == 1, "Header writing failed."
            channel_len = sz[0] * sz[1] * sz[2]
            # Worst-case output bound; compress_pbd* raise if it is exceeded.
            self.compression_pos = channel_len * sz[3] * datatype * COMPRESSION_ENLARGEMENT
            self.compression_buffer = bytearray(self.compression_pos)
            # Normalize 16-bit data to the target endianness before encoding.
            if datatype == 2 and img.dtype.byteorder not in ['=', endian]:
                img = img.byteswap()
            self.decompression_buffer = bytearray(img.tobytes())
            self.decompression_pos = len(self.decompression_buffer)
            if datatype == 1:
                compression_size = self.compress_pbd8()
            elif datatype == 2:
                compression_size = self.compress_pbd16()
            else:
                raise Exception("Invalid datatype.")
            assert fwrite(<void*>PyByteArray_AsString(self.compression_buffer), compression_size, 1, f) == 1, \
                "Buffer saving failed."
        finally:
            fclose(f)
|
|
Binary file
|