swcgeom-0.19.4-cp313-cp313-macosx_14_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (72)
  1. swcgeom/__init__.py +21 -0
  2. swcgeom/analysis/__init__.py +13 -0
  3. swcgeom/analysis/feature_extractor.py +454 -0
  4. swcgeom/analysis/features.py +218 -0
  5. swcgeom/analysis/lmeasure.py +750 -0
  6. swcgeom/analysis/sholl.py +201 -0
  7. swcgeom/analysis/trunk.py +183 -0
  8. swcgeom/analysis/visualization.py +191 -0
  9. swcgeom/analysis/visualization3d.py +81 -0
  10. swcgeom/analysis/volume.py +143 -0
  11. swcgeom/core/__init__.py +19 -0
  12. swcgeom/core/branch.py +129 -0
  13. swcgeom/core/branch_tree.py +65 -0
  14. swcgeom/core/compartment.py +107 -0
  15. swcgeom/core/node.py +130 -0
  16. swcgeom/core/path.py +155 -0
  17. swcgeom/core/population.py +341 -0
  18. swcgeom/core/swc.py +247 -0
  19. swcgeom/core/swc_utils/__init__.py +19 -0
  20. swcgeom/core/swc_utils/assembler.py +35 -0
  21. swcgeom/core/swc_utils/base.py +180 -0
  22. swcgeom/core/swc_utils/checker.py +107 -0
  23. swcgeom/core/swc_utils/io.py +204 -0
  24. swcgeom/core/swc_utils/normalizer.py +163 -0
  25. swcgeom/core/swc_utils/subtree.py +70 -0
  26. swcgeom/core/tree.py +384 -0
  27. swcgeom/core/tree_utils.py +277 -0
  28. swcgeom/core/tree_utils_impl.py +58 -0
  29. swcgeom/images/__init__.py +9 -0
  30. swcgeom/images/augmentation.py +149 -0
  31. swcgeom/images/contrast.py +87 -0
  32. swcgeom/images/folder.py +217 -0
  33. swcgeom/images/io.py +578 -0
  34. swcgeom/images/loaders/__init__.py +8 -0
  35. swcgeom/images/loaders/pbd.cpython-313-darwin.so +0 -0
  36. swcgeom/images/loaders/pbd.pyx +523 -0
  37. swcgeom/images/loaders/raw.cpython-313-darwin.so +0 -0
  38. swcgeom/images/loaders/raw.pyx +183 -0
  39. swcgeom/transforms/__init__.py +20 -0
  40. swcgeom/transforms/base.py +136 -0
  41. swcgeom/transforms/branch.py +223 -0
  42. swcgeom/transforms/branch_tree.py +74 -0
  43. swcgeom/transforms/geometry.py +270 -0
  44. swcgeom/transforms/image_preprocess.py +107 -0
  45. swcgeom/transforms/image_stack.py +219 -0
  46. swcgeom/transforms/images.py +206 -0
  47. swcgeom/transforms/mst.py +183 -0
  48. swcgeom/transforms/neurolucida_asc.py +498 -0
  49. swcgeom/transforms/path.py +56 -0
  50. swcgeom/transforms/population.py +36 -0
  51. swcgeom/transforms/tree.py +265 -0
  52. swcgeom/transforms/tree_assembler.py +161 -0
  53. swcgeom/utils/__init__.py +18 -0
  54. swcgeom/utils/debug.py +23 -0
  55. swcgeom/utils/download.py +119 -0
  56. swcgeom/utils/dsu.py +58 -0
  57. swcgeom/utils/ellipse.py +131 -0
  58. swcgeom/utils/file.py +90 -0
  59. swcgeom/utils/neuromorpho.py +581 -0
  60. swcgeom/utils/numpy_helper.py +70 -0
  61. swcgeom/utils/plotter_2d.py +134 -0
  62. swcgeom/utils/plotter_3d.py +35 -0
  63. swcgeom/utils/renderer.py +145 -0
  64. swcgeom/utils/sdf.py +324 -0
  65. swcgeom/utils/solid_geometry.py +154 -0
  66. swcgeom/utils/transforms.py +367 -0
  67. swcgeom/utils/volumetric_object.py +483 -0
  68. swcgeom-0.19.4.dist-info/METADATA +86 -0
  69. swcgeom-0.19.4.dist-info/RECORD +72 -0
  70. swcgeom-0.19.4.dist-info/WHEEL +5 -0
  71. swcgeom-0.19.4.dist-info/licenses/LICENSE +201 -0
  72. swcgeom-0.19.4.dist-info/top_level.txt +1 -0
swcgeom/transforms/neurolucida_asc.py
@@ -0,0 +1,498 @@
+
+# SPDX-FileCopyrightText: 2022 - 2025 Zexin Yuan <pypi@yzx9.xyz>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Neurolucida related transformation."""
+
+import os
+import re
+from enum import Enum, auto
+from typing import IO, Any, NamedTuple, cast
+
+from typing_extensions import override
+
+from swcgeom.core import Tree
+from swcgeom.core.swc_utils import SWCNames, SWCTypes, get_names, get_types
+from swcgeom.transforms.base import Transform
+
+__all__ = ["NeurolucidaAscToSwc"]
+
+
+class NeurolucidaAscToSwc(Transform[str, Tree]):
+    """Convert neurolucida asc format to swc format."""
+
+    @override
+    def __call__(self, x: str) -> Tree:
+        return self.convert(x)
+
+    @classmethod
+    def convert(cls, fname: str) -> Tree:
+        with open(fname, "r") as f:
+            tree = cls.from_stream(f, source=os.path.abspath(fname))
+
+        return tree
+
+    @classmethod
+    def from_stream(cls, x: IO[str], *, source: str = "") -> Tree:
+        parser = Parser(x, source=source)
+        ast = parser.parse()
+        tree = cls.from_ast(ast)
+        return tree
+
+    @staticmethod
+    def from_ast(
+        ast: "AST",
+        *,
+        names: SWCNames | None = None,
+        types: SWCTypes | None = None,
+    ) -> Tree:
+        names = get_names(names)
+        types = get_types(types)
+        ndata = {n: [] for n in names.cols()}
+
+        next_id = 0
+        typee = [types.undefined]
+
+        def walk_ast(root: ASTNode, pid: int = -1) -> None:
+            nonlocal next_id, typee
+            match root.type:
+                case ASTType.ROOT:
+                    for n in root.children:
+                        walk_ast(n)
+
+                case ASTType.TREE:
+                    match root.value:
+                        case "AXON":
+                            typee.append(types.axon)
+                        case "DENDRITE":
+                            typee.append(types.basal_dendrite)
+
+                    for n in root.children:
+                        walk_ast(n)
+
+                    typee.pop()
+
+                case ASTType.NODE:
+                    x, y, z, r = root.value
+                    idx = next_id
+                    next_id += 1
+
+                    ndata[names.id].append(idx)
+                    ndata[names.type].append(typee[-1])
+                    ndata[names.x].append(x)
+                    ndata[names.y].append(y)
+                    ndata[names.z].append(z)
+                    ndata[names.r].append(r)
+                    ndata[names.pid].append(pid)
+
+                    for n in root.children:
+                        walk_ast(n, pid=idx)
+
+        walk_ast(ast)
+        tree = Tree(
+            next_id,
+            source=ast.source,
+            names=names,
+            **ndata,  # type: ignore
+        )
+        return tree
+
+
+# -----------------
+# ASC format parser
+# -----------------
+
+# AST
+
+
+class ASTType(Enum):
+    ROOT = auto()
+    TREE = auto()
+    NODE = auto()
+    COLOR = auto()
+    COMMENT = auto()
+
+
+class ASTNode:
+    parent: "ASTNode | None" = None
+
+    def __init__(
+        self,
+        type: ASTType,
+        value: Any = None,
+        tokens: list["Token"] | None = None,
+        children: list["ASTNode"] | None = None,
+    ):
+        self.type = type
+        self.value = value
+        self.tokens = tokens or []
+        self.children = children or []
+        for child in self.children:
+            child.parent = self
+
+    def add_child(self, child: "ASTNode") -> None:
+        self.children.append(child)
+        child.parent = self
+        if child.tokens is not None:
+            self.tokens.extend(child.tokens)
+
+    def __eq__(self, __value: object) -> bool:
+        """
+        Compare two ASTNode objects.
+
+        NOTE: The `parent` and `tokens` attributes are not compared.
+        """
+        return (
+            isinstance(__value, ASTNode)
+            and self.type == __value.type
+            and self.value == __value.value
+            and self.children == __value.children
+        )
+
+
+class AST(ASTNode):
+    def __init__(self, children: list[ASTNode] | None = None, source: str = ""):
+        super().__init__(ASTType.ROOT, children=children)
+        self.source = source
+
+
+# ASC values
+
+
+class ASCNode(NamedTuple):
+    x: float
+    y: float
+    z: float
+    r: float
+
+
+class ASCColor(NamedTuple):
+    color: str
+
+    def __eq__(self, __value: object) -> bool:
+        return (
+            isinstance(__value, ASCColor)
+            and self.color.upper() == __value.color.upper()
+        )
+
+
+class ASCComment(NamedTuple):
+    comment: str
+
+
+# Error
+
+
+class TokenTypeError(ValueError):
+    def __init__(self, token: "Token", expected: str):
+        super().__init__(
+            f"Unexpected token {token.type.name} `{token.value}` at {token.lineno}:{token.column}, expected {expected}"
+        )
+
+
+class LiteralTokenError(ValueError):
+    def __init__(self, token: "Token", expected: str):
+        super().__init__(
+            f"Unexpected LITERAL token {token.value} at {token.lineno}:{token.column}, expected {expected}"
+        )
+
+
+class AssertionTokenTypeError(Exception):
+    pass
+
+
+# Parser
+
+
+class Parser:
+    def __init__(self, r: IO[str], *, source: str = ""):
+        self.lexer = Lexer(r)
+        self.next_token = None
+        self.source = source
+        self._read_token()
+
+    def parse(self) -> AST:
+        try:
+            return self._parse()
+        except AssertionTokenTypeError as assertion_err:
+            msg = (
+                f"Error parsing {self.source}" if self.source != "" else "Error parsing"
+            )
+            original_error = assertion_err.__cause__
+            err = ValueError(msg)
+            if original_error is None:
+                raise err
+
+            ignores = ["_assert_and_cunsume", "_assert"]
+            current = assertion_err.__traceback__
+            while current is not None:
+                if (
+                    current.tb_next is not None
+                    and current.tb_next.tb_frame.f_code.co_name in ignores
+                ):
+                    current.tb_next = None
+                else:
+                    current = current.tb_next
+
+            original_error.__traceback__ = assertion_err.__traceback__
+
+            raise err from original_error
+        except Exception as original_error:
+            msg = f"Error parsing {self.source}" if self.source else "Error parsing"
+            raise ValueError(msg) from original_error
+
+    def _parse(self) -> AST:
+        root = AST(source=self.source)
+
+        token = self._assert_and_cunsume(TokenType.BRACKET_LEFT)
+        root.tokens.append(token)
+
+        while (token := self.next_token) is not None:
+            if token.type == TokenType.BRACKET_RIGHT:
+                break
+
+            if token.type != TokenType.BRACKET_LEFT:
+                raise TokenTypeError(token, "BRACKET_LEFT, BRACKET_RIGHT")
+
+            root.tokens.append(token)
+            self._consume()
+
+            token = self._assert(self.next_token, TokenType.LITERAL)
+            match str.upper(token.value):
+                case "AXON" | "DENDRITE":
+                    self._parse_tree(root)
+
+                case "COLOR":
+                    self._parse_color(root)  # TODO: bug
+
+                case _:
+                    raise LiteralTokenError(token, "AXON, DENDRITE, COLOR")
+
+        token = self._assert(self.next_token, TokenType.BRACKET_RIGHT)
+        token = self._assert_and_cunsume(TokenType.BRACKET_RIGHT)
+        root.tokens.append(token)
+        return root
+
+    def _parse_tree(self, root: ASTNode) -> None:
+        t1 = self._assert_and_cunsume(TokenType.LITERAL)
+        node = ASTNode(ASTType.TREE, str.upper(t1.value), tokens=[t1])
+
+        t2 = self._assert_and_cunsume(TokenType.BRACKET_RIGHT)
+        node.tokens.append(t2)
+
+        t3 = self._assert_and_cunsume(TokenType.BRACKET_LEFT)
+        node.tokens.append(t3)
+
+        self._parse_subtree(node)
+        root.add_child(node)
+
+    def _parse_subtree(self, root: ASTNode) -> None:
+        flag = True  # flag to check if the bracket_left can be consumed
+        current = root
+        while (token := self.next_token) is not None:
+            match token.type:
+                case TokenType.BRACKET_LEFT:
+                    self._read_token()
+                    if flag:
+                        flag = False
+                    else:
+                        self._parse_subtree(current)
+
+                case TokenType.BRACKET_RIGHT:
+                    break
+
+                case TokenType.FLOAT:
+                    current = self._parse_node(current)
+                    flag = True
+
+                case TokenType.LITERAL:
+                    match str.upper(token.value):
+                        case "COLOR":
+                            self._parse_color(current)
+                        case _:
+                            raise LiteralTokenError(token, "COLOR")
+
+                    flag = True
+
+                case TokenType.OR:
+                    current = root
+                    self._read_token()
+                    flag = True
+
+                case TokenType.COMMENT:
+                    self._parse_comment(current)
+
+                case _:
+                    excepted = (
+                        "BRACKET_LEFT, BRACKET_RIGHT, LITERAL, FLOAT, OR, COMMENT"
+                    )
+                    raise TokenTypeError(token, excepted)
+
+        current.tokens.append(token)
+
+    def _parse_node(self, root: ASTNode) -> ASTNode:
+        # FLOAT FLOAT FLOAT FLOAT )
+        t1 = self._assert_and_cunsume(TokenType.FLOAT)
+        t2 = self._assert(self.next_token, TokenType.FLOAT)
+        self._read_token()
+        t3 = self._assert(self.next_token, TokenType.FLOAT)
+        self._read_token()
+        t4 = self._assert(self.next_token, TokenType.FLOAT)
+        self._read_token()
+        t5 = self._assert_and_cunsume(TokenType.BRACKET_RIGHT)
+
+        x, y, z, r = t1.value, t2.value, t3.value, t4.value
+        node = ASTNode(ASTType.NODE, ASCNode(x, y, z, r), tokens=[t1, t2, t3, t4, t5])
+        root.add_child(node)
+        return node
+
+    def _parse_color(self, root: ASTNode) -> ASTNode:
+        # COLOR COLOR_VALUE )
+        t1 = self._assert_and_cunsume(TokenType.LITERAL)
+        t2 = self._assert_and_cunsume(TokenType.LITERAL)
+        t3 = self._assert_and_cunsume(TokenType.BRACKET_RIGHT)
+
+        node = ASTNode(ASTType.COLOR, ASCColor(t2.value), tokens=[t1, t2, t3])
+        root.add_child(node)
+        return node
+
+    def _parse_comment(self, root: ASTNode) -> ASTNode:
+        # ; COMMENT
+        t1 = self._assert_and_cunsume(TokenType.COMMENT)
+        node = ASTNode(ASTType.COMMENT, ASCComment(t1.value), tokens=[t1])
+        root.add_child(node)  # ? where the comment should be added
+        return node
+
+    def _read_token(self) -> None:
+        self.next_token = next(self.lexer, None)
+
+    def _assert_and_cunsume(self, type: "TokenType") -> "Token":
+        token = self._consume()
+        token = self._assert(token, type)
+        return cast(Token, token)
+
+    def _assert(self, token: "Token | None", type: "TokenType") -> "Token":
+        if token is None:
+            raise AssertionTokenTypeError() from ValueError("Unexpected EOF")
+
+        if token.type != type:
+            raise AssertionTokenTypeError() from TokenTypeError(token, type.name)
+
+        return token
+
+    def _consume(self) -> "Token | None":
+        token = self.next_token
+        self._read_token()
+        return token
+
+
+# -----------------
+# ASC format lexer
+# -----------------
+
+
+class TokenType(Enum):
+    BRACKET_LEFT = auto()
+    BRACKET_RIGHT = auto()
+    COMMENT = auto()
+    OR = auto()
+    FLOAT = auto()
+    LITERAL = auto()
+
+
+class Token:
+    def __init__(self, type: TokenType, value: Any, lineno: int, column: int):
+        self.type = type
+        self.value = value
+        self.lineno = lineno
+        self.column = column
+
+    def __repr__(self) -> str:
+        return f"Token({self.type.name}, {self.value}, Position={self.lineno}:{self.column})"
+
+
+RE_FLOAT = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?")
+
+
+class Lexer:
+    def __init__(self, r: IO[str]):
+        self.r = r
+        self.lineno = 1
+        self.column = 1
+        self.next_char = self.r.read(1)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self) -> Token:
+        match word := self._read_word():
+            case "":
+                raise StopIteration
+
+            case "(":
+                return self._token(TokenType.BRACKET_LEFT, word)
+
+            case ")":
+                return self._token(TokenType.BRACKET_RIGHT, word)
+
+            case ";":
+                return self._token(TokenType.COMMENT, self._read_line())
+
+            case "|":
+                return self._token(TokenType.OR, word)
+
+            case _ if RE_FLOAT.match(word) is not None:
+                return self._token(TokenType.FLOAT, float(word))
+
+            case _:
+                return self._token(TokenType.LITERAL, word)
+
+    def _read_char(self) -> bool:
+        self.next_char = self.r.read(1)
+        if self.next_char == "":
+            return False
+
+        if self.next_char == "\n":
+            self.lineno += 1
+            self.column = 1
+        else:
+            self.column += 1
+        return True
+
+    def _read_word(self) -> str:
+        # skip leading spaces
+        while self.next_char != "" and self.next_char in " \t\n":
+            self._read_char()
+
+        token = ""
+        while self.next_char != "" and self.next_char not in " \t\n();|":
+            token += self.next_char
+            self._read_char()
+
+        if token != "":
+            return token
+
+        if self.next_char == "":
+            return ""
+
+        ch = self.next_char
+        self._read_char()
+        return ch
+
+    def _read_line(self) -> str:
+        if self.next_char != "\n":
+            line = self.r.readline()
+            line = self.next_char + line
+            if line.endswith("\n"):
+                line = line[:-1]
+        else:
+            line = ""
+
+        self.lineno += 1
+        self.column = 1
+        self.next_char = self.r.read(1)
+        return line
+
+    def _token(self, type: TokenType, value: Any) -> Token:
+        return Token(type, value, self.lineno, self.column)
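
For orientation, here is a minimal usage sketch of the converter added above. The file name is hypothetical; the class and methods are imported directly from the module shown in this diff.

```python
from swcgeom.transforms.neurolucida_asc import NeurolucidaAscToSwc

# "neuron.asc" is a placeholder for any Neurolucida ASC tracing on disk.
tree = NeurolucidaAscToSwc.convert("neuron.asc")

# The instance form is equivalent and is what Transform-based pipelines call.
tree = NeurolucidaAscToSwc()("neuron.asc")

# A text stream also works, e.g. when the data is already in memory.
with open("neuron.asc") as f:
    tree = NeurolucidaAscToSwc.from_stream(f, source="neuron.asc")
```
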
swcgeom/transforms/path.py
@@ -0,0 +1,56 @@
+# SPDX-FileCopyrightText: 2022 - 2025 Zexin Yuan <pypi@yzx9.xyz>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Transformation in path."""
+
+from typing_extensions import override
+
+from swcgeom.core import Path, Tree, redirect_tree
+from swcgeom.transforms.base import Transform
+
+__all__ = ["PathToTree", "PathReverser"]
+
+
+class PathToTree(Transform[Path, Tree]):
+    """Transform path to tree."""
+
+    @override
+    def __call__(self, x: Path) -> Tree:
+        t = Tree(
+            x.number_of_nodes(),
+            type=x.type(),
+            id=x.id(),
+            x=x.x(),
+            y=x.y(),
+            z=x.z(),
+            r=x.r(),
+            pid=x.pid(),
+            source=x.source,
+            comments=x.comments.copy(),
+            names=x.names,
+        )
+        return t
+
+
+class PathReverser(Transform[Path, Path]):
+    r"""Reverse path.
+
+    ```text
+    a -> b -> ... -> y -> z
+    // to
+    a <- b <- ... <- y <- z
+    ```
+    """
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.to_tree = PathToTree()
+
+    @override
+    def __call__(self, x: Path) -> Path:
+        x[0].type, x[-1].type = x[-1].type, x[0].type
+        t = self.to_tree(x)
+        t = redirect_tree(t, x[-1].id)
+        p = t.get_paths()[0]
+        return p
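
A short sketch of how the two path transforms compose. Loading a tree via `Tree.from_swc` is an assumption about the core API, and the file name is hypothetical; `Tree.get_paths()` is the same accessor `PathReverser` uses internally.

```python
from swcgeom.core import Tree
from swcgeom.transforms.path import PathReverser, PathToTree

tree = Tree.from_swc("neuron.swc")     # assumed loader; placeholder path
path = tree.get_paths()[0]             # first root-to-tip path of the tree

as_tree = PathToTree()(path)           # wrap the path as a single-branch Tree
reversed_path = PathReverser()(path)   # same nodes, walked tip-to-root
```
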
swcgeom/transforms/population.py
@@ -0,0 +1,36 @@
+
+# SPDX-FileCopyrightText: 2022 - 2025 Zexin Yuan <pypi@yzx9.xyz>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Transformation in population."""
+
+from typing_extensions import override
+
+from swcgeom.core import Population, Tree
+from swcgeom.transforms.base import Transform
+
+__all__ = ["PopulationTransform"]
+
+
+class PopulationTransform(Transform[Population, Population]):
+    """Apply transformation for each tree in population."""
+
+    def __init__(self, transform: Transform[Tree, Tree]):
+        super().__init__()
+        self.transform = transform
+
+    @override
+    def __call__(self, population: Population) -> Population:
+        trees: list[Tree] = []
+        for t in population:
+            new_t = self.transform(t)
+            if new_t.source == "":
+                new_t.source = t.source
+            trees.append(new_t)
+
+        return Population(trees, root=population.root)
+
+    @override
+    def extra_repr(self) -> str:
+        return f"transform={self.transform}"
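
To show where `PopulationTransform` fits, a hedged sketch: `Population.from_swc` is assumed from the core API, the directory is hypothetical, and the inner transform is a do-nothing placeholder rather than one of the package's real tree transforms.

```python
from swcgeom.core import Population, Tree
from swcgeom.transforms.base import Transform
from swcgeom.transforms.population import PopulationTransform


class Identity(Transform[Tree, Tree]):
    """Placeholder Tree -> Tree transform, for illustration only."""

    def __call__(self, x: Tree) -> Tree:
        return x  # a real transform would return a modified copy


# Assumed loader; "swc_dir/" stands in for a folder of .swc files.
population = Population.from_swc("swc_dir/")
population = PopulationTransform(Identity())(population)
```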