kernpy 0.0.2__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kernpy/__init__.py +215 -0
- kernpy/__main__.py +217 -0
- kernpy/core/__init__.py +119 -0
- kernpy/core/_io.py +48 -0
- kernpy/core/base_antlr_importer.py +61 -0
- kernpy/core/base_antlr_spine_parser_listener.py +196 -0
- kernpy/core/basic_spine_importer.py +43 -0
- kernpy/core/document.py +965 -0
- kernpy/core/dyn_importer.py +30 -0
- kernpy/core/dynam_spine_importer.py +42 -0
- kernpy/core/error_listener.py +51 -0
- kernpy/core/exporter.py +535 -0
- kernpy/core/fing_spine_importer.py +42 -0
- kernpy/core/generated/kernSpineLexer.interp +444 -0
- kernpy/core/generated/kernSpineLexer.py +535 -0
- kernpy/core/generated/kernSpineLexer.tokens +236 -0
- kernpy/core/generated/kernSpineParser.interp +425 -0
- kernpy/core/generated/kernSpineParser.py +9954 -0
- kernpy/core/generated/kernSpineParser.tokens +236 -0
- kernpy/core/generated/kernSpineParserListener.py +1200 -0
- kernpy/core/generated/kernSpineParserVisitor.py +673 -0
- kernpy/core/generic.py +426 -0
- kernpy/core/gkern.py +526 -0
- kernpy/core/graphviz_exporter.py +89 -0
- kernpy/core/harm_spine_importer.py +41 -0
- kernpy/core/import_humdrum_old.py +853 -0
- kernpy/core/importer.py +285 -0
- kernpy/core/importer_factory.py +43 -0
- kernpy/core/kern_spine_importer.py +73 -0
- kernpy/core/mens_spine_importer.py +23 -0
- kernpy/core/mhxm_spine_importer.py +44 -0
- kernpy/core/pitch_models.py +338 -0
- kernpy/core/root_spine_importer.py +58 -0
- kernpy/core/spine_importer.py +45 -0
- kernpy/core/text_spine_importer.py +43 -0
- kernpy/core/tokenizers.py +239 -0
- kernpy/core/tokens.py +2011 -0
- kernpy/core/transposer.py +300 -0
- kernpy/io/__init__.py +14 -0
- kernpy/io/public.py +355 -0
- kernpy/polish_scores/__init__.py +13 -0
- kernpy/polish_scores/download_polish_dataset.py +357 -0
- kernpy/polish_scores/iiif.py +47 -0
- kernpy/test_grammar.sh +22 -0
- kernpy/util/__init__.py +14 -0
- kernpy/util/helpers.py +55 -0
- kernpy/util/store_cache.py +35 -0
- kernpy/visualize_analysis.sh +23 -0
- kernpy-1.0.0.dist-info/METADATA +501 -0
- kernpy-1.0.0.dist-info/RECORD +51 -0
- {kernpy-0.0.2.dist-info → kernpy-1.0.0.dist-info}/WHEEL +1 -2
- kernpy/example.py +0 -1
- kernpy-0.0.2.dist-info/LICENSE +0 -19
- kernpy-0.0.2.dist-info/METADATA +0 -19
- kernpy-0.0.2.dist-info/RECORD +0 -7
- kernpy-0.0.2.dist-info/top_level.txt +0 -1
kernpy/core/document.py
ADDED
@@ -0,0 +1,965 @@
|
|
1
|
+
from __future__ import annotations
|
2
|
+
|
3
|
+
from copy import copy, deepcopy
|
4
|
+
from collections import deque, defaultdict
|
5
|
+
from abc import ABC, abstractmethod
|
6
|
+
from enum import Enum
|
7
|
+
from typing import List, Optional, Dict, Union
|
8
|
+
from collections.abc import Sequence
|
9
|
+
from queue import Queue
|
10
|
+
|
11
|
+
from kernpy.core import TokenCategory, CORE_HEADERS, TERMINATOR
|
12
|
+
from kernpy.core import MetacommentToken, AbstractToken, HeaderToken
|
13
|
+
from .transposer import transpose, Direction, NotationEncoding, AVAILABLE_INTERVALS
|
14
|
+
from .tokens import NoteRestToken, Subtoken
|
15
|
+
from .transposer import IntervalsByName
|
16
|
+
|
17
|
+
|
18
|
+
class SignatureNodes:
    """
    SignatureNodes class.

    Keeps track of the most recent signature nodes of a tree, so that
    several signature tokens can be registered without repetitions.

    Attributes:
        nodes (dict): Maps the signature descendant token class name
            (KeyToken, MeterSymbolToken, etc...) to its node.
    """

    def __init__(self):
        """
        Create an instance of SignatureNodes with an empty node mapping.

        Examples:
            >>> signature_nodes = SignatureNodes()
            >>> signature_nodes.nodes
            {}
        """
        self.nodes = {}

    def clone(self):
        """
        Create a copy of the SignatureNodes instance.

        The nodes mapping is copied shallowly, so the new instance can be
        updated independently of this one (the nodes themselves are shared).

        Returns: A new instance of SignatureNodes with the nodes copied.
        """
        duplicate = SignatureNodes()
        duplicate.nodes = copy(self.nodes)
        return duplicate

    def update(self, node):
        # Key by the token's concrete class name so each signature kind
        # (key, meter, clef, ...) is stored at most once.
        self.nodes[node.token.__class__.__name__] = node
|
60
|
+
|
61
|
+
|
62
|
+
class TreeTraversalInterface(ABC):
    """
    TreeTraversalInterface class.

    Interface used to traverse the tree: concrete traversals implement
    the `visit` method, which is invoked once per visited node.
    """

    @abstractmethod
    def visit(self, node):
        pass
|
73
|
+
|
74
|
+
|
75
|
+
class Node:
    """
    Node class.

    Represents one node of the multistage tree that stores the main
    information of the **kern file.

    Attributes:
        id (int): The unique id of the node.
        token (Optional[AbstractToken]): The token of the node (KeyToken, MeterSymbolToken, etc...).
        parent (Optional['Node']): The parent node; None when the parent is the root.
        children (List['Node']): The child nodes.
        stage (int): The stage of the node in the tree (similar to a row in the **kern file).
        last_spine_operator_node (Optional['Node']): The last spine operator node.
        last_signature_nodes (Optional[SignatureNodes]): The last SignatureNodes instance.
        header_node (Optional['Node']): The header node.
    """
    NextID = 1  # class-level counter used to hand out unique ids

    def __init__(self,
                 stage: int,
                 token: "Optional[AbstractToken]",
                 parent: "Optional['Node']",
                 last_spine_operator_node: "Optional['Node']",
                 last_signature_nodes: "Optional[SignatureNodes]",
                 header_node: "Optional['Node']"
                 ):
        """
        Create an instance of Node.

        Args:
            stage (int): The stage of the node (similar to a row in the **kern file).
            token (Optional[AbstractToken]): The token of the node.
            parent (Optional['Node']): The parent node; None when the parent is the root.
            last_spine_operator_node (Optional['Node']): The last spine operator node.
            last_signature_nodes (Optional[SignatureNodes]): The last SignatureNodes instance.
            header_node (Optional['Node']): The header node.
        """
        self.id = Node.NextID
        Node.NextID += 1
        self.token = token
        self.parent = parent
        self.children = []
        self.stage = stage
        self.header_node = header_node
        # Composition: keep our own clone so later signature updates on
        # this node do not leak back into the source node.
        if last_signature_nodes is None:
            self.last_signature_nodes = SignatureNodes()
        else:
            self.last_signature_nodes = last_signature_nodes.clone()
        self.last_spine_operator_node = last_spine_operator_node

    def count_nodes_by_stage(self) -> List[int]:
        """
        Count the number of nodes in each stage of the subtree rooted here.

        Returns:
            List[int]: The node count for every level, starting at this node.

        Examples:
            >>> node.count_nodes_by_stage()
            [2, 2, 2, 2, 3, 3, 3, 2]
        """
        per_level = defaultdict(int)
        pending = deque([(self, 0)])  # (node, level) pairs, BFS order
        while pending:
            current, depth = pending.popleft()
            per_level[depth] += 1
            for child in current.children:
                pending.append((child, depth + 1))

        # Flatten the level->count mapping into a dense list.
        deepest = max(per_level.keys())
        return [per_level[depth] for depth in range(deepest + 1)]

    def dfs(self, tree_traversal: "TreeTraversalInterface"):
        """
        Depth-first search (DFS), recursive version.

        Args:
            tree_traversal (TreeTraversalInterface): Visitor applied to every node.
        """
        tree_traversal.visit(self)
        for child in self.children:
            child.dfs(tree_traversal)

    def dfs_iterative(self, tree_traversal: "TreeTraversalInterface"):
        """
        Depth-first search (DFS), iterative version.

        Args:
            tree_traversal (TreeTraversalInterface): Visitor applied to every node.

        Returns: None
        """
        pending = [self]
        while pending:
            current = pending.pop()
            tree_traversal.visit(current)
            # Push children reversed so they are visited left-to-right,
            # matching the recursive dfs() order.
            pending.extend(reversed(current.children))

    def __eq__(self, other):
        """
        Compare two nodes; nodes are equal when they share the same unique id.

        Args:
            other: The other node to compare.

        Returns: True if the nodes are equal, False otherwise.
        """
        return isinstance(other, Node) and self.id == other.id

    def __ne__(self, other):
        """
        Compare two nodes.

        Args:
            other: The other node to compare.

        Returns: True if the nodes are not equal, False otherwise.
        """
        return not self.__eq__(other)

    def __hash__(self):
        """
        Get the hash of the node, derived from its unique id.

        Returns: The hash of the node.
        """
        return hash(self.id)

    def __str__(self):
        """
        Get the string representation of the node, '{stage: token}'.

        Returns: The string representation of the node.
        """
        return f"{{{self.stage}: {self.token}}}"
|
222
|
+
|
223
|
+
|
224
|
+
class BoundingBoxMeasures:
    """
    BoundingBoxMeasures class.

    Associates a bounding box with the range of score measures it covers.
    """

    def __init__(
            self,
            bounding_box,
            from_measure: int,
            to_measure: int
    ):
        """
        Create an instance of BoundingBoxMeasures.

        Args:
            bounding_box: The bounding box object of the node.
            from_measure (int): The first measure of the score in the BoundingBoxMeasures object.
            to_measure (int): The last measure of the score in the BoundingBoxMeasures object.
        """
        self.bounding_box = bounding_box
        self.from_measure = from_measure
        self.to_measure = to_measure
|
246
|
+
|
247
|
+
|
248
|
+
class MultistageTree:
    """
    MultistageTree class.

    A tree whose nodes are additionally grouped by stage; each stage
    corresponds to a row of the Humdrum **kern encoding.
    """

    def __init__(self):
        """
        Constructor for MultistageTree class.

        Creates an empty root Node and starts the stages list with that
        root as its only entry.
        """
        self.root = Node(0, None, None, None, None, None)
        # Stage 0 (index 0) holds only the root (token and header_node
        # are None); the core header lives in stage 1.
        self.stages = [[self.root]]

    def add_node(
            self,
            stage: int,
            parent: "Node",
            token: "Optional[AbstractToken]",
            last_spine_operator_node: "Optional[Node]",
            previous_signature_nodes: "Optional[SignatureNodes]",
            header_node: "Optional[Node]" = None
    ) -> "Node":
        """
        Add a new node to the tree.

        Args:
            stage (int): Stage index the node belongs to.
            parent (Node): Parent node to attach the new node to.
            token (Optional[AbstractToken]): Token stored at the node.
            last_spine_operator_node (Optional[Node]): Last spine operator node.
            previous_signature_nodes (Optional[SignatureNodes]): Previous signature nodes.
            header_node (Optional[Node]): Header node of the spine.

        Returns: Node - The added node object.

        Raises:
            ValueError: If `stage` is more than one past the last stage.
        """
        fresh = Node(stage, token, parent, last_spine_operator_node, previous_signature_nodes, header_node)
        if stage > len(self.stages):
            raise ValueError(f'Cannot add node in stage {stage} when there are only {len(self.stages)} stages')
        if stage == len(self.stages):
            # Opening a brand-new stage.
            self.stages.append([fresh])
        else:
            self.stages[stage].append(fresh)

        parent.children.append(fresh)
        return fresh

    def dfs(self, visit_method) -> None:
        """
        Depth-first search (DFS), recursive version.

        Args:
            visit_method (TreeTraversalInterface): The tree traversal interface.

        Returns: None
        """
        self.root.dfs(visit_method)

    def dfs_iterative(self, visit_method) -> None:
        """
        Depth-first search (DFS). Iterative version.

        Args:
            visit_method (TreeTraversalInterface): The tree traversal interface.

        Returns: None
        """
        self.root.dfs_iterative(visit_method)

    def __deepcopy__(self, memo):
        """
        Create a deep copy of the MultistageTree object.

        The shared `memo` ensures nodes reachable both from the root and
        from the stages list are copied exactly once.
        """
        duplicate = MultistageTree()
        duplicate.root = deepcopy(self.root, memo)
        duplicate.stages = deepcopy(self.stages, memo)
        return duplicate
|
336
|
+
|
337
|
+
|
338
|
+
class Document:
|
339
|
+
"""
|
340
|
+
Document class.
|
341
|
+
|
342
|
+
This class store the score content using an agnostic tree structure.
|
343
|
+
|
344
|
+
Attributes:
|
345
|
+
tree (MultistageTree): The tree structure of the document where all the nodes are stored. \
|
346
|
+
Each stage of the tree corresponds to a row in the Humdrum **kern file encoding.
|
347
|
+
measure_start_tree_stages (List[List[Node]]): The list of nodes that corresponds to the measures. \
|
348
|
+
Empty list by default.
|
349
|
+
The index of the list is starting from 1. Rows after removing empty lines and line comments
|
350
|
+
page_bounding_boxes (Dict[int, BoundingBoxMeasures]): The dictionary of page bounding boxes. \
|
351
|
+
- key: page number
|
352
|
+
- value: BoundingBoxMeasures object
|
353
|
+
header_stage (int): The index of the stage that contains the headers. None by default.
|
354
|
+
"""
|
355
|
+
|
356
|
+
def __init__(self, tree: MultistageTree):
|
357
|
+
"""
|
358
|
+
Constructor for Document class.
|
359
|
+
|
360
|
+
Args:
|
361
|
+
tree (MultistageTree): The tree structure of the document where all the nodes are stored.
|
362
|
+
"""
|
363
|
+
self.tree = tree # TODO: ? Should we use copy.deepcopy() here?
|
364
|
+
self.measure_start_tree_stages = []
|
365
|
+
self.page_bounding_boxes = {}
|
366
|
+
self.header_stage = None
|
367
|
+
|
368
|
+
FIRST_MEASURE = 1
|
369
|
+
|
370
|
+
def get_header_stage(self) -> Union[List[Node], List[List[Node]]]:
|
371
|
+
"""
|
372
|
+
Get the Node list of the header stage.
|
373
|
+
|
374
|
+
Returns: (Union[List[Node], List[List[Node]]]) The Node list of the header stage.
|
375
|
+
|
376
|
+
Raises: Exception - If the document has no header stage.
|
377
|
+
"""
|
378
|
+
if self.header_stage:
|
379
|
+
return self.tree.stages[self.header_stage]
|
380
|
+
else:
|
381
|
+
raise Exception('No header stage found')
|
382
|
+
|
383
|
+
def get_leaves(self) -> List[Node]:
|
384
|
+
"""
|
385
|
+
Get the leaves of the tree.
|
386
|
+
|
387
|
+
Returns: (List[Node]) The leaves of the tree.
|
388
|
+
"""
|
389
|
+
return self.tree.stages[len(self.tree.stages) - 1]
|
390
|
+
|
391
|
+
def get_spine_count(self) -> int:
|
392
|
+
"""
|
393
|
+
Get the number of spines in the document.
|
394
|
+
|
395
|
+
Returns (int): The number of spines in the document.
|
396
|
+
"""
|
397
|
+
return len(self.get_header_stage()) # TODO: test refactor
|
398
|
+
|
399
|
+
def get_first_measure(self) -> int:
|
400
|
+
"""
|
401
|
+
Get the index of the first measure of the document.
|
402
|
+
|
403
|
+
Returns: (Int) The index of the first measure of the document.
|
404
|
+
|
405
|
+
Raises: Exception - If the document has no measures.
|
406
|
+
|
407
|
+
Examples:
|
408
|
+
>>> import kernpy as kp
|
409
|
+
>>> document, err = kp.read('score.krn')
|
410
|
+
>>> document.get_first_measure()
|
411
|
+
1
|
412
|
+
"""
|
413
|
+
if len(self.measure_start_tree_stages) == 0:
|
414
|
+
raise Exception('No measures found')
|
415
|
+
|
416
|
+
return self.FIRST_MEASURE
|
417
|
+
|
418
|
+
def measures_count(self) -> int:
|
419
|
+
"""
|
420
|
+
Get the index of the last measure of the document.
|
421
|
+
|
422
|
+
Returns: (Int) The index of the last measure of the document.
|
423
|
+
|
424
|
+
Raises: Exception - If the document has no measures.
|
425
|
+
|
426
|
+
Examples:
|
427
|
+
>>> document, _ = kernpy.read('score.krn')
|
428
|
+
>>> document.measures_count()
|
429
|
+
10
|
430
|
+
>>> for i in range(document.get_first_measure(), document.measures_count() + 1):
|
431
|
+
>>> options = kernpy.ExportOptions(from_measure=i, to_measure=i+4)
|
432
|
+
"""
|
433
|
+
if len(self.measure_start_tree_stages) == 0:
|
434
|
+
raise Exception('No measures found')
|
435
|
+
|
436
|
+
return len(self.measure_start_tree_stages)
|
437
|
+
|
438
|
+
def get_metacomments(self, KeyComment: Optional[str] = None, clear: bool = False) -> List[str]:
|
439
|
+
"""
|
440
|
+
Get all metacomments in the document
|
441
|
+
|
442
|
+
Args:
|
443
|
+
KeyComment: Filter by a specific metacomment key: e.g. Use 'COM' to get only comments starting with\
|
444
|
+
'!!!COM: '. If None, all metacomments are returned.
|
445
|
+
clear: If True, the metacomment key is removed from the comment. E.g. '!!!COM: Coltrane' -> 'Coltrane'.\
|
446
|
+
If False, the metacomment key is kept. E.g. '!!!COM: Coltrane' -> '!!!COM: Coltrane'. \
|
447
|
+
The clear functionality is equivalent to the following code:
|
448
|
+
```python
|
449
|
+
comment = '!!!COM: Coltrane'
|
450
|
+
clean_comment = comment.replace(f"!!!{KeyComment}: ", "")
|
451
|
+
```
|
452
|
+
Other formats are not supported.
|
453
|
+
|
454
|
+
Returns: A list of metacomments.
|
455
|
+
|
456
|
+
Examples:
|
457
|
+
>>> document.get_metacomments()
|
458
|
+
['!!!COM: Coltrane', '!!!voices: 1', '!!!OPR: Blue Train']
|
459
|
+
>>> document.get_metacomments(KeyComment='COM')
|
460
|
+
['!!!COM: Coltrane']
|
461
|
+
>>> document.get_metacomments(KeyComment='COM', clear=True)
|
462
|
+
['Coltrane']
|
463
|
+
>>> document.get_metacomments(KeyComment='non_existing_key')
|
464
|
+
[]
|
465
|
+
"""
|
466
|
+
traversal = MetacommentsTraversal()
|
467
|
+
self.tree.dfs_iterative(traversal)
|
468
|
+
result = []
|
469
|
+
for metacomment in traversal.metacomments:
|
470
|
+
if KeyComment is None or metacomment.encoding.startswith(f"!!!{KeyComment}"):
|
471
|
+
new_comment = metacomment.encoding
|
472
|
+
if clear:
|
473
|
+
new_comment = metacomment.encoding.replace(f"!!!{KeyComment}: ", "")
|
474
|
+
result.append(new_comment)
|
475
|
+
|
476
|
+
return result
|
477
|
+
|
478
|
+
@classmethod
|
479
|
+
def tokens_to_encodings(cls, tokens: Sequence[AbstractToken]):
|
480
|
+
"""
|
481
|
+
Get the encodings of a list of tokens.
|
482
|
+
|
483
|
+
The method is equivalent to the following code:
|
484
|
+
>>> tokens = kp.get_all_tokens()
|
485
|
+
>>> [token.encoding for token in tokens if token.encoding is not None]
|
486
|
+
|
487
|
+
Args:
|
488
|
+
tokens (Sequence[AbstractToken]): list - A list of tokens.
|
489
|
+
|
490
|
+
Returns: List[str] - A list of token encodings.
|
491
|
+
|
492
|
+
Examples:
|
493
|
+
>>> tokens = document.get_all_tokens()
|
494
|
+
>>> Document.tokens_to_encodings(tokens)
|
495
|
+
['!!!COM: Coltrane', '!!!voices: 1', '!!!OPR: Blue Train']
|
496
|
+
"""
|
497
|
+
encodings = [token.encoding for token in tokens if token.encoding is not None]
|
498
|
+
return encodings
|
499
|
+
|
500
|
+
def get_all_tokens(self, filter_by_categories: Optional[Sequence[TokenCategory]] = None) -> List[AbstractToken]:
|
501
|
+
"""
|
502
|
+
Args:
|
503
|
+
filter_by_categories (Optional[Sequence[TokenCategory]]): A list of categories to filter the tokens. If None, all tokens are returned.
|
504
|
+
|
505
|
+
Returns:
|
506
|
+
List[AbstractToken] - A list of all tokens.
|
507
|
+
|
508
|
+
Examples:
|
509
|
+
>>> tokens = document.get_all_tokens()
|
510
|
+
>>> Document.tokens_to_encodings(tokens)
|
511
|
+
>>> [type(t) for t in tokens]
|
512
|
+
[<class 'kernpy.core.token.Token'>, <class 'kernpy.core.token.Token'>, <class 'kernpy.core.token.Token'>]
|
513
|
+
"""
|
514
|
+
computed_categories = TokenCategory.valid(include=filter_by_categories)
|
515
|
+
traversal = TokensTraversal(False, computed_categories)
|
516
|
+
self.tree.dfs_iterative(traversal)
|
517
|
+
return traversal.tokens
|
518
|
+
|
519
|
+
def get_all_tokens_encodings(
|
520
|
+
self,
|
521
|
+
filter_by_categories: Optional[Sequence[TokenCategory]] = None
|
522
|
+
) -> List[str]:
|
523
|
+
"""
|
524
|
+
Args:
|
525
|
+
filter_by_categories (Optional[Sequence[TokenCategory]]): A list of categories to filter the tokens. If None, all tokens are returned.
|
526
|
+
|
527
|
+
|
528
|
+
Returns:
|
529
|
+
list[str] - A list of all token encodings.
|
530
|
+
|
531
|
+
Examples:
|
532
|
+
>>> tokens = document.get_all_tokens_encodings()
|
533
|
+
>>> Document.tokens_to_encodings(tokens)
|
534
|
+
['!!!COM: Coltrane', '!!!voices: 1', '!!!OPR: Blue Train']
|
535
|
+
"""
|
536
|
+
tokens = self.get_all_tokens(filter_by_categories)
|
537
|
+
return Document.tokens_to_encodings(tokens)
|
538
|
+
|
539
|
+
def get_unique_tokens(
|
540
|
+
self,
|
541
|
+
filter_by_categories: Optional[Sequence[TokenCategory]] = None
|
542
|
+
) -> List[AbstractToken]:
|
543
|
+
"""
|
544
|
+
Get unique tokens.
|
545
|
+
|
546
|
+
Args:
|
547
|
+
filter_by_categories (Optional[Sequence[TokenCategory]]): A list of categories to filter the tokens. If None, all tokens are returned.
|
548
|
+
|
549
|
+
Returns:
|
550
|
+
List[AbstractToken] - A list of unique tokens.
|
551
|
+
|
552
|
+
"""
|
553
|
+
computed_categories = TokenCategory.valid(include=filter_by_categories)
|
554
|
+
traversal = TokensTraversal(True, computed_categories)
|
555
|
+
self.tree.dfs_iterative(traversal)
|
556
|
+
return traversal.tokens
|
557
|
+
|
558
|
+
def get_unique_token_encodings(
|
559
|
+
self,
|
560
|
+
filter_by_categories: Optional[Sequence[TokenCategory]] = None
|
561
|
+
) -> List[str]:
|
562
|
+
"""
|
563
|
+
Get unique token encodings.
|
564
|
+
|
565
|
+
Args:
|
566
|
+
filter_by_categories (Optional[Sequence[TokenCategory]]): A list of categories to filter the tokens. If None, all tokens are returned.
|
567
|
+
|
568
|
+
Returns: List[str] - A list of unique token encodings.
|
569
|
+
|
570
|
+
"""
|
571
|
+
tokens = self.get_unique_tokens(filter_by_categories)
|
572
|
+
return Document.tokens_to_encodings(tokens)
|
573
|
+
|
574
|
+
def get_voices(self, clean: bool = False):
|
575
|
+
"""
|
576
|
+
Get the voices of the document.
|
577
|
+
|
578
|
+
Args
|
579
|
+
clean (bool): Remove the first '!' from the voice name.
|
580
|
+
|
581
|
+
Returns: A list of voices.
|
582
|
+
|
583
|
+
Examples:
|
584
|
+
>>> document.get_voices()
|
585
|
+
['!sax', '!piano', '!bass']
|
586
|
+
>>> document.get_voices(clean=True)
|
587
|
+
['sax', 'piano', 'bass']
|
588
|
+
>>> document.get_voices(clean=False)
|
589
|
+
['!sax', '!piano', '!bass']
|
590
|
+
"""
|
591
|
+
from kernpy.core import TokenCategory
|
592
|
+
voices = self.get_all_tokens(filter_by_categories=[TokenCategory.INSTRUMENTS])
|
593
|
+
|
594
|
+
if clean:
|
595
|
+
voices = [voice[1:] for voice in voices]
|
596
|
+
return voices
|
597
|
+
|
598
|
+
def clone(self):
|
599
|
+
"""
|
600
|
+
Create a deep copy of the Document instance.
|
601
|
+
|
602
|
+
Returns: A new instance of Document with the tree copied.
|
603
|
+
|
604
|
+
"""
|
605
|
+
result = Document(copy(self.tree))
|
606
|
+
result.measure_start_tree_stages = copy(self.measure_start_tree_stages)
|
607
|
+
result.page_bounding_boxes = copy(self.page_bounding_boxes)
|
608
|
+
result.header_stage = copy(self.header_stage)
|
609
|
+
|
610
|
+
return result
|
611
|
+
|
612
|
+
def append_spines(self, spines) -> None:
|
613
|
+
"""
|
614
|
+
Append the spines directly to current document tree.
|
615
|
+
|
616
|
+
Args:
|
617
|
+
spines(list): A list of spines to append.
|
618
|
+
|
619
|
+
Returns: None
|
620
|
+
|
621
|
+
Examples:
|
622
|
+
>>> import kernpy as kp
|
623
|
+
>>> doc, _ = kp.read('score.krn')
|
624
|
+
>>> spines = [
|
625
|
+
>>> '4e\t4f\t4g\t4a\n4b\t4c\t4d\t4e\n=\t=\t=\t=\n',
|
626
|
+
>>> '4c\t4d\t4e\t4f\n4g\t4a\t4b\t4c\n=\t=\t=\t=\n',
|
627
|
+
>>> ]
|
628
|
+
>>> doc.append_spines(spines)
|
629
|
+
None
|
630
|
+
"""
|
631
|
+
raise NotImplementedError()
|
632
|
+
if len(spines) != self.get_spine_count():
|
633
|
+
raise Exception(f"Spines count mismatch: {len(spines)} != {self.get_spine_count()}")
|
634
|
+
|
635
|
+
for spine in spines:
|
636
|
+
return
|
637
|
+
|
638
|
+
    def add(self, other: 'Document', *, check_core_spines_only: Optional[bool] = False) -> 'Document':
        """
        Concatenate one document to the current document: Modify the current object!

        Args:
            other: The document to concatenate.
            check_core_spines_only: If True, only the core spines (**kern and **mens) are checked. If False, all spines are checked.

        Returns ('Document'): The current document (self) with the other document concatenated.

        Raises:
            Exception: If the two documents' headers do not match under the
                chosen `check_core_spines_only` policy.
        """
        if not Document.match(self, other, check_core_spines_only=check_core_spines_only):
            raise Exception(f'Documents are not compatible for addition. '
                            f'Headers do not match with check_core_spines_only={check_core_spines_only}. '
                            f'self: {self.get_header_nodes()}, other: {other.get_header_nodes()}. ')

        current_header_nodes = self.get_header_stage()
        other_header_nodes = other.get_header_stage()

        current_leaf_nodes = self.get_leaves()
        flatten = lambda lst: [item for sublist in lst for item in sublist]
        # NOTE(review): flatten() iterates the elements of c.children in an
        # inner loop, which assumes those elements are themselves iterable
        # (lists of nodes) — confirm Node.children nesting here.
        other_first_level_children = [flatten(c.children) for c in other_header_nodes] # avoid header stage

        # NOTE: zip(..., strict=False) requires Python >= 3.10; extra spines
        # in `other` beyond the current leaves are silently dropped.
        for current_leaf, other_first_level_child in zip(current_leaf_nodes, other_first_level_children, strict=False):
            # Ignore extra spines from other document.
            # But if there are extra spines in the current document, it will raise an exception.
            if current_leaf.token.encoding == TERMINATOR:
                # remove the '*-' token from the current document
                current_leaf_index = current_leaf.parent.children.index(current_leaf)
                current_leaf.parent.children.pop(current_leaf_index)
                # NOTE(review): this inserts the list object itself into
                # children (not its elements) — verify intended structure.
                current_leaf.parent.children.insert(current_leaf_index, other_first_level_child)

            self.tree.add_node(
                stage=len(self.tree.stages) - 1,  # TODO: check offset 0, +1, -1 ????
                parent=current_leaf,
                token=other_first_level_child.token,
                last_spine_operator_node=other_first_level_child.last_spine_operator_node,
                previous_signature_nodes=other_first_level_child.last_signature_nodes,
                header_node=other_first_level_child.header_node
            )

        return self
|
679
|
+
|
680
|
+
def get_header_nodes(self) -> List[HeaderToken]:
|
681
|
+
"""
|
682
|
+
Get the header nodes of the current document.
|
683
|
+
|
684
|
+
Returns: List[HeaderToken]: A list with the header nodes of the current document.
|
685
|
+
"""
|
686
|
+
return [token for token in self.get_all_tokens(filter_by_categories=None) if isinstance(token, HeaderToken)]
|
687
|
+
|
688
|
+
def get_spine_ids(self) -> List[int]:
|
689
|
+
"""
|
690
|
+
Get the indexes of the current document.
|
691
|
+
|
692
|
+
Returns List[int]: A list with the indexes of the current document.
|
693
|
+
|
694
|
+
Examples:
|
695
|
+
>>> document.get_all_spine_indexes()
|
696
|
+
[0, 1, 2, 3, 4]
|
697
|
+
"""
|
698
|
+
header_nodes = self.get_header_nodes()
|
699
|
+
return [node.spine_id for node in header_nodes]
|
700
|
+
|
701
|
+
def frequencies(self, token_categories: Optional[Sequence[TokenCategory]] = None) -> Dict:
|
702
|
+
"""
|
703
|
+
Frequency of tokens in the document.
|
704
|
+
|
705
|
+
|
706
|
+
Args:
|
707
|
+
token_categories (Optional[Sequence[TokenCategory]]): If None, all tokens are considered.
|
708
|
+
Returns (Dict):
|
709
|
+
A dictionary with the category and the number of occurrences of each token.
|
710
|
+
|
711
|
+
"""
|
712
|
+
tokens = self.get_all_tokens(filter_by_categories=token_categories)
|
713
|
+
frequencies = {}
|
714
|
+
for t in tokens:
|
715
|
+
if t.encoding in frequencies:
|
716
|
+
frequencies[t.encoding]['occurrences'] += 1
|
717
|
+
else:
|
718
|
+
frequencies[t.encoding] = {
|
719
|
+
'occurrences': 1,
|
720
|
+
'category': t.category.name,
|
721
|
+
}
|
722
|
+
|
723
|
+
return frequencies
|
724
|
+
|
725
|
+
def split(self) -> List['Document']:
|
726
|
+
"""
|
727
|
+
Split the current document into a list of documents, one for each **kern spine.
|
728
|
+
Each resulting document will contain one **kern spine along with all non-kern spines.
|
729
|
+
|
730
|
+
Returns:
|
731
|
+
List['Document']: A list of documents, where each document contains one **kern spine
|
732
|
+
and all non-kern spines from the original document.
|
733
|
+
|
734
|
+
Examples:
|
735
|
+
>>> document.split()
|
736
|
+
[<Document: score.krn>, <Document: score.krn>, <Document: score.krn>]
|
737
|
+
"""
|
738
|
+
raise NotImplementedError
|
739
|
+
new_documents = []
|
740
|
+
self_document_copy = deepcopy(self)
|
741
|
+
kern_header_nodes = [node for node in self_document_copy.get_header_nodes() if node.encoding == '**kern']
|
742
|
+
other_header_nodes = [node for node in self_document_copy.get_header_nodes() if node.encoding != '**kern']
|
743
|
+
spine_ids = self_document_copy.get_spine_ids()
|
744
|
+
|
745
|
+
for header_node in kern_header_nodes:
|
746
|
+
if header_node.spine_id not in spine_ids:
|
747
|
+
continue
|
748
|
+
|
749
|
+
spine_ids.remove(header_node.spine_id)
|
750
|
+
|
751
|
+
new_tree = deepcopy(self.tree)
|
752
|
+
prev_node = new_tree.root
|
753
|
+
while not isinstance(prev_node, HeaderToken):
|
754
|
+
prev_node = prev_node.children[0]
|
755
|
+
|
756
|
+
if not prev_node or not isinstance(prev_node, HeaderToken):
|
757
|
+
raise Exception(f'Header node not found: {prev_node} in {header_node}')
|
758
|
+
|
759
|
+
new_children = list(filter(lambda x: x.spine_id == header_node.spine_id, prev_node.children))
|
760
|
+
new_tree.root = new_children
|
761
|
+
|
762
|
+
new_document = Document(new_tree)
|
763
|
+
|
764
|
+
new_documents.append(new_document)
|
765
|
+
|
766
|
+
return new_documents
|
767
|
+
|
768
|
+
@classmethod
def to_concat(cls, first_doc: 'Document', second_doc: 'Document', deep_copy: bool = True) -> 'Document':
    """
    Concatenate two documents.

    Args:
        first_doc (Document): The first document.
        second_doc (Document): The second document.
        deep_copy (bool): If True, the documents are deep copied. If False, the documents are shallow copied.

    Returns:
        Document: A new instance of Document with the documents concatenated.
    """
    # Clone both inputs when a deep copy is requested so the originals are
    # never mutated; otherwise concatenate in place on the given objects.
    if deep_copy:
        base = first_doc.clone()
        tail = second_doc.clone()
    else:
        base = first_doc
        tail = second_doc

    base.add(tail)
    return base
|
785
|
+
|
786
|
+
@classmethod
def match(cls, a: 'Document', b: 'Document', *, check_core_spines_only: Optional[bool] = False) -> bool:
    """
    Match two documents. Two documents match if they have the same spine structure.

    Args:
        a (Document): The first document.
        b (Document): The second document.
        check_core_spines_only (Optional[bool]): If True, only the core spines (**kern and **mens) are checked. If False, all spines are checked.

    Returns:
        bool: True if the documents match, False otherwise.
    """
    if check_core_spines_only:
        # Compare only the headers whose encoding is a core one (**kern/**mens).
        left = [token.encoding for token in a.get_header_nodes() if token.encoding in CORE_HEADERS]
        right = [token.encoding for token in b.get_header_nodes() if token.encoding in CORE_HEADERS]
    else:
        # Compare the full ordered list of header encodings.
        left = [token.encoding for token in a.get_header_nodes()]
        right = [token.encoding for token in b.get_header_nodes()]

    return left == right
|
807
|
+
|
808
|
+
|
809
|
+
def to_transposed(self, interval: str, direction: str = Direction.UP.value) -> 'Document':
    """
    Create a new document with the transposed notes without modifying the original document.

    Args:
        interval (str): The name of the interval to transpose. It can be 'P4', 'P5', 'M2', etc. Check the
            kp.AVAILABLE_INTERVALS for the available intervals.
        direction (str): The direction to transpose. It can be 'up' or 'down'.

    Returns:
        Document: A transposed deep copy of this document; the receiver is left untouched.

    Raises:
        ValueError: If ``interval`` is not in AVAILABLE_INTERVALS, or ``direction``
            is neither ``Direction.UP.value`` nor ``Direction.DOWN.value``.
    """
    # Validate inputs up front so the clone is only made for valid requests.
    if interval not in AVAILABLE_INTERVALS:
        raise ValueError(
            f"Interval {interval!r} is not available. "
            f"Available intervals are: {AVAILABLE_INTERVALS}"
        )

    if direction not in (Direction.UP.value, Direction.DOWN.value):
        raise ValueError(
            f"Direction {direction!r} is not available. "
            f"Available directions are: "
            f"{Direction.UP.value!r}, {Direction.DOWN.value!r}"
        )

    # Work on a clone; all token replacements happen on the copy's tree.
    new_document = self.clone()

    # BFS through the tree
    root = new_document.tree.root
    queue = Queue()
    queue.put(root)

    while not queue.empty():
        node = queue.get()

        if isinstance(node.token, NoteRestToken):
            orig_token = node.token

            new_subtokens = []
            transposed_pitch_encoding = None

            # Transpose each pitch subtoken in the pitch-duration list
            for subtoken in orig_token.pitch_duration_subtokens:
                if subtoken.category == TokenCategory.PITCH:
                    # transpose() returns the new pitch encoding (Humdrum in/out).
                    tp = transpose(
                        input_encoding=subtoken.encoding,
                        interval=IntervalsByName[interval],
                        direction=direction,
                        input_format=NotationEncoding.HUMDRUM.value,
                        output_format=NotationEncoding.HUMDRUM.value,
                    )
                    new_subtokens.append(Subtoken(tp, subtoken.category))
                    transposed_pitch_encoding = tp
                else:
                    # leave duration (non-pitch) subtokens untouched
                    new_subtokens.append(Subtoken(subtoken.encoding, subtoken.category))

            # Replace the node's token with a new NoteRestToken.
            # NOTE(review): if a NoteRestToken carries no PITCH subtoken (e.g. a
            # rest), transposed_pitch_encoding stays None and is passed as the
            # new encoding — confirm NoteRestToken tolerates encoding=None.
            node.token = NoteRestToken(
                encoding=transposed_pitch_encoding,
                pitch_duration_subtokens=new_subtokens,
                decoration_subtokens=orig_token.decoration_subtokens,
            )

        # enqueue children so every spine node is visited
        for child in node.children:
            queue.put(child)

    # Return the transposed clone
    return new_document
|
880
|
+
|
881
|
+
|
882
|
+
def __iter__(self):
    """
    Get the indexes to export all the document.

    Returns: An iterator over the measure indexes, from the first measure
        up to and including the last one.
    """
    first_measure = self.get_first_measure()
    last_measure = self.measures_count()
    return iter(range(first_measure, last_measure + 1))
|
889
|
+
|
890
|
+
def __next__(self):
    """
    Get the next index to export the document.

    Returns: The next measure index, advancing on each call.

    Raises:
        StopIteration: When every measure index has been consumed; the
            internal iterator is reset so a new pass can start afterwards.
    """
    # BUG FIX: the previous implementation rebuilt the range iterator on
    # every call (`next(iter(range(...)))`), so next(document) returned the
    # first measure forever and never raised StopIteration. Keep one lazily
    # created iterator on the instance so successive calls advance.
    if getattr(self, '_measure_iterator', None) is None:
        self._measure_iterator = iter(range(self.get_first_measure(), self.measures_count() + 1))
    try:
        return next(self._measure_iterator)
    except StopIteration:
        # Reset so the document can be walked again with next() later.
        self._measure_iterator = None
        raise
|
897
|
+
|
898
|
+
|
899
|
+
# tree traversal utils
class MetacommentsTraversal(TreeTraversalInterface):
    """Tree traversal strategy that collects every metacomment token it visits."""

    def __init__(self):
        # Metacomment tokens accumulated in visit order.
        self.metacomments = []

    def visit(self, node):
        """Record the node's token when it is a MetacommentToken."""
        token = node.token
        if isinstance(token, MetacommentToken):
            self.metacomments.append(token)
|
907
|
+
|
908
|
+
|
909
|
+
class TokensTraversal(TreeTraversalInterface):
    """Tree traversal strategy that collects tokens, optionally de-duplicated
    by encoding and filtered by category."""

    def __init__(
            self,
            non_repeated: bool,
            filter_by_categories
    ):
        """
        Create an instance of `TokensTraversal`.
        Args:
            non_repeated: If True, only unique tokens (by encoding) are returned. If False, all tokens are returned.
            filter_by_categories: A list of categories to filter the tokens. If None, all tokens are returned.
        """
        # Tokens accumulated in visit order.
        self.tokens = []
        # PERF FIX: was a list, making the membership test below O(n) per
        # visited node (quadratic over the whole traversal); a set gives O(1).
        self.seen_encodings = set()
        self.non_repeated = non_repeated
        # Normalize None to "all categories" once, so visit() never sees None.
        self.filter_by_categories = [t for t in TokenCategory] if filter_by_categories is None else filter_by_categories

    def visit(self, node):
        """Collect node.token when it passes the uniqueness and category filters."""
        token = node.token
        if not token:
            return
        if self.non_repeated and token.encoding in self.seen_encodings:
            return
        # No None check needed: __init__ replaced None with the full category list.
        if token.category not in self.filter_by_categories:
            return
        self.tokens.append(token)
        if self.non_repeated:
            self.seen_encodings.add(token.encoding)
|
934
|
+
|
935
|
+
|
936
|
+
class TraversalFactory:
    """Factory that builds a tree traversal strategy from its string name."""

    class Categories(Enum):
        # Supported traversal type identifiers.
        METACOMMENTS = "metacomments"
        TOKENS = "tokens"

    @classmethod
    def create(
            cls,
            traversal_type: str,
            non_repeated: bool,
            filter_by_categories: Optional[Sequence[TokenCategory]]
    ) -> TreeTraversalInterface:
        """
        Create an instance of `TreeTraversalInterface` based on the `traversal_type`.
        Args:
            traversal_type: The type of traversal to use. Possible values are:
                - "metacomments"
                - "tokens"
            non_repeated: If True, only unique tokens are collected (tokens traversal only).
            filter_by_categories: Categories used to filter tokens (tokens traversal only).

        Returns: An instance of `TreeTraversalInterface`.

        Raises:
            ValueError: If `traversal_type` is not a known traversal name.
        """
        if traversal_type == cls.Categories.METACOMMENTS.value:
            return MetacommentsTraversal()
        if traversal_type == cls.Categories.TOKENS.value:
            return TokensTraversal(non_repeated, filter_by_categories)
        raise ValueError(f"Unknown traversal type: {traversal_type}")
|
965
|
+
|