kernpy 0.0.2__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. kernpy/__init__.py +30 -0
  2. kernpy/__main__.py +127 -0
  3. kernpy/core/__init__.py +119 -0
  4. kernpy/core/_io.py +48 -0
  5. kernpy/core/base_antlr_importer.py +61 -0
  6. kernpy/core/base_antlr_spine_parser_listener.py +196 -0
  7. kernpy/core/basic_spine_importer.py +43 -0
  8. kernpy/core/document.py +965 -0
  9. kernpy/core/dyn_importer.py +30 -0
  10. kernpy/core/dynam_spine_importer.py +42 -0
  11. kernpy/core/error_listener.py +51 -0
  12. kernpy/core/exporter.py +535 -0
  13. kernpy/core/fing_spine_importer.py +42 -0
  14. kernpy/core/generated/kernSpineLexer.interp +444 -0
  15. kernpy/core/generated/kernSpineLexer.py +535 -0
  16. kernpy/core/generated/kernSpineLexer.tokens +236 -0
  17. kernpy/core/generated/kernSpineParser.interp +425 -0
  18. kernpy/core/generated/kernSpineParser.py +9954 -0
  19. kernpy/core/generated/kernSpineParser.tokens +236 -0
  20. kernpy/core/generated/kernSpineParserListener.py +1200 -0
  21. kernpy/core/generated/kernSpineParserVisitor.py +673 -0
  22. kernpy/core/generic.py +426 -0
  23. kernpy/core/gkern.py +526 -0
  24. kernpy/core/graphviz_exporter.py +89 -0
  25. kernpy/core/harm_spine_importer.py +41 -0
  26. kernpy/core/import_humdrum_old.py +853 -0
  27. kernpy/core/importer.py +285 -0
  28. kernpy/core/importer_factory.py +43 -0
  29. kernpy/core/kern_spine_importer.py +73 -0
  30. kernpy/core/mens_spine_importer.py +23 -0
  31. kernpy/core/mhxm_spine_importer.py +44 -0
  32. kernpy/core/pitch_models.py +338 -0
  33. kernpy/core/root_spine_importer.py +58 -0
  34. kernpy/core/spine_importer.py +45 -0
  35. kernpy/core/text_spine_importer.py +43 -0
  36. kernpy/core/tokenizers.py +239 -0
  37. kernpy/core/tokens.py +2011 -0
  38. kernpy/core/transposer.py +300 -0
  39. kernpy/io/__init__.py +14 -0
  40. kernpy/io/public.py +355 -0
  41. kernpy/polish_scores/__init__.py +13 -0
  42. kernpy/polish_scores/download_polish_dataset.py +357 -0
  43. kernpy/polish_scores/iiif.py +47 -0
  44. kernpy/test_grammar.sh +22 -0
  45. kernpy/util/__init__.py +14 -0
  46. kernpy/util/helpers.py +55 -0
  47. kernpy/util/store_cache.py +35 -0
  48. kernpy/visualize_analysis.sh +23 -0
  49. kernpy-1.0.1.dist-info/METADATA +497 -0
  50. kernpy-1.0.1.dist-info/RECORD +51 -0
  51. {kernpy-0.0.2.dist-info → kernpy-1.0.1.dist-info}/WHEEL +1 -2
  52. kernpy/example.py +0 -1
  53. kernpy-0.0.2.dist-info/LICENSE +0 -19
  54. kernpy-0.0.2.dist-info/METADATA +0 -19
  55. kernpy-0.0.2.dist-info/RECORD +0 -7
  56. kernpy-0.0.2.dist-info/top_level.txt +0 -1
kernpy/__init__.py CHANGED
@@ -0,0 +1,30 @@
1
+ """
2
+ # kernpy
3
+
4
+ =====
5
+
6
+
7
+ Python Humdrum **kern and **mens utilities package.
8
+
9
+
10
+
11
+ Execute the following command to run **kernpy** as a module:
12
+ ```shell
13
+ python -m kernpy --help
14
+ python -m kernpy <command> <options>
15
+ ```
16
+ """
17
+
18
+
19
+ from .core import *
20
+
21
+ from .io import *
22
+
23
+ from .util import *
24
+
25
+ from .polish_scores import *
26
+
27
+
28
+
29
+
30
+
kernpy/__main__.py ADDED
@@ -0,0 +1,127 @@
1
+ """
2
+ This module contains the main function for the kernpy package.
3
+
4
+ Usage:
5
+ python -m kernpy
6
+ """
7
+
8
+ import argparse
9
+ import sys
10
+ from pathlib import Path
11
+
12
+ from kernpy import polish_scores, ekern_to_krn, kern_to_ekern
13
+
14
+
15
def create_parser() -> argparse.ArgumentParser:
    """Build and return the argument parser for the kernpy command-line interface."""
    parser = argparse.ArgumentParser(description="kernpy CLI tool")

    parser.add_argument('--verbose', type=int, default=1, help='Verbosity level')

    # Exactly one top-level action must be chosen per invocation.
    actions = parser.add_mutually_exclusive_group(required=True)
    actions.add_argument('--ekern2kern', action='store_true', help='Convert files from ekern to kern')
    actions.add_argument('--kern2ekern', action='store_true', help='Convert files from kern to ekern')
    actions.add_argument('--polish', action='store_true', help='Run Polish Exporter')
    actions.add_argument('--generate_fragments', action='store_true', help='Generate Fragments')

    # Shared conversion options.
    parser.add_argument('--input_path', type=str, help='Input file or directory')
    parser.add_argument('--output_path', type=str, help='Output file or directory')
    parser.add_argument('-r', '--recursive', action='store_true', help='Enable recursive directory processing')

    # Options consumed only by the Polish Exporter action.
    parser.add_argument('--input_directory', type=str, help='Polish: Input directory')
    parser.add_argument('--output_directory', type=str, help='Polish: Output directory')
    parser.add_argument('--instrument', type=str, help='Polish: Instrument name')
    parser.add_argument('--kern_type', type=str, help='Polish: "krn" or "ekrn"')
    parser.add_argument('--kern_spines_filter', type=str, help='Polish: Filter for number of kern spines')
    parser.add_argument('--remove_empty_dirs', action='store_true', help='Polish: Remove empty directories')

    return parser
40
+
41
+
42
def find_files(directory: Path, patterns: list[str], recursive: bool = False) -> list[Path]:
    """Collect paths under *directory* matching any glob pattern in *patterns*.

    When *recursive* is true, patterns are matched in all subdirectories too.
    """
    matcher = directory.rglob if recursive else directory.glob
    matches: list[Path] = []
    for pattern in patterns:
        matches.extend(matcher(pattern))
    return matches
50
+
51
+
52
def handle_ekern2kern(args):
    """Convert the ekern file(s) at ``args.input_path`` to kern format.

    A single file is converted directly (errors propagate); a directory is
    scanned for *.ekrn / *.ekern files and each failure is reported to stderr.
    """
    src = Path(args.input_path)
    dst = Path(args.output_path) if args.output_path else None

    if src.is_file():
        target = dst if dst is not None else src.with_suffix(".krn")
        ekern_to_krn(str(src), str(target))
        if args.verbose:
            print(f"Converted: {src} → {target}")
        return

    for candidate in find_files(src, ["*.ekrn", "*.ekern"], recursive=args.recursive):
        target = candidate.with_suffix(".krn")
        try:
            ekern_to_krn(str(candidate), str(target))
            if args.verbose:
                print(f"Converted: {candidate} → {target}")
        except Exception as error:
            print(f"Error converting {candidate}: {error}", file=sys.stderr)
72
+
73
+
74
def handle_kern2ekern(args):
    """Convert the kern file(s) at ``args.input_path`` to ekern format.

    A single file is converted directly (errors propagate); a directory is
    scanned for *.krn / *.kern files and each failure is reported to stderr.
    """
    src = Path(args.input_path)
    dst = Path(args.output_path) if args.output_path else None

    if src.is_file():
        target = dst if dst is not None else src.with_suffix(".ekrn")
        kern_to_ekern(str(src), str(target))
        if args.verbose:
            print(f"Converted: {src} → {target}")
        return

    for candidate in find_files(src, ["*.krn", "*.kern"], recursive=args.recursive):
        target = candidate.with_suffix(".ekrn")
        try:
            kern_to_ekern(str(candidate), str(target))
            if args.verbose:
                print(f"Converted: {candidate} → {target}")
        except Exception as error:
            print(f"Error converting {candidate}: {error}", file=sys.stderr)
94
+
95
+
96
def handle_polish_exporter(args):
    """Run the Polish dataset exporter with the options collected from the CLI.

    Delegates to ``polish_scores.download_polish_dataset.main``; note that the
    ``--instrument`` CLI option is not forwarded here — presumably handled
    elsewhere or unused; TODO confirm against the exporter's signature.
    """
    if args.verbose:
        print(f"Running Polish Exporter on {args.input_directory} → {args.output_directory}")

    polish_scores.download_polish_dataset.main(
        input_directory=args.input_directory,
        output_directory=args.output_directory,
        kern_spines_filter=args.kern_spines_filter,
        exporter_kern_type=args.kern_type,
        remove_empty_directories=args.remove_empty_dirs,
    )
107
+
108
+
109
def main():
    """CLI entry point: parse arguments and dispatch to the selected action."""
    parser = create_parser()
    args = parser.parse_args()

    if args.verbose > 2:
        print("Arguments:")
        for key, val in vars(args).items():
            print(f" {key}: {val}")

    if args.ekern2kern:
        handle_ekern2kern(args)
    elif args.kern2ekern:
        handle_kern2ekern(args)
    elif args.polish:
        handle_polish_exporter(args)
    elif args.generate_fragments:
        # Fix: this flag was accepted by the parser but silently ignored.
        # Fail loudly until fragment generation is implemented.
        parser.error("--generate_fragments is not implemented yet")


if __name__ == "__main__":
    main()
@@ -0,0 +1,119 @@
1
+ """
2
+ kernpy.core
3
+
4
+ =====
5
+
6
+ This module contains the core functionality of the `kernpy` package.
7
+ """
8
+
9
+ from .tokens import *
10
+ from .document import *
11
+ from .importer import *
12
+ from .exporter import *
13
+ from .graphviz_exporter import *
14
+ from .importer_factory import *
15
+ from .dyn_importer import *
16
+ from .dynam_spine_importer import *
17
+ from .fing_spine_importer import *
18
+ from .harm_spine_importer import *
19
+ from .kern_spine_importer import *
20
+ from .mens_spine_importer import *
21
+ from .root_spine_importer import *
22
+ from .text_spine_importer import *
23
+ from .mhxm_spine_importer import *
24
+ from .basic_spine_importer import *
25
+ from .generic import *
26
+ from .tokenizers import *
27
+ from .transposer import *
28
+ from .pitch_models import *
29
+ from .gkern import *
30
+
31
+
32
# Public API of kernpy.core.
# Fix: the original list contained duplicate entries ('Encoding' and
# 'KernTokenizer' each appeared twice); every name is now listed exactly once.
__all__ = [
    'Document',
    'TokenCategory',
    'Importer',
    'ExportOptions',
    'Exporter',
    'Encoding',
    'GraphvizExporter',
    'ekern_to_krn',
    'kern_to_ekern',
    'get_kern_from_ekern',
    'Tokenizer',
    'KernTokenizer',
    'EkernTokenizer',
    'BekernTokenizer',
    'BkernTokenizer',
    'TokenizerFactory',
    'Token',
    'BEKERN_CATEGORIES',
    'DynSpineImporter',
    'DynamSpineImporter',
    'FingSpineImporter',
    'HarmSpineImporter',
    'KernSpineImporter',
    'MensSpineImporter',
    'RootSpineImporter',
    'TextSpineImporter',
    'MxhmSpineImporter',
    'BasicSpineImporter',
    'SpineOperationToken',
    'PitchRest',
    'Duration',
    'DurationClassical',
    'DurationMensural',
    'read',
    'create',
    'export',
    'store',
    'store_graph',
    'transposer',
    'get_spine_types',
    'createImporter',
    'TokenCategoryHierarchyMapper',
    'TOKEN_SEPARATOR',
    'DECORATION_SEPARATOR',
    'Subtoken',
    'AbstractToken',
    'SimpleToken',
    'ComplexToken',
    'CompoundToken',
    'NoteRestToken',
    'HeaderToken',
    'HeaderTokenGenerator',
    'NotationEncoding',
    'AgnosticPitch',
    'PitchExporter',
    'PitchExporterFactory',
    'HumdrumPitchExporter',
    'AmericanPitchExporter',
    'PitchImporter',
    'PitchImporterFactory',
    'HumdrumPitchImporter',
    'AmericanPitchImporter',
    'Direction',
    'Intervals',
    'IntervalsByName',
    'transpose',
    'transpose_agnostics',
    'transpose_encoding_to_agnostic',
    'transpose_agnostic_to_encoding',
    'PositionInStaff',
    'distance',
    'agnostic_distance',
    'PitchPositionReferenceSystem',
    'Clef',
    'GClef',
    'F3Clef',
    'F4Clef',
    'C1Clef',
    'C2Clef',
    'C3Clef',
    'C4Clef',
    'ClefFactory',
    'AVAILABLE_INTERVALS',
]
119
+
kernpy/core/_io.py ADDED
@@ -0,0 +1,48 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ from pathlib import Path
5
+ from typing import Optional, Union
6
+
7
+
8
def _write(path: Union[str, Path], content: str) -> None:
    """
    Store content in a file, creating missing parent directories.

    Args:
        path (Union[str, Path]): Path to the file.
        content (str): Content to be stored in the file.

    Returns: None
    """
    # Fix: the previous implementation called os.makedirs(os.path.dirname(path)),
    # which raises for a bare filename (dirname == ''), and its existence check
    # inspected a different path (the absolute one) than the one it created.
    # Path.parent.mkdir handles both cases; exist_ok makes the pre-check redundant.
    Path(path).parent.mkdir(parents=True, exist_ok=True)

    with open(path, 'w') as f:
        f.write(content)
24
+
25
+
26
def find_all_files(
        path: Path,
        extension: Optional[str] = None) -> list:
    """
    Find all files with the given extension in the given directory.

    Args:
        path (Path): Path to the directory where the files are located.
        extension (Optional[str]): Extension (without the dot) of the files to
            be found. If None, every entry under the directory is returned.

    Returns (List[Path]): List of paths to the found entries.

    Examples:
        >>> find_all_files('/kern_files', 'krn')
        ...

        >>> find_all_files('/files' )
        ...
    """
    pattern = '**/*' if extension is None else f'**/*.{extension}'
    return list(Path(path).glob(pattern))
@@ -0,0 +1,61 @@
1
+ from __future__ import annotations
2
+
3
+ from abc import ABC, abstractmethod
4
+
5
+ from antlr4 import CommonTokenStream, ParseTreeWalker
6
+ from antlr4.InputStream import InputStream
7
+
8
+
9
class BaseANTLRImporter(ABC):
    """Template base for ANTLR-based importers.

    The constructor wires the standard ANTLR pipeline — char stream → lexer →
    token stream → parser — using the concrete factories supplied by subclasses.
    """

    def __init__(self, input_string):
        # Build the lexer/parser chain from the raw input text.
        char_stream = InputStream(input_string)
        self.lexer = self.createLexer(char_stream)
        token_stream = CommonTokenStream(self.lexer)
        self.parser = self.createParser(token_stream)

    @abstractmethod
    def createLexer(self, charStream):
        """Return the concrete ANTLR lexer for *charStream*."""
        pass

    @abstractmethod
    def createParser(self, tokenStream):
        """Return the concrete ANTLR parser for *tokenStream*."""
        pass

    @abstractmethod
    def startRule(self):
        """Invoke the grammar's start rule and return its parse-tree context."""
        pass
27
+
28
+
29
class BaseANTLRListenerImporter(BaseANTLRImporter):
    """ANTLR importer variant that processes the parse tree with a listener."""

    def __init__(self, input_string):
        super().__init__(input_string)
        self.listener = self.createListener()

    def start(self):
        """Parse the input and walk the resulting tree with the listener."""
        parse_tree = self.startRule()
        ParseTreeWalker().walk(self.listener, parse_tree)

    @abstractmethod
    def createListener(self):
        """Return the concrete listener instance."""
        pass
43
+
44
+
45
class BaseANTLRVisitorImporter(BaseANTLRImporter):
    """ANTLR importer variant that processes the parse tree with a visitor."""

    # NOTE(review): the parameter name `input` shadows the builtin; kept as-is
    # because renaming a caller-visible __init__ parameter could break keyword
    # callers. Sibling classes use `input_string`.
    def __init__(self, input):
        super().__init__(input)
        self.visitor = self.createVisitor()

    def start(self):
        """Parse the input and visit the resulting tree."""
        tree_context = self.startRule()
        self.visitStart(tree_context)

    @abstractmethod
    def createVisitor(self):
        """Return the concrete visitor instance."""
        pass

    @abstractmethod
    def visitStart(self, start_context):
        """Visit *start_context* with the concrete visitor."""
        pass
61
+
@@ -0,0 +1,196 @@
1
+ from .generated.kernSpineParserListener import kernSpineParserListener
2
+ from .generated.kernSpineParser import kernSpineParser
3
+ from .tokens import BarToken, SimpleToken, TokenCategory, Subtoken, ChordToken, BoundingBox, \
4
+ BoundingBoxToken, ClefToken, KeySignatureToken, TimeSignatureToken, MeterSymbolToken, BarToken, NoteRestToken, \
5
+ KeyToken, InstrumentToken
6
+
7
+ from typing import List
8
+
9
+
10
class BaseANTLRSpineParserListener(kernSpineParserListener):
    """
    Base ANTLR listener shared by the spine importers.

    While the tree walker fires enter/exit callbacks, this listener assembles
    the kernpy token for the spine item being parsed (note/rest, chord,
    barline, clef, signatures, bounding boxes, ...); the result is left in
    ``self.token``.
    """

    def __init__(self):
        self.token = None  # token built for the most recently parsed item
        self.first_chord_element = None
        self.chord_tokens = None  # NoteRestTokens collected while inside a chord
        self.duration_subtokens = []
        self.diatonic_pitch_and_octave_subtoken = None
        self.accidental_subtoken = None
        # Decorations are kept in the order they appear in the encoding: they
        # cannot be sorted by rule class name because some rules have subrules
        # (e.g. ties, articulations).
        self.decorations: List[Subtoken] = []
        self.in_chord = False
        # self.page_start_rows = []  # TODO
        self.measure_start_rows = []
        self.last_bounding_box = None

    def enterStart(self, ctx: kernSpineParser.StartContext):
        # Reset per-item state before each parse.
        self.token = None
        self.duration_subtokens = []
        self.diatonic_pitch_and_octave_subtoken = None
        self.accidental_subtoken = None
        self.decorations = []

    def exitDuration(self, ctx: kernSpineParser.DurationContext):
        # Duration = modern duration figure, plus one '.' per augmentation dot,
        # plus optional grace-note / appoggiatura markers.
        self.duration_subtokens = [Subtoken(ctx.modernDuration().getText(), TokenCategory.DURATION)]
        for _ in range(len(ctx.augmentationDot())):
            self.duration_subtokens.append(Subtoken(".", TokenCategory.DURATION))

        if ctx.graceNote():
            self.duration_subtokens.append(Subtoken(ctx.graceNote().getText(), TokenCategory.DURATION))

        if ctx.appoggiatura():
            self.duration_subtokens.append(Subtoken(ctx.appoggiatura().getText(), TokenCategory.DURATION))

    def exitDiatonicPitchAndOctave(self, ctx: kernSpineParser.DiatonicPitchAndOctaveContext):
        self.diatonic_pitch_and_octave_subtoken = Subtoken(ctx.getText(), TokenCategory.PITCH)

    def exitNoteDecoration(self, ctx: kernSpineParser.NoteDecorationContext):
        # Decorations are recorded in encoding order (see __init__ note).
        decoration_subtoken = Subtoken(ctx.getText(), TokenCategory.DECORATION)
        self.decorations.append(decoration_subtoken)

    def exitRestDecoration(self, ctx: kernSpineParser.NoteDecorationContext):
        # NOTE(review): the annotation says NoteDecorationContext; a
        # RestDecorationContext would be expected here — confirm against the
        # generated parser.
        decoration = ctx.getText()
        # Stem-direction marks ('/' and '\') are meaningless on rests: drop them.
        if decoration != '/' and decoration != '\\':
            decoration_subtoken = Subtoken(decoration, TokenCategory.DECORATION)
            self.decorations.append(decoration_subtoken)

    def addNoteRest(self, ctx, pitchduration_subtokens):
        # Wrap the collected subtokens; inside a chord the token is queued
        # until exitChord, otherwise it becomes the result immediately.
        token = NoteRestToken(ctx.getText(), pitchduration_subtokens, self.decorations)
        if self.in_chord:
            self.chord_tokens.append(token)
        else:
            self.token = token

    def exitNote(self, ctx: kernSpineParser.NoteContext):
        pitch_duration_tokens = []
        for duration_subtoken in self.duration_subtokens:
            pitch_duration_tokens.append(duration_subtoken)
        pitch_duration_tokens.append(self.diatonic_pitch_and_octave_subtoken)
        if ctx.alteration():
            pitch_duration_tokens.append(Subtoken(ctx.alteration().getText(), TokenCategory.ALTERATION))

        self.addNoteRest(ctx, pitch_duration_tokens)

    def exitRest(self, ctx: kernSpineParser.RestContext):
        pitch_duration_tokens = []
        for duration_subtoken in self.duration_subtokens:
            pitch_duration_tokens.append(duration_subtoken)
        pitch_duration_tokens.append(Subtoken('r', TokenCategory.PITCH))
        self.addNoteRest(ctx, pitch_duration_tokens)

    def enterChord(self, ctx: kernSpineParser.ChordContext):
        self.in_chord = True
        self.chord_tokens = []

    def exitChord(self, ctx: kernSpineParser.ChordContext):
        self.in_chord = False
        self.token = ChordToken(ctx.getText(), TokenCategory.CHORD, self.chord_tokens)

    def exitEmpty(self, ctx: kernSpineParser.EmptyContext):
        self.token = SimpleToken(ctx.getText(), TokenCategory.EMPTY)

    def exitNonVisualTandemInterpretation(self, ctx: kernSpineParser.NonVisualTandemInterpretationContext):
        self.token = SimpleToken(ctx.getText(), TokenCategory.OTHER)

    def exitVisualTandemInterpretation(self, ctx: kernSpineParser.VisualTandemInterpretationContext):
        self.token = SimpleToken(ctx.getText(), TokenCategory.ENGRAVED_SYMBOLS)

    def exitOtherContextual(self, ctx: kernSpineParser.ContextualContext):
        self.token = SimpleToken(ctx.getText(), TokenCategory.OTHER_CONTEXTUAL)

    def exitClef(self, ctx: kernSpineParser.ClefContext):
        self.token = ClefToken(ctx.getText())

    def exitKeySignature(self, ctx: kernSpineParser.KeySignatureContext):
        self.token = KeySignatureToken(ctx.getText())

    def exitKeyCancel(self, ctx: kernSpineParser.KeyCancelContext):
        # A key cancellation is modelled as a key-signature token as well.
        self.token = KeySignatureToken(ctx.getText())

    def exitKey(self, ctx: kernSpineParser.KeyContext):
        self.token = KeyToken(ctx.getText())

    def exitTimeSignature(self, ctx: kernSpineParser.TimeSignatureContext):
        self.token = TimeSignatureToken(ctx.getText())

    def exitMeterSymbol(self, ctx: kernSpineParser.MeterSymbolContext):
        self.token = MeterSymbolToken(ctx.getText())

    def exitStructural(self, ctx: kernSpineParser.StructuralContext):
        self.token = SimpleToken(ctx.getText(), TokenCategory.STRUCTURAL)

    def exitXywh(self, ctx: kernSpineParser.XywhContext):
        self.last_bounding_box = BoundingBox(int(ctx.x().getText()), int(ctx.y().getText()), int(ctx.w().getText()),
                                             int(ctx.h().getText()))

    def exitBoundingBox(self, ctx: kernSpineParser.BoundingBoxContext):
        page = ctx.pageNumber().getText()
        bbox = BoundingBox(int(ctx.xywh().x().getText()), int(ctx.xywh().y().getText()), int(ctx.xywh().w().getText()),
                           int(ctx.xywh().h().getText()))
        self.token = BoundingBoxToken(ctx.getText(), page, bbox)

    def exitInstrument(self, ctx: kernSpineParser.InstrumentContext):
        self.token = InstrumentToken(ctx.getText())

    def exitBarline(self, ctx: kernSpineParser.BarlineContext):
        # Rebuild the barline text without the optional measure number.
        txt_without_number = ''
        if ctx.EQUAL(0) and ctx.EQUAL(1):
            txt_without_number = '=='
        elif ctx.EQUAL(0):
            txt_without_number = '='
        if ctx.barLineType():
            txt_without_number += ctx.barLineType().getText()
        if ctx.fermata():
            txt_without_number += ctx.fermata().getText()

        # Correct a wrong encoding of the begin-and-end repeat barline.
        # (A second branch mapping ':|!|:' to itself was a no-op and has been
        # removed.)
        if txt_without_number == ':!:':
            txt_without_number = ':|!|:'

        self.token = BarToken(txt_without_number)
        self.token.hidden = "-" in ctx.getText()  # '-' marks a hidden barline
196
+
@@ -0,0 +1,43 @@
1
+ from __future__ import annotations
2
+ from typing import Optional
3
+
4
+ from .base_antlr_spine_parser_listener import BaseANTLRSpineParserListener
5
+ from .spine_importer import SpineImporter
6
+ from .spine_importer import SpineImporter
7
+ from .kern_spine_importer import KernSpineImporter, KernSpineListener
8
+ from .tokens import SimpleToken, TokenCategory, Token
9
+
10
+
11
class BasicSpineImporter(SpineImporter):
    def __init__(self, verbose: Optional[bool] = False):
        """
        BasicSpineImporter constructor.

        Args:
            verbose (Optional[bool]): Level of verbosity for error messages.
        """
        # Fix: the docstring previously said "KernSpineImporter constructor",
        # copied from the sibling class.
        super().__init__(verbose=verbose)

    def import_listener(self) -> BaseANTLRSpineParserListener:
        # TODO: Create a custom functional listener for BasicSpineImporter
        return KernSpineListener()

    def import_token(self, encoding: str) -> Token:
        """Parse *encoding* as a kern token, downgrading anything outside the
        basic-spine category whitelist to a SimpleToken of category OTHER.

        Args:
            encoding (str): The raw spine-token text.

        Returns:
            Token: The parsed token, or a SimpleToken(OTHER) fallback.
        """
        self._raise_error_if_wrong_input(encoding)

        kern_spine_importer = KernSpineImporter()
        token = kern_spine_importer.import_token(encoding)

        # Fix: TokenCategory.BARLINES was listed twice in this set literal.
        ACCEPTED_CATEGORIES = {
            TokenCategory.STRUCTURAL,
            TokenCategory.SIGNATURES,
            TokenCategory.EMPTY,
            TokenCategory.BARLINES,
            TokenCategory.IMAGE_ANNOTATIONS,
            TokenCategory.COMMENTS,
        }

        if not any(TokenCategory.is_child(child=token.category, parent=cat) for cat in ACCEPTED_CATEGORIES):
            return SimpleToken(encoding, TokenCategory.OTHER)

        return token