kernpy 0.0.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

Files changed (56)
  1. kernpy/__init__.py +215 -0
  2. kernpy/__main__.py +217 -0
  3. kernpy/core/__init__.py +119 -0
  4. kernpy/core/_io.py +48 -0
  5. kernpy/core/base_antlr_importer.py +61 -0
  6. kernpy/core/base_antlr_spine_parser_listener.py +196 -0
  7. kernpy/core/basic_spine_importer.py +43 -0
  8. kernpy/core/document.py +965 -0
  9. kernpy/core/dyn_importer.py +30 -0
  10. kernpy/core/dynam_spine_importer.py +42 -0
  11. kernpy/core/error_listener.py +51 -0
  12. kernpy/core/exporter.py +535 -0
  13. kernpy/core/fing_spine_importer.py +42 -0
  14. kernpy/core/generated/kernSpineLexer.interp +444 -0
  15. kernpy/core/generated/kernSpineLexer.py +535 -0
  16. kernpy/core/generated/kernSpineLexer.tokens +236 -0
  17. kernpy/core/generated/kernSpineParser.interp +425 -0
  18. kernpy/core/generated/kernSpineParser.py +9954 -0
  19. kernpy/core/generated/kernSpineParser.tokens +236 -0
  20. kernpy/core/generated/kernSpineParserListener.py +1200 -0
  21. kernpy/core/generated/kernSpineParserVisitor.py +673 -0
  22. kernpy/core/generic.py +426 -0
  23. kernpy/core/gkern.py +526 -0
  24. kernpy/core/graphviz_exporter.py +89 -0
  25. kernpy/core/harm_spine_importer.py +41 -0
  26. kernpy/core/import_humdrum_old.py +853 -0
  27. kernpy/core/importer.py +285 -0
  28. kernpy/core/importer_factory.py +43 -0
  29. kernpy/core/kern_spine_importer.py +73 -0
  30. kernpy/core/mens_spine_importer.py +23 -0
  31. kernpy/core/mhxm_spine_importer.py +44 -0
  32. kernpy/core/pitch_models.py +338 -0
  33. kernpy/core/root_spine_importer.py +58 -0
  34. kernpy/core/spine_importer.py +45 -0
  35. kernpy/core/text_spine_importer.py +43 -0
  36. kernpy/core/tokenizers.py +239 -0
  37. kernpy/core/tokens.py +2011 -0
  38. kernpy/core/transposer.py +300 -0
  39. kernpy/io/__init__.py +14 -0
  40. kernpy/io/public.py +355 -0
  41. kernpy/polish_scores/__init__.py +13 -0
  42. kernpy/polish_scores/download_polish_dataset.py +357 -0
  43. kernpy/polish_scores/iiif.py +47 -0
  44. kernpy/test_grammar.sh +22 -0
  45. kernpy/util/__init__.py +14 -0
  46. kernpy/util/helpers.py +55 -0
  47. kernpy/util/store_cache.py +35 -0
  48. kernpy/visualize_analysis.sh +23 -0
  49. kernpy-1.0.0.dist-info/METADATA +501 -0
  50. kernpy-1.0.0.dist-info/RECORD +51 -0
  51. {kernpy-0.0.1.dist-info → kernpy-1.0.0.dist-info}/WHEEL +1 -2
  52. kernpy/example.py +0 -0
  53. kernpy-0.0.1.dist-info/LICENSE +0 -19
  54. kernpy-0.0.1.dist-info/METADATA +0 -19
  55. kernpy-0.0.1.dist-info/RECORD +0 -7
  56. kernpy-0.0.1.dist-info/top_level.txt +0 -1
kernpy/core/transposer.py ADDED
@@ -0,0 +1,300 @@
+ from __future__ import annotations
+
+ from copy import deepcopy
+ from typing import Optional
+
+ from .pitch_models import (
+     NotationEncoding,
+     AgnosticPitch,
+     PitchExporterFactory,
+     PitchImporterFactory,
+     Direction,
+ )
+
+
+ Intervals = {
+     -2: 'dd1',
+     -1: 'd1',
+     0: 'P1',
+     1: 'A1',
+     2: 'AA1',
+     3: 'dd2',
+     4: 'd2',
+     5: 'm2',
+     6: 'M2',
+     7: 'A2',
+     8: 'AA2',
+     9: 'dd3',
+     10: 'd3',
+     11: 'm3',
+     12: 'M3',
+     13: 'A3',
+     14: 'AA3',
+     15: 'dd4',
+     16: 'd4',
+     17: 'P4',
+     18: 'A4',
+     19: 'AA4',
+     # 20 is unused
+     21: 'dd5',
+     22: 'd5',
+     23: 'P5',
+     24: 'A5',
+     25: 'AA5',
+     26: 'dd6',
+     27: 'd6',
+     28: 'm6',
+     29: 'M6',
+     30: 'A6',
+     31: 'AA6',
+     32: 'dd7',
+     33: 'd7',
+     34: 'm7',
+     35: 'M7',
+     36: 'A7',
+     37: 'AA7',
+     40: 'octave'
+ }
+ """
+ Base-40 interval classes (d=diminished, m=minor, M=major, P=perfect, A=augmented).
+ """
+
+ IntervalsByName = {v: k for k, v in Intervals.items()}  # reverse the key-value pairs
+ AVAILABLE_INTERVALS = sorted(IntervalsByName.keys())
+
+ LETTER_TO_SEMITONES = {
+     'C': 0,
+     'D': 2,
+     'E': 4,
+     'F': 5,
+     'G': 7,
+     'A': 9,
+     'B': 11,
+ }
+
+
+ def transpose_agnostics(
+         input_pitch: AgnosticPitch,
+         interval: int,
+         direction: str = Direction.UP.value
+ ) -> AgnosticPitch:
+     """
+     Transpose an AgnosticPitch by a given interval.
+
+     Args:
+         input_pitch (AgnosticPitch): The pitch to transpose.
+         interval (int): The interval to transpose the pitch by.
+         direction (str): The direction of the transposition, 'UP' or 'DOWN'. Default is 'UP'.
+
+     Returns:
+         AgnosticPitch: The transposed pitch.
+
+     Examples:
+         >>> transpose_agnostics(AgnosticPitch('C', 4), IntervalsByName['P4'])
+         AgnosticPitch('F', 4)
+         >>> transpose_agnostics(AgnosticPitch('C', 4), IntervalsByName['P4'], direction='down')
+         AgnosticPitch('G', 3)
+         >>> transpose_agnostics(AgnosticPitch('C#', 4), IntervalsByName['P4'])
+         AgnosticPitch('F#', 4)
+         >>> transpose_agnostics(AgnosticPitch('G', 4), IntervalsByName['m3'], direction='down')
+         AgnosticPitch('Bb', 4)
+
+     """
+     return AgnosticPitch.to_transposed(input_pitch, interval, direction)
+
+
+ def transpose_encoding_to_agnostic(
+         input_encoding: str,
+         interval: int,
+         input_format: str = NotationEncoding.HUMDRUM.value,
+         direction: str = Direction.UP.value
+ ) -> AgnosticPitch:
+     """
+     Transpose an encoded pitch by a given interval.
+
+     The encoding of the input pitch is given by `input_format`.
+
+     Args:
+         input_encoding (str): The pitch to transpose.
+         interval (int): The interval to transpose the pitch by.
+         input_format (str): The encoding format of the pitch. Default is HUMDRUM.
+         direction (str): The direction of the transposition, 'UP' or 'DOWN'. Default is 'UP'.
+
+     Returns:
+         AgnosticPitch: The transposed pitch.
+
+     Examples:
+         >>> transpose_encoding_to_agnostic('ccc', IntervalsByName['P4'], input_format='kern')
+         AgnosticPitch('fff', 4)
+         >>> transpose_encoding_to_agnostic('ccc', IntervalsByName['P4'], input_format=NotationEncoding.HUMDRUM.value)
+         AgnosticPitch('fff', 4)
+         >>> transpose_encoding_to_agnostic('ccc', IntervalsByName['P4'], input_format='kern', direction='down')
+         AgnosticPitch('gg', 3)
+         >>> transpose_encoding_to_agnostic('ccc#', IntervalsByName['P4'])
+         AgnosticPitch('fff#', 4)
+         >>> transpose_encoding_to_agnostic('G4', IntervalsByName['m3'], input_format='american')
+         AgnosticPitch('Bb4', 4)
+         >>> transpose_encoding_to_agnostic('C3', IntervalsByName['P4'], input_format='american', direction='down')
+         AgnosticPitch('G2', 2)
+
+     """
+     importer = PitchImporterFactory.create(input_format)
+     pitch: AgnosticPitch = importer.import_pitch(input_encoding)
+
+     return transpose_agnostics(pitch, interval, direction=direction)
+
+
+ def transpose_agnostic_to_encoding(
+         agnostic_pitch: AgnosticPitch,
+         interval: int,
+         output_format: str = NotationEncoding.HUMDRUM.value,
+         direction: str = Direction.UP.value
+ ) -> str:
+     """
+     Transpose an AgnosticPitch by a given interval and export it as an encoded pitch.
+
+     Args:
+         agnostic_pitch (AgnosticPitch): The pitch to transpose.
+         interval (int): The interval to transpose the pitch by.
+         output_format (str): The encoding format of the transposed pitch. Default is HUMDRUM.
+         direction (str): The direction of the transposition, 'UP' or 'DOWN'. Default is 'UP'.
+
+     Returns:
+         str: The transposed pitch.
+
+     Examples:
+         >>> transpose_agnostic_to_encoding(AgnosticPitch('C', 4), IntervalsByName['P4'])
+         'F4'
+         >>> transpose_agnostic_to_encoding(AgnosticPitch('C', 4), IntervalsByName['P4'], direction='down')
+         'G3'
+         >>> transpose_agnostic_to_encoding(AgnosticPitch('C#', 4), IntervalsByName['P4'])
+         'F#4'
+         >>> transpose_agnostic_to_encoding(AgnosticPitch('G', 4), IntervalsByName['m3'], direction='down')
+         'Bb4'
+     """
+     exporter = PitchExporterFactory.create(output_format)
+     transposed_pitch = transpose_agnostics(agnostic_pitch, interval, direction=direction)
+     content = exporter.export_pitch(transposed_pitch)
+
+     return content
+
+
+
+
+
+ def transpose(
+         input_encoding: str,
+         interval: int,
+         input_format: str = NotationEncoding.HUMDRUM.value,
+         output_format: str = NotationEncoding.HUMDRUM.value,
+         direction: str = Direction.UP.value
+ ) -> str:
+     """
+     Transpose an encoded pitch by a given interval.
+
+     The encoding of the input pitch is given by `input_format`; the result is encoded using `output_format`.
+
+     Args:
+         input_encoding (str): The pitch to transpose.
+         interval (int): The interval to transpose the pitch by.
+         input_format (str): The encoding format of the pitch. Default is HUMDRUM.
+         output_format (str): The encoding format of the transposed pitch. Default is HUMDRUM.
+         direction (str): The direction of the transposition, 'UP' or 'DOWN'. Default is 'UP'.
+
+     Returns:
+         str: The transposed pitch.
+
+     Examples:
+         >>> transpose('ccc', IntervalsByName['P4'], input_format='kern', output_format='kern')
+         'fff'
+         >>> transpose('ccc', IntervalsByName['P4'], input_format=NotationEncoding.HUMDRUM.value)
+         'fff'
+         >>> transpose('ccc', IntervalsByName['P4'], input_format='kern', direction='down')
+         'gg'
+         >>> transpose('ccc', IntervalsByName['P4'], input_format='kern', direction=Direction.DOWN.value)
+         'gg'
+         >>> transpose('ccc#', IntervalsByName['P4'])
+         'fff#'
+         >>> transpose('G4', IntervalsByName['m3'], input_format='american')
+         'Bb4'
+         >>> transpose('G4', IntervalsByName['m3'], input_format=NotationEncoding.AMERICAN.value)
+         'Bb4'
+         >>> transpose('C3', IntervalsByName['P4'], input_format='american', direction='down')
+         'G2'
+
+
+     """
+     importer = PitchImporterFactory.create(input_format)
+     pitch: AgnosticPitch = importer.import_pitch(input_encoding)
+
+     transposed_pitch = transpose_agnostics(pitch, interval, direction=direction)
+
+     exporter = PitchExporterFactory.create(output_format)
+     content = exporter.export_pitch(transposed_pitch)
+
+     return content
+
+
+ def agnostic_distance(
+         first_pitch: AgnosticPitch,
+         second_pitch: AgnosticPitch,
+ ) -> int:
+     """
+     Calculate the distance in semitones between two pitches.
+
+     Args:
+         first_pitch (AgnosticPitch): The first pitch to compare.
+         second_pitch (AgnosticPitch): The second pitch to compare.
+
+     Returns:
+         int: The distance in semitones between the two pitches.
+
+     Examples:
+         >>> agnostic_distance(AgnosticPitch('C4'), AgnosticPitch('E4'))
+         4
+         >>> agnostic_distance(AgnosticPitch('C4'), AgnosticPitch('B3'))
+         -1
+     """
+     def semitone_index(p: AgnosticPitch) -> int:
+         # base letter:
+         letter = p.name.replace('+', '').replace('-', '')
+         base = LETTER_TO_SEMITONES[letter]
+         # accidentals: '+' is one sharp, '-' one flat
+         alteration = p.name.count('+') - p.name.count('-')
+         return p.octave * 12 + base + alteration
+
+     return semitone_index(second_pitch) - semitone_index(first_pitch)
+
+
+ def distance(
+         first_encoding: str,
+         second_encoding: str,
+         *,
+         first_format: str = NotationEncoding.HUMDRUM.value,
+         second_format: str = NotationEncoding.HUMDRUM.value,
+ ) -> int:
+     """
+     Calculate the distance in semitones between two pitches.
+
+     Args:
+         first_encoding (str): The first pitch to compare.
+         second_encoding (str): The second pitch to compare.
+         first_format (str): The encoding format of the first pitch. Default is HUMDRUM.
+         second_format (str): The encoding format of the second pitch. Default is HUMDRUM.
+
+     Returns:
+         int: The distance in semitones between the two pitches.
+
+     Examples:
+         >>> distance('C4', 'E4')
+         4
+         >>> distance('C4', 'B3')
+         -1
+     """
+     first_importer = PitchImporterFactory.create(first_format)
+     first_pitch: AgnosticPitch = first_importer.import_pitch(first_encoding)
+
+     second_importer = PitchImporterFactory.create(second_format)
+     second_pitch: AgnosticPitch = second_importer.import_pitch(second_encoding)
+
+     return agnostic_distance(first_pitch, second_pitch)
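
Taken together, transposer.py provides interval-based transposition plus semitone distances. A minimal usage sketch built from the docstring examples above (importing directly from kernpy.core.transposer; the expected outputs are only what those docstrings claim and depend on the AgnosticPitch implementation):

    from kernpy.core.transposer import IntervalsByName, transpose, distance

    # Transpose a **kern pitch up a perfect fourth, staying in **kern encoding.
    print(transpose('ccc', IntervalsByName['P4'], input_format='kern', output_format='kern'))  # 'fff' per the docstring

    # The same interval downwards.
    print(transpose('ccc', IntervalsByName['P4'], input_format='kern', direction='down'))  # 'gg' per the docstring

    # Semitone distance between two encoded pitches (positive when the second pitch is higher).
    print(distance('C4', 'E4'))  # 4 per the docstring
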
kernpy/io/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from .public import *
+
+ __all__ = [
+     'load',
+     'loads',
+     'dump',
+     'dumps',
+     'merge',
+     'concat',
+     'spine_types',
+     'graph'
+ ]
+
+
kernpy/io/public.py ADDED
@@ -0,0 +1,355 @@
+ """
+ Public API for KernPy.
+
+ The main functions for handling the input and output of **kern files are provided here.
+ """
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import List, Optional, Any, Union, Tuple, Sequence
+
+ from kernpy import Encoding
+ from kernpy.core import (
+     Document, Importer, Exporter, ExportOptions, GraphvizExporter,
+     generic,
+     TokenCategoryHierarchyMapper)
+
+
+ def load(fp: Union[str, Path], *, raise_on_errors: Optional[bool] = False, **kwargs) -> (Document, List[str]):
+     """
+     Load a Document object from a Humdrum **kern file-like object.
+
+     Args:
+         fp (Union[str, Path]): A path-like object representing a **kern file.
+         raise_on_errors (Optional[bool], optional): If True, raise an exception if any grammar error is detected \
+             during parsing.
+
+     Returns ((Document, List[str])): A tuple containing the Document object and a list of messages representing \
+         grammar errors detected during parsing. If the list is empty, \
+         the parsing did not detect any errors.
+
+     Raises:
+         ValueError: If the Humdrum **kern representation could not be parsed.
+
+     Examples:
+         >>> import kernpy as kp
+         >>> document, errors = kp.load('BWV565.krn')
+         >>> if len(errors) > 0:
+         >>>     print(f"Grammar didn't recognize the following errors: {errors}")
+         ['Error: Invalid **kern spine: 1', 'Error: Invalid **kern spine: 2']
+         >>> # Anyway, we can use the Document
+         >>>     print(document)
+         >>> else:
+         >>>     print(document)
+         <kernpy.core.document.Document object at 0x7f8b3b7b3d90>
+     """
+     return generic.Generic.read(
+         path=fp,
+         strict=raise_on_errors,
+     )
+
+
+ def loads(s, *, raise_on_errors: Optional[bool] = False, **kwargs) -> (Document, List[str]):
+     """
+     Load a Document object from a string encoded in Humdrum **kern.
+
+     Args:
+         s (str): A string containing a **kern file.
+         raise_on_errors (Optional[bool], optional): If True, raise an exception if any grammar error is detected \
+             during parsing.
+
+     Returns ((Document, List[str])): A tuple containing the Document object and a list of messages representing \
+         grammar errors detected during parsing. If the list is empty, \
+         the parsing did not detect any errors.
+
+     Raises:
+         ValueError: If the Humdrum **kern representation could not be parsed.
+
+     Examples:
+         >>> import kernpy as kp
+         >>> document, errors = kp.loads('**kern\n*clefG2\n=1\n4c\n4d\n4e\n4f\n')
+         >>> if len(errors) > 0:
+         >>>     print(f"Grammar didn't recognize the following errors: {errors}")
+         ['Error: Invalid **kern spine: 1']
+         >>> # Anyway, we can use the Document
+         >>>     print(document)
+         >>> else:
+         >>>     print(document)
+         <kernpy.core.document.Document object at 0x7f8b3b7b3d90>
+     """
+     return generic.Generic.create(
+         content=s,
+         strict=raise_on_errors,
+     )
+
+
+ def dump(document: Document, fp: Union[str, Path], *,
+          spine_types: [] = None,
+          include: [] = None,
+          exclude: [] = None,
+          from_measure: int = None,
+          to_measure: int = None,
+          tokenizer: Encoding = None,
+          instruments: [] = None,
+          show_measure_numbers: bool = None,
+          spine_ids: [int] = None
+          ) -> None:
+     """
+     Export a Document object and write it to a file.
+
+     Args:
+         document (Document): The Document object to write to the file.
+         fp (Union[str, Path]): The file path to write the Document object.
+         spine_types (Iterable): **kern, **mens, etc...
+         include (Iterable): The token categories to include in the exported file. When None, all the token categories will be exported.
+         exclude (Iterable): The token categories to exclude from the exported file. When None, no token categories will be excluded.
+         from_measure (int): The measure to start exporting. When None, the exporter will start from the beginning of the file. The first measure is 1.
+         to_measure (int): The measure to end exporting. When None, the exporter will end at the end of the file.
+         tokenizer (Encoding): The type of the **kern file to export.
+         instruments (Iterable): The instruments to export. If None, all the instruments will be exported.
+         show_measure_numbers (bool): Show the measure numbers in the exported file.
+         spine_ids (Iterable): The ids of the spines to export. When None, all the spines will be exported. \
+             Spine ids start from 0 and increase by 1 for each spine to the right.
+
+     Returns (None): None
+
+     Raises:
+         ValueError: If the document could not be exported.
+
+     Examples:
+         >>> import kernpy as kp
+         >>> document, errors = kp.load('BWV565.krn')
+         >>> kp.dump(document, 'BWV565_normalized.krn')
+         None
+         >>> # File 'BWV565_normalized.krn' will be created with the normalized **kern representation.
+     """
+     # Create an ExportOptions instance with only user-modified arguments
+     options = generic.Generic.parse_options_to_ExportOptions(
+         spine_types=spine_types,
+         include=include,
+         exclude=exclude,
+         from_measure=from_measure,
+         to_measure=to_measure,
+         kern_type=tokenizer,
+         instruments=instruments,
+         show_measure_numbers=show_measure_numbers,
+         spine_ids=spine_ids
+     )
+
+     return generic.Generic.store(
+         document=document,
+         path=fp,
+         options=options
+     )
+
+
+ def dumps(document: Document, *,
+           spine_types: [] = None,
+           include: [] = None,
+           exclude: [] = None,
+           from_measure: int = None,
+           to_measure: int = None,
+           tokenizer: Encoding = None,
+           instruments: [] = None,
+           show_measure_numbers: bool = None,
+           spine_ids: [int] = None
+           ) -> str:
+     """
+     Export a Document object to a Humdrum **kern string.
+
+     Args:
+         document (Document): The Document object to export.
+         spine_types (Iterable): **kern, **mens, etc...
+         include (Iterable): The token categories to include in the exported content. When None, all the token categories will be exported.
+         exclude (Iterable): The token categories to exclude from the exported content. When None, no token categories will be excluded.
+         from_measure (int): The measure to start exporting. When None, the exporter will start from the beginning of the file. The first measure is 1.
+         to_measure (int): The measure to end exporting. When None, the exporter will end at the end of the file.
+         tokenizer (Encoding): The type of the **kern file to export.
+         instruments (Iterable): The instruments to export. If None, all the instruments will be exported.
+         show_measure_numbers (bool): Show the measure numbers in the exported content.
+         spine_ids (Iterable): The ids of the spines to export. When None, all the spines will be exported. \
+             Spine ids start from 0 and increase by 1 for each spine to the right.
+
+
+     Returns (str): The exported **kern content as a string.
+
+     Raises:
+         ValueError: If the document could not be exported.
+
+     Examples:
+         >>> import kernpy as kp
+         >>> document, errors = kp.load('score.krn')
+         >>> kp.dumps(document)
+         '**kern\n*clefG2\n=1\n4c\n4d\n4e\n4f\n*-'
+     """
+     # Create an ExportOptions instance with only user-modified arguments
+     options = generic.Generic.parse_options_to_ExportOptions(
+         spine_types=spine_types,
+         include=include,
+         exclude=exclude,
+         from_measure=from_measure,
+         to_measure=to_measure,
+         kern_type=tokenizer,
+         instruments=instruments,
+         show_measure_numbers=show_measure_numbers,
+         spine_ids=spine_ids
+     )
+
+     return generic.Generic.export(
+         document=document,
+         options=options
+     )
+
+
+ def graph(document: Document, fp: Optional[Union[str, Path]]) -> None:
+     """
+     Create a graph representation of a Document object using Graphviz and save it as a .dot file at the given \
+     output path. If the output file path is None, the Graphviz content is written to the \
+     standard output instead.
+
+     Use the Graphviz software to convert the .dot file to an image.
+
+
+     Args:
+         document (Document): The Document object to export as a Graphviz file.
+         fp (Optional[Union[str, Path]]): The file path to write the Graphviz file. If None, the \
+             Graphviz content is written to the standard output.
+
+     Returns (None): None
+
+     Examples:
+         >>> import kernpy as kp
+         >>> document, errors = kp.load('score.krn')
+         >>> kp.graph(document, 'score.dot')
+         None
+         >>> # File 'score.dot' will be created with the Graphviz representation of the Document object.
+         >>> kp.graph(document, None)
+         'digraph G { ... }'
+     """
+     return generic.Generic.store_graph(
+         document=document,
+         path=fp
+     )
+
+
+ def concat(
+         contents: List[str],
+         *,
+         separator: Optional[str] = '\n',
+ ) -> Tuple[Document, List[Tuple[int, int]]]:
+     """
+     Concatenate multiple **kern fragments into a single Document object. \
+     All the fragments should be presented in order. Each fragment does not need to be a complete **kern file.
+
+     Warnings:
+         Processing a large number of fragments in a row may take some time.
+         This method performs as many parse operations as there are fragments to concatenate.
+     Args:
+         contents (Sequence[str]): List of **kern strings.
+         separator (Optional[str]): Separator string used to separate the **kern fragments. Default is '\n' (newline).
+
+     Returns (Tuple[Document, List[Tuple[int, int]]]): The Document object \
+         and a List of pairs (Tuple[int, int]) representing the measure indexes of each fragment in the concatenated document.
+
+     Examples:
+         >>> import kernpy as kp
+         >>> contents = ['**kern\n4e\n4f\n4g\n*-\n', '4a\n4b\n4c\n*-\n=\n', '4d\n4e\n4f\n*-\n']
+         >>> document, indexes = kp.concat(contents)
+         >>> indexes
+         [(0, 3), (3, 6), (6, 9)]
+         >>> document, indexes = kp.concat(contents, separator='\n')
+         >>> indexes
+         [(0, 3), (3, 6), (6, 9)]
+         >>> document, indexes = kp.concat(contents, separator='')
+         >>> indexes
+         [(0, 3), (3, 6), (6, 9)]
+         >>> for start, end in indexes:
+         >>>     print(kp.dumps(document, from_measure=start, to_measure=end))
+     """
+     return generic.Generic.concat(
+         contents=contents,
+         separator=separator,
+     )
+
+
+ def merge(
+         contents: List[str],
+         *,
+         raise_on_errors: Optional[bool] = False,
+ ) -> Tuple[Document, List[Tuple[int, int]]]:
+     """
+     Merge multiple **kern fragments into a single Document object. \
+     All the fragments should be presented in order. Each fragment does not need to be a complete **kern file.
+
+     Warnings:
+         Processing a large number of fragments in a row may take some time.
+         This method performs as many parse operations as there are fragments to merge.
+     Args:
+         contents (Sequence[str]): List of **kern strings.
+         raise_on_errors (Optional[bool], optional): If True, raise an exception if any grammar error is detected \
+             during parsing.
+
+     Returns (Tuple[Document, List[Tuple[int, int]]]): The Document object \
+         and a List of pairs (Tuple[int, int]) representing the measure indexes of each fragment in the merged document.
+
+     Examples:
+         >>> import kernpy as kp
+         >>> contents = ['**kern\n4e\n4f\n4g\n*-\n*-', '**kern\n4a\n4b\n4c\n*-\n=\n*-', '**kern\n4d\n4e\n4f\n*-\n*-']
+         >>> document, indexes = kp.merge(contents)
+         >>> indexes
+         [(0, 3), (3, 6), (6, 9)]
+         >>> for start, end in indexes:
+         >>>     print(kp.dumps(document, from_measure=start, to_measure=end))
+     """
+     return generic.Generic.merge(
+         contents=contents,
+         strict=raise_on_errors
+     )
+
+
+ def spine_types(
+         document: Document,
+         headers: Optional[Sequence[str]] = None
+ ) -> List[str]:
+     """
+     Get the spine types of a Document object.
+
+     Args:
+         document (Document): Document object to get spines from.
+         headers (Optional[Sequence[str]]): List of spine types to get. If None, all spines are returned. Using a \
+             header will return all the spines of that type.
+
+     Returns (List[str]): List of spine types.
+
+     Examples:
+         >>> import kernpy as kp
+         >>> document, _ = kp.load('path/to/file.krn')
+         >>> kp.spine_types(document)
+         ['**kern', '**kern', '**kern', '**kern', '**root', '**harm']
+         >>> kp.spine_types(document, None)
+         ['**kern', '**kern', '**kern', '**kern', '**root', '**harm']
+         >>> kp.spine_types(document, headers=None)
+         ['**kern', '**kern', '**kern', '**kern', '**root', '**harm']
+         >>> kp.spine_types(document, headers=['**kern'])
+         ['**kern', '**kern', '**kern', '**kern']
+         >>> kp.spine_types(document, headers=['**kern', '**root'])
+         ['**kern', '**kern', '**kern', '**kern', '**root']
+         >>> kp.spine_types(document, headers=['**kern', '**root', '**harm'])
+         ['**kern', '**kern', '**kern', '**kern', '**root', '**harm']
+         >>> kp.spine_types(document, headers=[])
+         []
+     """
+     return generic.Generic.get_spine_types(
+         document=document,
+         spine_types=headers
+     )
+
+
+
+
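
Putting the public API together, here is a minimal round-trip sketch based on the docstrings above ('score.krn' and 'score_normalized.krn' are placeholder paths; actual output depends on the score):

    import kernpy as kp

    # Parse a **kern file into a Document plus any grammar warnings.
    document, errors = kp.load('score.krn')
    if errors:
        print(f"parsed with {len(errors)} grammar warnings")

    # Inspect the spine headers and export only the **kern spines as a string.
    print(kp.spine_types(document, headers=['**kern']))
    kern_only = kp.dumps(document, spine_types=['**kern'])

    # Write a normalized copy of the whole document back to disk.
    kp.dump(document, 'score_normalized.krn')
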
kernpy/polish_scores/__init__.py ADDED
@@ -0,0 +1,13 @@
+ """
+ polish_scores
+
+ ====
+
+ This module provides a way to download and process the Polish Scores dataset.
+ """
+
+ from .download_polish_dataset import (
+     convert_and_download_file,
+     main as download_polish_scores,
+ )
+
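
The only names this subpackage exposes publicly are the two imported above. A minimal import sketch (their call signatures are not visible in this diff, so they are left uncalled here):

    from kernpy.polish_scores import convert_and_download_file, download_polish_scores

    # download_polish_scores is an alias of download_polish_dataset.main;
    # consult that module for the expected arguments before calling either helper.
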