kernpy 0.0.2__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. kernpy/__init__.py +30 -0
  2. kernpy/__main__.py +127 -0
  3. kernpy/core/__init__.py +119 -0
  4. kernpy/core/_io.py +48 -0
  5. kernpy/core/base_antlr_importer.py +61 -0
  6. kernpy/core/base_antlr_spine_parser_listener.py +196 -0
  7. kernpy/core/basic_spine_importer.py +43 -0
  8. kernpy/core/document.py +965 -0
  9. kernpy/core/dyn_importer.py +30 -0
  10. kernpy/core/dynam_spine_importer.py +42 -0
  11. kernpy/core/error_listener.py +51 -0
  12. kernpy/core/exporter.py +535 -0
  13. kernpy/core/fing_spine_importer.py +42 -0
  14. kernpy/core/generated/kernSpineLexer.interp +444 -0
  15. kernpy/core/generated/kernSpineLexer.py +535 -0
  16. kernpy/core/generated/kernSpineLexer.tokens +236 -0
  17. kernpy/core/generated/kernSpineParser.interp +425 -0
  18. kernpy/core/generated/kernSpineParser.py +9954 -0
  19. kernpy/core/generated/kernSpineParser.tokens +236 -0
  20. kernpy/core/generated/kernSpineParserListener.py +1200 -0
  21. kernpy/core/generated/kernSpineParserVisitor.py +673 -0
  22. kernpy/core/generic.py +426 -0
  23. kernpy/core/gkern.py +526 -0
  24. kernpy/core/graphviz_exporter.py +89 -0
  25. kernpy/core/harm_spine_importer.py +41 -0
  26. kernpy/core/import_humdrum_old.py +853 -0
  27. kernpy/core/importer.py +285 -0
  28. kernpy/core/importer_factory.py +43 -0
  29. kernpy/core/kern_spine_importer.py +73 -0
  30. kernpy/core/mens_spine_importer.py +23 -0
  31. kernpy/core/mhxm_spine_importer.py +44 -0
  32. kernpy/core/pitch_models.py +338 -0
  33. kernpy/core/root_spine_importer.py +58 -0
  34. kernpy/core/spine_importer.py +45 -0
  35. kernpy/core/text_spine_importer.py +43 -0
  36. kernpy/core/tokenizers.py +239 -0
  37. kernpy/core/tokens.py +2011 -0
  38. kernpy/core/transposer.py +300 -0
  39. kernpy/io/__init__.py +14 -0
  40. kernpy/io/public.py +355 -0
  41. kernpy/polish_scores/__init__.py +13 -0
  42. kernpy/polish_scores/download_polish_dataset.py +357 -0
  43. kernpy/polish_scores/iiif.py +47 -0
  44. kernpy/test_grammar.sh +22 -0
  45. kernpy/util/__init__.py +14 -0
  46. kernpy/util/helpers.py +55 -0
  47. kernpy/util/store_cache.py +35 -0
  48. kernpy/visualize_analysis.sh +23 -0
  49. kernpy-1.0.1.dist-info/METADATA +497 -0
  50. kernpy-1.0.1.dist-info/RECORD +51 -0
  51. {kernpy-0.0.2.dist-info → kernpy-1.0.1.dist-info}/WHEEL +1 -2
  52. kernpy/example.py +0 -1
  53. kernpy-0.0.2.dist-info/LICENSE +0 -19
  54. kernpy-0.0.2.dist-info/METADATA +0 -19
  55. kernpy-0.0.2.dist-info/RECORD +0 -7
  56. kernpy-0.0.2.dist-info/top_level.txt +0 -1
kernpy/core/import_humdrum_old.py
@@ -0,0 +1,853 @@
+ import csv
+ import string
+ import logging
+ from collections.abc import Iterable
+ from enum import Enum
+
+ from .importer_factory import createImporter
+ from .tokens import HeaderToken, SpineOperationToken, TokenCategory, BoundingBoxToken, KeySignatureToken, \
+     TimeSignatureToken, MeterSymbolToken, ClefToken, BarToken, MetacommentToken, ErrorToken, FieldCommentToken, \
+     BEKERN_CATEGORIES, TOKEN_SEPARATOR, DECORATION_SEPARATOR
+
+
+ class Encoding(Enum):  # TODO: Eventually, polymorphism will be used to export different types of kern files
+     """
+     Options for exporting a kern file.
+
+     Example:
+         # Create the importer
+         >>> hi = HumdrumImporter()
+
+         # Read the file
+         >>> hi.doImportFile('file.krn')
+
+         # Export the file
+         >>> options = ExportOptions(spine_types=['**kern'], token_categories=BEKERN_CATEGORIES, kern_type=Encoding.normalizedKern)
+         >>> exported = hi.doExport(options)
+
+     """
+     unprocessed = 0
+     eKern = 1
+     normalizedKern = 2
+
+
+ class ExportOptions:
+     def __init__(self, spine_types=None, token_categories=None, from_measure: int = None, to_measure: int = None, kern_type: Encoding = Encoding.normalizedKern, instruments=None):
+         """
+         Create a new ExportOptions object.
+
+         Args:
+             spine_types (Iterable): **kern, **mens, etc...
+             token_categories (Iterable): TokenCategory
+             from_measure (int): The measure to start exporting. When None, the exporter will start from the beginning of the file.
+             to_measure (int): The measure to end exporting. When None, the exporter will end at the end of the file.
+             kern_type (Encoding): The type of the kern file to export.
+             instruments (Iterable): The instruments to export. When None, all the instruments will be exported.
+
+
+         Example:
+             >>> from kernpy import HumdrumImporter, ExportOptions
+
+             Create the importer and read the file
+             >>> hi = HumdrumImporter()
+             >>> hi.doImportFile('file.krn')
+
+             Export the file with the specified options
+             >>> options = ExportOptions(spine_types=['**kern'], token_categories=BEKERN_CATEGORIES)
+             >>> exported_data = hi.doExport(options)
+
+             Export only the lyrics
+             >>> options = ExportOptions(spine_types=['**kern'], token_categories=[TokenCategory.LYRICS])
+             >>> exported_data = hi.doExport(options)
+
+             Export the comments
+             >>> options = ExportOptions(spine_types=['**kern'], token_categories=[TokenCategory.LINE_COMMENTS, TokenCategory.FIELD_COMMENTS])
+             >>> exported_data = hi.doExport(options)
+
+             Export using the eKern version
+             >>> options = ExportOptions(spine_types=['**kern'], token_categories=BEKERN_CATEGORIES, kern_type=Encoding.eKern)
+             >>> exported_data = hi.doExport(options)
+
+         """
+         self.spine_types = spine_types or []
+         self.from_measure = from_measure
+         self.to_measure = to_measure
+         self.token_categories = token_categories or []
+         self.kern_type = kern_type
+         self.instruments = instruments or []
+
+
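For orientation, a minimal sketch of how these options are meant to be combined with the importer defined further down in this file. This is editorial, not package source; the score path is hypothetical and the module paths are taken from the file list above.

```python
from kernpy.core.import_humdrum_old import Encoding, ExportOptions, HumdrumImporter
from kernpy.core.tokens import BEKERN_CATEGORIES

# Read a Humdrum **kern file (hypothetical path).
importer = HumdrumImporter()
importer.doImportFile('score.krn')

# Export the **kern spines restricted to the bekern token categories.
options = ExportOptions(spine_types=['**kern'],
                        token_categories=BEKERN_CATEGORIES,
                        kern_type=Encoding.eKern)
ekern_output = importer.doExport(options)
print(ekern_output)
```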
+ class BoundingBoxMeasures:
+     def __init__(self, bounding_box, from_measure, to_measure):
+         self.from_measure = from_measure
+         self.to_measure = to_measure
+         self.bounding_box = bounding_box
+
+
+ class Spine:
+     def __init__(self, spine_type, importer):
+         self.spine_type = spine_type  # **mens, **kern, etc...
+         self.rows = []  # each row will contain just one item or an array of items of type Token
+         self.importer = importer
+         self.importing_subspines = 1  # 0 for terminated subspines - used just for importing
+         self.next_row_subspine_variation = 0  # when a spine operation is added or removed, the subspines number must be modified for the next row
+
+     def size(self):
+         return len(self.rows)
+
+     def __len__(self):
+         return self.size()
+
+     def isTerminated(self):
+         return self.importing_subspines > 0
+
+     def getNumSubspines(self, row_number):
+         if row_number < 0:
+             raise Exception(f'Negative row number {row_number}')
+         if row_number >= len(self.rows):
+             raise Exception(f'Row number {row_number} out of bounds {len(self.rows)}')
+
+         return len(self.rows[row_number])
+
+     def addRow(self):
+         if self.importing_subspines != 0:  # if not terminated
+             self.rows.append([])
+             if self.next_row_subspine_variation > 0:
+                 new_subspines = self.importing_subspines + self.next_row_subspine_variation
+             elif self.next_row_subspine_variation < 0:
+                 new_subspines = self.importing_subspines + (
+                         self.next_row_subspine_variation + 1)  # e.g. *v *v *v for three spines lead to 1 spine
+             else:
+                 new_subspines = self.importing_subspines
+             logging.debug(f'Adding row to spine, previous subspines={self.importing_subspines}, new={new_subspines}')
+             self.importing_subspines = new_subspines
+             self.next_row_subspine_variation = 0
+
+     def addToken(self, token):
+         if not token:
+             raise Exception('Trying to add an empty token')
+
+         row = len(self.rows) - 1
+         if not isinstance(token, MetacommentToken) and len(self.rows[row]) >= self.importing_subspines:
+             raise Exception(
+                 f'There are already {len(self.rows[row])} subspines, and this spine should have at most {self.importing_subspines}')
+
+         self.rows[row].append(token)
+
+     def increaseSubspines(self):
+         self.next_row_subspine_variation = self.next_row_subspine_variation + 1
+
+     def decreaseSubspines(self):
+         self.next_row_subspine_variation = self.next_row_subspine_variation - 1
+
+     def terminate(self):
+         self.importing_subspines = 0
+
+     def isFullRow(self):
+         if self.importing_subspines == 0:
+             return True
+         else:
+             row = len(self.rows) - 1
+             return len(self.rows[row]) >= self.importing_subspines
+
+     def isContentOfType(self, row, clazz):
+         self.checkRowIndex(row)
+         for subspine in self.rows[row]:
+             if isinstance(subspine, clazz):
+                 return True
+         return False
+
+     def checkRowIndex(self, row):
+         if row < 0:
+             raise Exception(f'Negative row {row}')
+         if row >= len(self.rows):
+             raise Exception(f'Row {row} out of bounds {len(self.rows)}')
+
+     def getRowContent(self, row, kern_type: Encoding, token_categories: Iterable) -> string:
+         self.checkRowIndex(row)
+
+         result = ''
+         for subspine in self.rows[row]:
+             if subspine.category == TokenCategory.STRUCTURAL or subspine.category in token_categories:
+                 if len(result) > 0:
+                     result += '\t'
+                 if kern_type == Encoding.unprocessed:
+                     result += subspine.encoding
+                 elif kern_type in {Encoding.eKern, Encoding.normalizedKern}:
+                     if subspine.hidden:
+                         exp = '.'
+                     else:
+                         exp = subspine.export()
+                         if kern_type == Encoding.normalizedKern:
+                             exp = get_kern_from_ekern(exp)
+                     if not exp:
+                         raise Exception(f'Subspine {subspine.encoding} is exported as None')
+                     result += exp
+                 else:
+                     raise ValueError(f'Unknown kern type {kern_type}.\nView {help(Encoding)} ')
+
+         return result
+
+
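As an illustration of the subspine bookkeeping above (editorial, not package code): increaseSubspines/decreaseSubspines only record a pending variation, which the next addRow applies, so a *^ split followed by two *v merges plays out as below. The bookkeeping never touches the importer, so None is passed for it.

```python
from kernpy.core.import_humdrum_old import Spine

spine = Spine('**kern', None)      # the importer is not exercised here

spine.addRow()                     # row 0: one subspine
spine.increaseSubspines()          # a *^ (split) was read in row 0
spine.addRow()                     # row 1: the split takes effect
print(spine.importing_subspines)   # -> 2

spine.decreaseSubspines()          # *v
spine.decreaseSubspines()          # *v: the two merges collapse into one subspine
spine.addRow()                     # row 2: back to a single subspine
print(spine.importing_subspines)   # -> 1
```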
+ class Signatures:
+     def __init__(self, header_row, clef_row, key_signature_row, time_signature_row, meter_symbol_row):
+         self.last_header_row = header_row
+         self.last_clef_row = clef_row
+         self.last_key_signature_row = key_signature_row
+         self.last_time_signature_row = time_signature_row
+         self.last_meter_symbol_row = meter_symbol_row
+
+     def clone(self):
+         return Signatures(self.last_header_row, self.last_clef_row, self.last_key_signature_row,
+                           self.last_time_signature_row, self.last_meter_symbol_row)
+
+
+ class HumdrumImporter:
+     HEADERS = {"**mens", "**kern", "**text", "**harm", "**mxhm", "**root", "**dyn", "**dynam", "**fing"}
+     SPINE_OPERATIONS = {"*-", "*+", "*^", "*v"}
+
+     def __init__(self):
+         self.spines = []
+         self.current_spine_index = 0
+         # self.page_start_rows = []
+         self.measure_start_rows = []  # starting from 1. Rows after removing empty lines and line comments
+         self.page_bounding_boxes = {}
+         self.last_measure_number = None
+         self.last_bounding_box = None
+         self.errors = []
+
+     def getMetacomments(self, KeyComment: str = None, clean: bool = True):  # each metacomment is contained in all spines as a reference to the same object
+         """
+         Get the metacomments of the file.
+
+         Args:
+             KeyComment: The key of the metacomment (optional).\
+                 If not specified, all the metacomments will be returned.
+                 If specified, all the content of the metacomments with the specified key will be returned.
+             clean: If True, the metacomments will be returned applying a .strip(). Only valid if KeyComment is not None.
+
+         Returns:
+             A list with the metacomments.\
+                 If KeyComment is not None, a list will be returned anyway. \
+                 If there are no metacomments with the specified key, an empty list will be returned.
+
+         Example:
+             >>> from kernpy import HumdrumImporter
+             >>> importer = HumdrumImporter()
+
+             # Read the file
+             >>> importer.doImportFile('file.krn')
+
+             # Get all the metacomments
+             >>> all_metacomments = importer.getMetacomments()
+             # ... modify the metacomments using your own logic
+
+             # Get the metacomments with the key: get the composer:
+             >>> composer = importer.getMetacomments(KeyComment='!!!COM')
+
+             # Check if your kern file format is compatible with the expected format. If it is not, do not clean it:
+             >>> raw_composer = importer.getMetacomments(KeyComment='!!!COM', clean=False)
+
+         """
+         result = []
+         for token in self.spines[0].rows:
+             if isinstance(token[0], MetacommentToken):
+                 if clean:
+                     result.append(token[0].encoding.strip())
+                 else:
+                     result.append(token[0].encoding)
+
+         if KeyComment is not None:
+             clean_rows = [row.replace('!!!', '').replace('!!', '') for row in result]
+             filtered_rows = [row for row in clean_rows if row.startswith(KeyComment)]
+             valid_rows = [row.replace(KeyComment, '').strip()[2:] for row in filtered_rows] if clean else filtered_rows
+             return valid_rows
+
+         return result
+
+     def doImport(self, reader):
+         importers = {}
+         header_row_number = None
+         row_number = 1
+         pending_metacomments = []  # those appearing before the headers
+         for row in reader:
+             for spine in self.spines:
+                 self.current_spine_index = 0
+                 spine.addRow()
+             if len(row) > 0:  # the last one
+                 if row[0].startswith("!!"):
+                     mt = MetacommentToken(row[0])
+                     if len(self.spines) == 0:
+                         pending_metacomments.append(mt)
+                     else:
+                         for spine in self.spines:
+                             spine.addToken(mt)  # the same reference for all spines
+                 else:
+                     is_barline = False
+                     for column in row:
+                         if column in self.HEADERS:
+                             if header_row_number is not None and header_row_number != row_number:
+                                 raise Exception(
+                                     f"Several header rows not supported, there is a header row in #{header_row_number} and another in #{row_number} ")
+
+                             header_row_number = row_number
+                             importer = importers.get(column)
+                             if not importer:
+                                 importer = createImporter(column)
+                                 importers[column] = importer
+                             spine = Spine(column, importer)  # TODO: Add instrument
+                             for pending_metacomment in pending_metacomments:
+                                 spine.addRow()
+                                 spine.addToken(pending_metacomment)  # same reference for all spines
+
+                             token = HeaderToken(column)
+                             spine.addRow()
+                             spine.addToken(token)
+                             self.spines.append(spine)
+                         else:
+                             try:
+                                 current_spine = self.getNextSpine()
+                                 logging.debug(
+                                     f'Row #{row_number}, current spine #{self.current_spine_index} of size {current_spine.importing_subspines}, and importer {current_spine.importer}')
+                             except Exception as e:
+                                 raise Exception(
+                                     f'Cannot get next spine at row {row_number}: {e} while reading row {row} ')
+
+                             if column in self.SPINE_OPERATIONS:
+                                 current_spine.addToken(SpineOperationToken(column))
+
+                                 if column == '*-':
+                                     current_spine.terminate()
+                                 elif column == "*+" or column == "*^":
+                                     current_spine.increaseSubspines()
+                                 elif column == "*v":
+                                     current_spine.decreaseSubspines()
+                             else:
+                                 if column.startswith("!"):
+                                     token = FieldCommentToken(column)
+                                 else:
+                                     try:
+                                         token = current_spine.importer.run(column)
+                                     except Exception as error:
+                                         token = ErrorToken(column, row_number, error)
+                                         self.errors.append(token)
+                                 if not token:
+                                     raise Exception(
+                                         f'No token generated for input {column} in row number #{row_number} using importer {current_spine.importer}')
+                                 current_spine.addToken(token)
+                                 if token.category == TokenCategory.BARLINES or token.category == TokenCategory.CORE and len(
+                                         self.measure_start_rows) == 0:
+                                     is_barline = True
+                                 elif isinstance(token, BoundingBoxToken):
+                                     self.handleBoundingBox(token)
+
+                     if is_barline:
+                         self.measure_start_rows.append(row_number)
+                         self.last_measure_number = len(self.measure_start_rows)
+                         if self.last_bounding_box:
+                             self.last_bounding_box.to_measure = self.last_measure_number
+             row_number = row_number + 1
+
+     def doImportFile(self, file_path: string):
+         """
+         Import the content of a Humdrum **kern file into the importer.
+         Args:
+             file_path: The path to the file.
+
+         Returns:
+             None
+
+         Example:
+             # Create the importer and read the file
+             >>> hi = HumdrumImporter()
+             >>> hi.doImportFile('file.krn')
+         """
+         with open(file_path, 'r', newline='', encoding='utf-8', errors='ignore') as file:
+             reader = csv.reader(file, delimiter='\t')
+             self.doImport(reader)
+
+     def doImportString(self, text: string):
+         lines = text.splitlines()
+         reader = csv.reader(lines)
+         self.doImport(reader)
+
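A hedged sketch of importing an in-memory fragment with doImportString; the single-spine fragment below is illustrative only (note that, unlike doImportFile, this method relies on csv.reader's default dialect rather than a tab delimiter):

```python
from kernpy.core.import_humdrum_old import HumdrumImporter

# A tiny single-spine fragment, assembled inline for illustration.
fragment = '\n'.join([
    '**kern',
    '*clefG2',
    '=1',
    '4c',
    '=',
    '*-',
])

importer = HumdrumImporter()
importer.doImportString(fragment)
print(len(importer))           # number of spines read
print(importer.hasErrors())    # True if any token failed to parse
```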
+     def getSpine(self, index: int) -> Spine:
+         if index < 0:
+             raise Exception(f'Negative index {index}')
+         elif index >= len(self.spines):
+             raise Exception(f'Index {index} out of bounds for an array of {len(self.spines)} spines')
+         return self.spines[index]
+
+     def getNextSpine(self):
+         spine = self.getSpine(self.current_spine_index)
+         while spine.isFullRow() and self.current_spine_index < (len(self.spines) - 1):
+             self.current_spine_index = self.current_spine_index + 1
+             spine = self.getSpine(self.current_spine_index)
+
+         if self.current_spine_index == len(self.spines):
+             raise Exception('All spines are full, the spine divisions may be wrong')
+
+         return spine
+
+     def doExportNormalizedKern(self, options: ExportOptions) -> string:
+         options.kern_type = Encoding.normalizedKern
+         return self.doExport(options)
+
+     def doExportEKern(self, options: ExportOptions) -> string:
+         options.kern_type = Encoding.eKern
+         return self.doExport(options)
+
+     def doExportUnprocessed(self, options: ExportOptions) -> string:
+         options.kern_type = Encoding.unprocessed
+         return self.doExport(options)
+
+     def handleBoundingBox(self, token: BoundingBoxToken):
+         page_number = token.page_number
+         last_page_bb = self.page_bounding_boxes.get(page_number)
+         if last_page_bb is None:
+             # print(f'Adding {page_number}')
+             if self.last_measure_number is None:
+                 self.last_measure_number = 0
+             self.last_bounding_box = BoundingBoxMeasures(token.bounding_box, self.last_measure_number,
+                                                          self.last_measure_number)
+             self.page_bounding_boxes[page_number] = self.last_bounding_box
+         else:
+             # print(f'Extending page {page_number}')
+             last_page_bb.bounding_box.extend(token.bounding_box)
+             last_page_bb.to_measure = self.last_measure_number
+
+     def getMaxRows(self):
+         return max(spine.size() for spine in self.spines)
+
+     def checkMeasure(self, measure_number):
+         if measure_number < 0:
+             raise Exception(f'The measure number must be >=1, and it is {measure_number}')
+
+         max_measures = len(self.measure_start_rows)
+         if measure_number > max_measures:
+             raise Exception(f'The measure number must be <= {max_measures}, and it is {measure_number}')
+
+     def doExport(self, options: ExportOptions) -> string:
+         max_rows = self.getMaxRows()
+         signatures_at_each_row = []
+         row_contents = []
+
+         if options.from_measure is not None and options.from_measure < 0:
+             raise ValueError(f'option from_measure must be >=0 but {options.from_measure} was found. ')
+         if options.to_measure is not None and options.to_measure > len(self.measure_start_rows):
+             # TODO: DAVID, check options.to_measure bounds. len(self.measure_start_rows) or len(self.measure_start_rows) - 1
+             raise ValueError(f'option to_measure must be <= {len(self.measure_start_rows)} but {options.to_measure} was found. ')
+         if options.to_measure is not None and options.from_measure is not None and options.to_measure < options.from_measure:
+             raise ValueError(f'option to_measure must be >= from_measure but {options.to_measure} < {options.from_measure} was found. ')
+
+         last_signature = None
+         for i in range(max_rows):
+             row_result = ''
+             if last_signature:
+                 current_signature = last_signature.clone()
+             else:
+                 current_signature = Signatures(None, None, None, None, None)
+             last_signature = current_signature
+             empty = True
+             for spine in self.spines:
+                 if spine.spine_type in options.spine_types:
+                     if i < spine.size():  # required because the spine may be terminated
+                         if len(row_result) > 0:
+                             row_result += '\t'
+
+                         content = spine.getRowContent(i, options.kern_type, options.token_categories)
+
+                         if content and content != '.' and content != '*':
+                             empty = False
+                         if options.from_measure:  # if not, we don't need to compute this value
+                             if spine.isContentOfType(i, HeaderToken):
+                                 current_signature.last_header_row = i
+                             elif spine.isContentOfType(i, ClefToken):
+                                 current_signature.last_clef_row = i
+                             elif spine.isContentOfType(i, KeySignatureToken):
+                                 current_signature.last_key_signature_row = i
+                             elif spine.isContentOfType(i, TimeSignatureToken):
+                                 current_signature.last_time_signature_row = i
+                             elif spine.isContentOfType(i, MeterSymbolToken):
+                                 current_signature.last_meter_symbol_row = i
+
+                         row_result += content
+             if not empty:
+                 row_contents.append(row_result)
+             else:
+                 row_contents.append(None)  # in order to maintain the indexes
+
+             signatures_at_each_row.append(current_signature)
+
+         # if last_header_row is None:
+         #     raise Exception('No header row found')
+         #
+         # if last_clef_row is None:
+         #     raise Exception('No clef row found')
+         #
+         # if last_time_signature_row is None and last_meter_symbol_row is None:
+         #     raise Exception('No time signature or meter symbol row found')
+
+         result = ''
+         if options.from_measure is None and options.to_measure is None:
+             for row_content in row_contents:
+                 if row_content:
+                     result += row_content
+                     result += '\n'
+         else:
+             if options.from_measure:
+                 self.checkMeasure(options.from_measure)
+             else:
+                 options.from_measure = 0
+
+             if options.to_measure:
+                 self.checkMeasure(options.to_measure)
+             else:
+                 options.to_measure = len(self.measure_start_rows)
+
+             from_row = self.measure_start_rows[options.from_measure - 1] - 1  # measures and rows are counted from 1
+             if options.to_measure == len(self.measure_start_rows):
+                 to_row = self.measure_start_rows[options.to_measure - 1]
+             else:
+                 to_row = self.measure_start_rows[options.to_measure]  # to the next one
+             signature = signatures_at_each_row[from_row]
+
+             # first, attach the signatures if not in the exported range
+             result = self.addSignatureRowIfRequired(row_contents, result, from_row, signature.last_header_row)
+             result = self.addSignatureRowIfRequired(row_contents, result, from_row, signature.last_clef_row)
+             result = self.addSignatureRowIfRequired(row_contents, result, from_row, signature.last_key_signature_row)
+             result = self.addSignatureRowIfRequired(row_contents, result, from_row, signature.last_time_signature_row)
+             result = self.addSignatureRowIfRequired(row_contents, result, from_row, signature.last_meter_symbol_row)
+
+             for row in range(from_row, to_row):
+                 row_content = row_contents[row]
+                 if row_content:
+                     result += row_content
+                     result += '\n'
+
+             if to_row < max_rows:
+                 row_content = ''
+                 for spine in self.spines:
+                     if spine.spine_type in options.spine_types and not spine.isTerminated():
+                         if len(row_content) > 0:
+                             row_content += '\t'
+                         row_content += '*-'
+                 result += row_content
+                 result += '\n'
+         return result
+
+     def addSignatureRowIfRequired(self, row_contents, result, from_row, signature_row):
+         if signature_row is not None and signature_row < from_row:
+             srow = row_contents[signature_row]
+             result += srow
+             result += '\n'
+         return result
+
+     def getErrorMessages(self):
+         result = ''
+         for err in self.errors:
+             result += str(err)
+             result += '\n'
+         return result
+
+     def hasErrors(self):
+         return len(self.errors) > 0
+
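To make the measure-range branch of doExport above concrete, a hedged usage sketch (the path and measure numbers are hypothetical): when from_measure/to_measure are set, the exporter first re-emits the last header, clef, key signature, time signature and meter symbol rows seen before the range, then the rows of the requested measures, and, when the range ends before the file does, a closing *- row.

```python
from kernpy.core.import_humdrum_old import ExportOptions, HumdrumImporter
from kernpy.core.tokens import BEKERN_CATEGORIES

importer = HumdrumImporter()
importer.doImportFile('score.krn')   # hypothetical path

# Export only measures 5 to 10 of the **kern spines.
options = ExportOptions(spine_types=['**kern'],
                        token_categories=BEKERN_CATEGORIES,
                        from_measure=5,
                        to_measure=10)
excerpt = importer.doExport(options)
print(excerpt)
```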
+     def has_token(self, token_goal: str):
+         """
+         Check if the importer has a specific token.
+
+         Args:
+             token_goal: The token to check.
+
+         Returns:
+             True if the importer has the token, False otherwise.
+
+         Example:
+             # Create the importer
+             >>> hi = HumdrumImporter()
+
+             # Read the file
+             >>> hi.doImportFile('file.krn')
+
+             # Check if the importer has a specific token
+             >>> has_f_4_clef = hi.has_token('*clefF4')
+         """
+         for spine in self.spines:
+             for row in spine.rows:
+                 if any(token.encoding == token_goal for token in row):
+                     return True
+
+         return False
+
+     def has_category(self, token_category_goal: TokenCategory):
+         """
+         Check if the importer has a specific token category.
+
+         Args:
+             token_category_goal: The token category to check.
+         Returns:
+             True if the importer has the token category, False otherwise.
+
+         Example:
+             # Create the importer
+             >>> hi = HumdrumImporter()
+
+             # Read the file
+             >>> hi.doImportFile('file.krn')
+
+             # Check if the importer has a specific token category
+             >>> has_barlines = hi.has_category(TokenCategory.BARLINES)
+
+         """
+         for spine in self.spines:
+             for row in spine.rows:
+                 for token in row:
+                     if token.category == token_category_goal:
+                         return True
+         return False
+
+     def get_all_tokens(self, apply_strip: bool = True, remove_measure_numbers: bool = False, filter_by_categories: Iterable = None) -> list:
+         """
+         Get all the tokens in the importer.
+
+         Args:
+             apply_strip: If True, the tokens will be stripped. False otherwise. Default is True.
+             remove_measure_numbers: If True, the measure numbers will be removed. False otherwise. Default is False.
+             filter_by_categories: An Iterable (like a list) with the categories to filter the tokens. Default is None.\
+                 Only the tokens with the categories in the list will be returned.
+
+
+         Returns:
+             A list with all the tokens in the importer.
+
+         Example:
+             # Create the importer
+             >>> hi = HumdrumImporter()
+
+             # Read the file
+             >>> hi.doImportFile('file.krn')
+
+             # Get all the tokens
+             >>> all_tokens = hi.get_all_tokens()
+
+             # Get all the tokens without measure numbers
+             >>> all_tokens = hi.get_all_tokens(remove_measure_numbers=True)
+
+             # Get all the tokens without measure numbers and filtered by categories
+             >>> all_tokens = hi.get_all_tokens(remove_measure_numbers=True, filter_by_categories=[TokenCategory.BARLINES, TokenCategory.FINGERING, TokenCategory.CORE])
+
+             # Get all tokens used in the bekern codification
+             >>> all_tokens = hi.get_all_tokens(remove_measure_numbers=True, filter_by_categories=BEKERN_CATEGORIES)
+
+         """
+         MEASURE_START = '='
+         DIGITS_TO_REMOVE = string.digits
+         result = []
+         for spine in self.spines:
+             for row in spine.rows:
+                 for token in row:
+                     if filter_by_categories is not None and token.category not in filter_by_categories:
+                         continue
+
+                     if remove_measure_numbers and token.encoding.startswith(MEASURE_START):
+                         token.encoding = token.encoding.lstrip(DIGITS_TO_REMOVE)
+
+                     if apply_strip:
+                         token.encoding = token.encoding.strip()
+
+                     result.append(token.encoding)
+
+         return result
+
+     def get_unique_tokens(self, apply_strip: bool = True, remove_measure_numbers: bool = False, filter_by_categories: Iterable = None) -> list:
+         """
+         Get the unique tokens in the importer.
+
+         Args:
+             apply_strip: If True, the tokens will be stripped. False otherwise. Default is True.
+             remove_measure_numbers: If True, the measure numbers will be removed. False otherwise. Default is False.
+             filter_by_categories: An Iterable (like a list) with the categories to filter the tokens. Default is None.\
+                 Only the tokens with the categories in the list will be returned.
+
+         Returns:
+             A list with the unique tokens in the importer.
+
+         Example:
+             # Create the importer
+             >>> hi = HumdrumImporter()
+
+             # Read the file
+             >>> hi.doImportFile('file.krn')
+
+             # Get the unique tokens
+             >>> unique_tokens = hi.get_unique_tokens()
+
+             # Get the unique tokens without measure numbers
+             >>> unique_tokens = hi.get_unique_tokens(remove_measure_numbers=True)
+
+             # Get the unique tokens without measure numbers and filtered by categories
+             >>> unique_tokens = hi.get_unique_tokens(remove_measure_numbers=True, filter_by_categories=[TokenCategory.BARLINES, TokenCategory.KEYSIGNATURE, TokenCategory.CORE])
+
+             # Get the unique tokens used in the bekern codification
+             >>> unique_tokens = hi.get_unique_tokens(remove_measure_numbers=True, filter_by_categories=BEKERN_CATEGORIES)
+
+         """
+         all_tokens = self.get_all_tokens(apply_strip=apply_strip, remove_measure_numbers=remove_measure_numbers, filter_by_categories=filter_by_categories)
+         return list(set(all_tokens))
+
+     def is_voice_in_tessitura(self, voice: int, tessitura: tuple) -> bool:
+         """
+         Check if a voice is in a tessitura.
+
+         Args:
+             voice: The voice to check.
+             tessitura: A tuple with the tessitura. The first element is the lower limit, and the second element is the upper limit.
+
+         Returns:
+             True if the voice is in the tessitura, False otherwise.
+
+         Example:
+             # Create the importer
+             >>> hi = HumdrumImporter()
+
+             # Read the file
+             >>> hi.doImportFile('file.krn')
+
+             # Check if the voice 1 is in the tessitura (C4, G4)
+             >>> is_in_tessitura = hi.is_voice_in_tessitura(1, ('c4', 'g4'))
+         """
+         raise NotImplementedError('This method is not implemented yet.')  # TODO: Implement this method
+         min_tessitura = tessitura[0].lower()
+         max_tessitura = tessitura[1].lower()
+
+         all_tokens = None
+         for row in self.spines[voice].rows:
+             all_tokens = [token.encoding.lower() for token in row if isinstance(token.category, SpineOperationToken)]  # TODO: Find the category that only lets notes through
+
+         for token in all_tokens:
+             if token < min_tessitura or token > max_tessitura:
+                 return False
+
+         return True
+
+     def __len__(self):
+         """
+         Get the number of spines in the importer.
+         """
+         return len(self.spines)
+
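A brief, hedged sketch of the inspection helpers defined in this class (editorial illustration; the path is hypothetical and the module paths come from the file list above):

```python
from kernpy.core.import_humdrum_old import HumdrumImporter
from kernpy.core.tokens import BEKERN_CATEGORIES, TokenCategory

importer = HumdrumImporter()
importer.doImportFile('score.krn')   # hypothetical path

metacomments = importer.getMetacomments()                 # all metacomment (!!) records
has_g_clef = importer.has_token('*clefG2')                # exact token lookup
has_lyrics = importer.has_category(TokenCategory.LYRICS)  # category lookup
vocabulary = importer.get_unique_tokens(remove_measure_numbers=True,
                                        filter_by_categories=BEKERN_CATEGORIES)
print(len(metacomments), has_g_clef, has_lyrics, len(vocabulary))
```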
+ def get_kern_from_ekern(ekern_content: string) -> string:
+     """
+     Read the content of a **ekern file and return the **kern content.
+
+     Args:
+         ekern_content: The content of the **ekern file.
+     Returns:
+         The content of the **kern file.
+
+     Example:
+         ```python
+         # Read **ekern file
+         ekern_file = 'path/to/file.ekrn'
+         with open(ekern_file, 'r') as file:
+             ekern_content = file.read()
+
+         # Get **kern content
+         kern_content = get_kern_from_ekern(ekern_content)
+         with open('path/to/file.krn', 'w') as file:
+             file.write(kern_content)
+
+         ```
+     """
+     content = ekern_content.replace("**ekern", "**kern")  # TODO: Use a constant based on the headers
+     content = content.replace(TOKEN_SEPARATOR, "")
+     content = content.replace(DECORATION_SEPARATOR, "")
+
+     return content
+
+
+ def ekern_to_krn(input_file, output_file) -> None:
+     """
+     Convert one .ekrn file to a .krn file.
+
+     Args:
+         input_file: Filepath to the input **ekern
+         output_file: Filepath to the output **kern
+     Returns:
+         None
+
+     Example:
+         # Convert .ekrn to .krn
+         >>> ekern_to_krn('path/to/file.ekrn', 'path/to/file.krn')
+
+         # Convert a list of .ekrn files to .krn files
+         ```python
+         ekrn_files = your_module.get_files()
+
+         # Use the wrapper to avoid stopping the process if an error occurs
+         def ekern_to_krn_wrapper(ekern_file, kern_file):
+             try:
+                 ekern_to_krn(ekern_file, kern_file)
+             except Exception as e:
+                 print(f'Error:{e}')
+
+         # Convert all the files
+         for ekern_file in ekrn_files:
+             output_file = ekern_file.replace('.ekrn', '.krn')
+             ekern_to_krn_wrapper(ekern_file, output_file)
+         ```
+     """
+     with open(input_file, 'r') as file:
+         content = file.read()
+
+     kern_content = get_kern_from_ekern(content)
+
+     with open(output_file, 'w') as file:
+         file.write(kern_content)
+
+
+ def kern_to_ekern(input_file, output_file) -> None:
+     """
+     Convert one .krn file to a .ekrn file.
+
+     Args:
+         input_file: Filepath to the input **kern
+         output_file: Filepath to the output **ekern
+
+     Returns:
+         None
+
+     Example:
+         # Convert .krn to .ekrn
+         >>> kern_to_ekern('path/to/file.krn', 'path/to/file.ekrn')
+
+         # Convert a list of .krn files to .ekrn files
+         ```python
+         krn_files = your_module.get_files()
+
+         # Use the wrapper to avoid stopping the process if an error occurs
+         def kern_to_ekern_wrapper(krn_file, ekern_file):
+             try:
+                 kern_to_ekern(krn_file, ekern_file)
+             except Exception as e:
+                 print(f'Error:{e}')
+
+         # Convert all the files
+         for krn_file in krn_files:
+             output_file = krn_file.replace('.krn', '.ekrn')
+             kern_to_ekern_wrapper(krn_file, output_file)
+         ```
+
+     """
+     importer = HumdrumImporter()
+     importer.doImportFile(input_file)
+
+     if len(importer.errors):
+         raise Exception(f'ERROR: {input_file} has errors {importer.getErrorMessages()}')
+
+     export_options = ExportOptions(spine_types=['**kern'], token_categories=BEKERN_CATEGORIES, kern_type=Encoding.eKern)
+     exported_ekern = importer.doExport(export_options)
+
+     with open(output_file, 'w') as file:
+         file.write(exported_ekern)
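Finally, a hedged batch-conversion sketch in the spirit of the docstring examples above (the scores directory is hypothetical):

```python
from pathlib import Path

from kernpy.core.import_humdrum_old import kern_to_ekern

# Convert every .krn file in a (hypothetical) folder to .ekrn, skipping failures.
for krn_file in Path('scores').glob('*.krn'):
    try:
        kern_to_ekern(str(krn_file), str(krn_file.with_suffix('.ekrn')))
    except Exception as error:
        # kern_to_ekern raises when the importer reported token errors.
        print(f'Skipping {krn_file}: {error}')
```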