kernpy 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kernpy/core/basic_spine_importer.py CHANGED
@@ -24,8 +24,11 @@ class BasicSpineImporter(SpineImporter):
     def import_token(self, encoding: str) -> Token:
         self._raise_error_if_wrong_input(encoding)

-        kern_spine_importer = KernSpineImporter()
-        token = kern_spine_importer.import_token(encoding)
+        try:
+            kern_spine_importer = KernSpineImporter()
+            token = kern_spine_importer.import_token(encoding)
+        except Exception as e:
+            return SimpleToken(encoding, TokenCategory.OTHER)

         ACCEPTED_CATEGORIES = {
             TokenCategory.STRUCTURAL,
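The same guard recurs in every spine importer changed below: token parsing is delegated to `KernSpineImporter`, and any exception now degrades to a `SimpleToken` carrying a spine-appropriate default category instead of aborting the import. A minimal sketch of the shared shape (the standalone `FallbackSpineImporter` class is illustrative, not part of the package, and the `ACCEPTED_CATEGORIES` filtering that follows in the real importers is omitted):

```python
from kernpy.core.kern_spine_importer import KernSpineImporter
from kernpy.core.tokens import SimpleToken, Token, TokenCategory


class FallbackSpineImporter:  # hypothetical, for illustration only
    FALLBACK_CATEGORY = TokenCategory.OTHER  # each real importer picks its own

    def import_token(self, encoding: str) -> Token:
        try:
            # Delegate to the generic **kern parser first.
            return KernSpineImporter().import_token(encoding)
        except Exception:
            # In 1.0.3 a parse failure degrades to a catch-all token
            # instead of propagating and aborting the whole import.
            return SimpleToken(encoding, self.FALLBACK_CATEGORY)
```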
kernpy/core/dynam_spine_importer.py CHANGED
@@ -23,8 +23,11 @@ class DynamSpineImporter(SpineImporter):
     def import_token(self, encoding: str) -> Token:
         self._raise_error_if_wrong_input(encoding)

-        kern_spine_importer = KernSpineImporter()
-        token = kern_spine_importer.import_token(encoding)
+        try:
+            kern_spine_importer = KernSpineImporter()
+            token = kern_spine_importer.import_token(encoding)
+        except Exception as e:
+            return SimpleToken(encoding, TokenCategory.DYNAMICS)

         ACCEPTED_CATEGORIES = {
             TokenCategory.STRUCTURAL,
kernpy/core/exporter.py CHANGED
@@ -153,7 +153,7 @@ class HeaderTokenGenerator:
     """
     HeaderTokenGenerator class.

-    This class is used to translate the HeaderTokens to the specific tokenizer format.
+    This class is used to translate the HeaderTokens to the specific encoding format.
     """
     @classmethod
     def new(cls, *, token: HeaderToken, type: Encoding):
@@ -162,7 +162,7 @@ class HeaderTokenGenerator:

         Args:
             token (HeaderToken): The HeaderToken to be translated.
-            type (Encoding): The tokenizer to be used.
+            type (Encoding): The encoding to be used.

         Examples:
             >>> header = HeaderToken('**kern', 0)
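The docstring's example is truncated here; it presumably continues by passing the header through `new`. A hedged sketch of what that call looks like (the import paths and the resulting header value are assumptions, not shown in this diff):

```python
from kernpy.core.exporter import HeaderTokenGenerator
from kernpy.core.tokens import HeaderToken      # assumed import path
from kernpy.core.tokenizers import Encoding

header = HeaderToken('**kern', 0)
translated = HeaderTokenGenerator.new(token=header, type=Encoding.eKern)
# For the eKern encoding this should yield an eKern-style header,
# presumably '**ekern' (assumption based on the enum's 'ekern' value).
```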
kernpy/core/fing_spine_importer.py CHANGED
@@ -24,8 +24,11 @@ class FingSpineImporter(SpineImporter):
     def import_token(self, encoding: str) -> Token:
         self._raise_error_if_wrong_input(encoding)

-        kern_spine_importer = KernSpineImporter()
-        token = kern_spine_importer.import_token(encoding)
+        try:
+            kern_spine_importer = KernSpineImporter()
+            token = kern_spine_importer.import_token(encoding)
+        except Exception as e:
+            return SimpleToken(encoding, TokenCategory.FINGERING)

         ACCEPTED_CATEGORIES = {
             TokenCategory.STRUCTURAL,
kernpy/core/harm_spine_importer.py CHANGED
@@ -23,8 +23,11 @@ class HarmSpineImporter(SpineImporter):
    def import_token(self, encoding: str) -> Token:
        self._raise_error_if_wrong_input(encoding)

-        kern_spine_importer = KernSpineImporter()
-        token = kern_spine_importer.import_token(encoding)
+        try:
+            kern_spine_importer = KernSpineImporter()
+            token = kern_spine_importer.import_token(encoding)
+        except Exception as e:
+            return SimpleToken(encoding, TokenCategory.HARMONY)

        ACCEPTED_CATEGORIES = {
            TokenCategory.STRUCTURAL,
kernpy/core/mhxm_spine_importer.py CHANGED
@@ -4,7 +4,7 @@ from typing import Optional
 from .kern_spine_importer import KernSpineListener, KernSpineImporter
 from .base_antlr_spine_parser_listener import BaseANTLRSpineParserListener
 from .spine_importer import SpineImporter
-from .tokens import MHXMToken, Token, TokenCategory
+from .tokens import MHXMToken, Token, TokenCategory, SimpleToken


 class MxhmSpineImporter(SpineImporter):
@@ -23,8 +23,11 @@ class MxhmSpineImporter(SpineImporter):
     def import_token(self, encoding: str) -> Token:
         self._raise_error_if_wrong_input(encoding)

-        kern_spine_importer = KernSpineImporter()
-        token = kern_spine_importer.import_token(encoding)
+        try:
+            kern_spine_importer = KernSpineImporter()
+            token = kern_spine_importer.import_token(encoding)
+        except Exception as e:
+            return SimpleToken(encoding, TokenCategory.HARMONY)

         ACCEPTED_CATEGORIES = {
             TokenCategory.STRUCTURAL,
@@ -39,6 +42,3 @@ class MxhmSpineImporter(SpineImporter):
             return SimpleToken(encoding, TokenCategory.HARMONY)

         return token
-
-        return MHXMToken(encoding)
-
kernpy/core/text_spine_importer.py CHANGED
@@ -24,8 +24,11 @@ class TextSpineImporter(SpineImporter):
     def import_token(self, encoding: str) -> Token:
         self._raise_error_if_wrong_input(encoding)

-        kern_spine_importer = KernSpineImporter()
-        token = kern_spine_importer.import_token(encoding)
+        try:
+            kern_spine_importer = KernSpineImporter()
+            token = kern_spine_importer.import_token(encoding)
+        except Exception as e:
+            return SimpleToken(encoding, TokenCategory.LYRICS)

         ACCEPTED_CATEGORIES = {
             TokenCategory.STRUCTURAL,
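Taken together, these hunks give each spine type its own graceful-degradation category: the basic importer falls back to `OTHER`, `DynamSpineImporter` to `DYNAMICS`, `FingSpineImporter` to `FINGERING`, `HarmSpineImporter` and `MxhmSpineImporter` to `HARMONY`, and `TextSpineImporter` to `LYRICS`. A quick way to observe the new behavior (the no-argument constructor, the malformed input string, and the `category` attribute name are assumptions for illustration):

```python
from kernpy.core.text_spine_importer import TextSpineImporter
from kernpy.core.tokens import TokenCategory

importer = TextSpineImporter()
# In 1.0.1 an unparseable token raised; in 1.0.3 it degrades gracefully.
token = importer.import_token('some unparseable content')
assert token.category == TokenCategory.LYRICS  # attribute name assumed
```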
kernpy/core/tokenizers.py CHANGED
@@ -18,7 +18,7 @@ class Encoding(Enum): # TODO: Eventually, polymorphism will be used to export d
     >>> doc, _ = kp.load('path/to/file.krn')
     >>>
     >>> # Save the file using the specified encoding
-    >>> exported_content = kp.dumps(tokenizer=kp.Encoding.normalizedKern)
+    >>> exported_content = kp.dumps(doc, encoding=kp.Encoding.normalizedKern)
     """
     eKern = 'ekern'
     normalizedKern = 'kern'
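With the rename, the doctest now matches the public API: `kp.dumps` takes `encoding=`, and the enum members shown here map to the export formats (`'ekern'` and `'kern'`). A short usage sketch (the file path is a placeholder):

```python
import kernpy as kp

doc, _ = kp.load('path/to/file.krn')  # placeholder path

kern_out = kp.dumps(doc, encoding=kp.Encoding.normalizedKern)  # 'kern'
ekern_out = kp.dumps(doc, encoding=kp.Encoding.eKern)          # 'ekern'
```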
kernpy/io/public.py CHANGED
@@ -12,7 +12,9 @@ from kernpy import Encoding
 from kernpy.core import (
     Document, Importer, Exporter, ExportOptions, GraphvizExporter,
     generic,
-    TokenCategoryHierarchyMapper)
+    TokenCategoryHierarchyMapper,
+    TokenCategory,
+)


 def load(fp: Union[str, Path], *, raise_on_errors: Optional[bool] = False, **kwargs) -> (Document, List[str]):
@@ -84,13 +86,13 @@ def loads(s, *, raise_on_errors: Optional[bool] = False, **kwargs) -> (Document,


 def dump(document: Document, fp: Union[str, Path], *,
-         spine_types: [] = None,
-         include: [] = None,
-         exclude: [] = None,
+         spine_types: [str] = None,
+         include: [TokenCategory] = None,
+         exclude: [TokenCategory] = None,
          from_measure: int = None,
          to_measure: int = None,
-         tokenizer: Encoding = None,
-         instruments: [] = None,
+         encoding: Encoding = None,
+         instruments: [str] = None,
          show_measure_numbers: bool = None,
          spine_ids: [int] = None
          ) -> None:
@@ -104,7 +106,7 @@ def dump(document: Document, fp: Union[str, Path], *,
         exclude (Iterable): The token categories to exclude from the exported file. When None, no token categories will be excluded.
         from_measure (int): The measure to start exporting. When None, the exporter will start from the beginning of the file. The first measure is 1.
         to_measure (int): The measure to end exporting. When None, the exporter will end at the end of the file.
-        tokenizer (Encoding): The type of the **kern file to export.
+        encoding (Encoding): The type of the **kern file to export.
         instruments (Iterable): The instruments to export. If None, all the instruments will be exported.
         show_measure_numbers (Bool): Show the measure numbers in the exported file.
         spine_ids (Iterable): The ids of the spines to export. When None, all the spines will be exported. \
@@ -130,7 +132,7 @@ def dump(document: Document, fp: Union[str, Path], *,
         exclude=exclude,
         from_measure=from_measure,
         to_measure=to_measure,
-        kern_type=tokenizer,
+        kern_type=encoding,
         instruments=instruments,
         show_measure_numbers=show_measure_numbers,
         spine_ids=spine_ids
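Callers migrating from 1.0.1 therefore rename one keyword; `dump` forwards it internally as `kern_type`. A sketch of a 1.0.3-style call (file names are placeholders):

```python
import kernpy as kp

doc, _ = kp.load('score.krn')  # placeholder input file

kp.dump(doc, 'score.ekrn',
        spine_types=['**kern'],
        include=kp.BEKERN_CATEGORIES,
        encoding=kp.Encoding.eKern)  # was tokenizer=... in 1.0.1
```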
@@ -144,13 +146,13 @@ def dump(document: Document, fp: Union[str, Path], *,


 def dumps(document: Document, *,
-          spine_types: [] = None,
-          include: [] = None,
-          exclude: [] = None,
+          spine_types: [str] = None,
+          include: [TokenCategory] = None,
+          exclude: [TokenCategory] = None,
           from_measure: int = None,
           to_measure: int = None,
-          tokenizer: Encoding = None,
-          instruments: [] = None,
+          encoding: Encoding = None,
+          instruments: [str] = None,
           show_measure_numbers: bool = None,
           spine_ids: [int] = None
           ) -> str:
@@ -164,7 +166,7 @@ def dumps(document: Document, *,
         exclude (Iterable): The token categories to exclude from the exported file. When None, no token categories will be excluded.
         from_measure (int): The measure to start exporting. When None, the exporter will start from the beginning of the file. The first measure is 1.
         to_measure (int): The measure to end exporting. When None, the exporter will end at the end of the file.
-        tokenizer (Encoding): The type of the **kern file to export.
+        encoding (Encoding): The type of the **kern file to export.
         instruments (Iterable): The instruments to export. If None, all the instruments will be exported.
         show_measure_numbers (Bool): Show the measure numbers in the exported file.
         spine_ids (Iterable): The ids of the spines to export. When None, all the spines will be exported. \
@@ -189,7 +191,7 @@ def dumps(document: Document, *,
         exclude=exclude,
         from_measure=from_measure,
         to_measure=to_measure,
-        kern_type=tokenizer,
+        kern_type=encoding,
         instruments=instruments,
         show_measure_numbers=show_measure_numbers,
         spine_ids=spine_ids
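`dumps` mirrors `dump` but returns the exported content as a string. Since the old keyword is gone, 1.0.1-style calls need the same one-line change (a hedged migration sketch; the file name is a placeholder):

```python
import kernpy as kp

doc, _ = kp.load('score.krn')  # placeholder

# 1.0.1 (no longer accepted):
#   content = kp.dumps(doc, tokenizer=kp.Encoding.eKern)

# 1.0.3:
content = kp.dumps(doc, encoding=kp.Encoding.eKern)
```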
kernpy-1.0.1.dist-info/METADATA → kernpy-1.0.3.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kernpy
-Version: 1.0.1
+Version: 1.0.3
 Summary: Python Humdrum **kern and **mens utilities
 Project-URL: Homepage, https://github.com/OMR-PRAIG-UA-ES/kernpy
 Project-URL: Documentation, https://github.com/OMR-PRAIG-UA-ES/kernpy#readme
@@ -174,32 +174,36 @@ kp.dump(document, "newfile_custom.krn",
 print(kp.BEKERN_CATEGORIES)
 ```

-- Use `tokenizer` to select how the categories are split. By default, the `normalizedKern` tokenizer is used.
+- Use `encoding` to select how the categories are split. By default, the `normalizedKern` encoding is used.

 ```python
 import kernpy as kp

 kp.dump(document, "newfile_normalized.krn",
-        tokenizer=kp.Encoding.normalizedKern)  # Default tokenizer
+        encoding=kp.Encoding.normalizedKern)  # Default encoding
 ```
-  Select the proper Humdrum **kern tokenizer:
+  Select the proper Humdrum **kern encoding:

-  `kernpy` provides different tokenizers to export the content each symbol in different formats.
+  `kernpy` provides different encodings to export the content of each symbol in different formats.

-  | Encoding | Tokenized    | Description                             |
+  | Encoding | Output       | Description                             |
   |----------|--------------|-----------------------------------------|
   | kern     | 2.bb-_L      | Traditional Humdrum **kern encoding     |
   | ekern    | 2@.@bb@-·_·L | Extended Humdrum **kern encoding        |
+  | bkern    | 2.bb-        | Basic Humdrum **kern encoding           |
+  | bekern   | 2@.@bb@-     | Basic Extended Humdrum **kern encoding  |

-  Use the `Encoding` enum class to select the tokenizer:
+  Use the `Encoding` enum class to select the encoding:

 ```python
 import kernpy as kp

 doc, _ = kp.load('resource_dir/legacy/chor048.krn')

-kern_content = kp.dumps(doc, tokenizer=kp.Encoding.normalizedKern)
-ekern_content = kp.dumps(doc, tokenizer=kp.Encoding.eKern)
+kern_content = kp.dumps(doc, encoding=kp.Encoding.normalizedKern)
+ekern_content = kp.dumps(doc, encoding=kp.Encoding.eKern)
+bkern_content = kp.dumps(doc, encoding=kp.Encoding.bKern)
+bekern_content = kp.dumps(doc, encoding=kp.Encoding.bEkern)
 ```

 - Use `from_measure` and `to_measure` to select the measures to export. By default, all the measures are exported.
@@ -236,7 +240,7 @@ kp.dump(document, "newfile.krn",
         spine_types=['**kern'],            # Export only the **kern spines
         include=kp.BEKERN_CATEGORIES,      # Token categories to include
         exclude={kp.TokenCategory.PITCH},  # Token categories to exclude
-        tokenizer=kp.Encoding.eKern,       # Kern encoding
+        encoding=kp.Encoding.eKern,        # Kern encoding
         from_measure=1,                    # Start from measure 1
         to_measure=10,                     # Last measure exported
         spine_ids=[0, 1],                  # Export only the first and second spines
@@ -339,7 +343,7 @@ for page_label, bounding_box_measure in doc.page_bounding_boxes.items():
     kp.dump(doc, f"foo_{page_label}.ekrn",
             spine_types=['**kern'],
             token_categories=kp.BEKERN_CATEGORIES,
-            tokenizer=kp.Encoding.eKern,
+            encoding=kp.Encoding.eKern,
             from_measure=bounding_box_measure.from_measure,
             to_measure=bounding_box_measure.to_measure - 1  # TODO: Check bounds
     )
kernpy-1.0.1.dist-info/RECORD → kernpy-1.0.3.dist-info/RECORD RENAMED
@@ -6,28 +6,28 @@ kernpy/core/__init__.py,sha256=gIbOKMWcPcQc4c6QpslWnAw9XkcMJ6OvedHANyDABbs,2581
 kernpy/core/_io.py,sha256=WqH30iDjfzGRCIzlqrAE9Oi_nfyleUpp8xGK8xZPh_w,1208
 kernpy/core/base_antlr_importer.py,sha256=2pPJ7sdWWCBU9Vw2ZAx-j-674DlSp82ajapeex-pF2I,1475
 kernpy/core/base_antlr_spine_parser_listener.py,sha256=HqKYACgzdiIc1fAoSaiLzfogkv7pAuiqUHlIRF8c--c,9837
-kernpy/core/basic_spine_importer.py,sha256=aYceG5wxlm7qTrd0KTR6-P4v4VVlYYPEe8omd3vmC0I,1523
+kernpy/core/basic_spine_importer.py,sha256=NMVyjK6B8qGDzU8inW8IJO1vc1s-TuZ-P-RbXWMTdIQ,1637
 kernpy/core/document.py,sha256=Y_Wpwc-t1GkaGr2QVU0hqLRFJv9wJ_Nm6UitgwAlT8w,34892
 kernpy/core/dyn_importer.py,sha256=dKgZqVIdEELUu8HY63ClLJAMQXXItjHmaYxxOwFZRiY,1025
-kernpy/core/dynam_spine_importer.py,sha256=crg4LwsWr56VZ6GLYAO1CDnLuum9f1tlX_pXCqKemEc,1450
+kernpy/core/dynam_spine_importer.py,sha256=Efw1g9J4Y1LvINsrEJnw7RDi9-cmwcM5RrkamIDa8iI,1567
 kernpy/core/error_listener.py,sha256=bTlAZvVIQx_6BLZ3oRut_pn24JnV8_7GnNONiwoMMPU,1516
-kernpy/core/exporter.py,sha256=5MWOk1JmPtpXfjN_VBv5jxUhYu8hhAZwt9eB5u9f1eM,21371
-kernpy/core/fing_spine_importer.py,sha256=5GLPPakmZVRcNbbaoCn1p1cQdat5QttflbpKu3AytQU,1382
+kernpy/core/exporter.py,sha256=t2Ep-qe_r1CvEmPb3KoMpzHd2W3SfEcjY8XhFOUqGxs,21369
+kernpy/core/fing_spine_importer.py,sha256=hrP0lipiBFnfq_NpsF91uv6Xj0Z-LXDzzy6ls8dKR0c,1500
 kernpy/core/generic.py,sha256=9OTDE2t49laVk6fgtydK5yZh7SCfZwViP_dZUJlskA8,10967
 kernpy/core/gkern.py,sha256=nRYFLFJ0uOLJwio0DKxQ0YIqLixy6-qCM-eY8DNdxfM,16332
 kernpy/core/graphviz_exporter.py,sha256=lzeJ0sJoKCDYam1oLJ4pjnlf_NHBJFH0V9w0Pjic5SI,3447
-kernpy/core/harm_spine_importer.py,sha256=L2eVxW4-tMG3JCtCwLVx6En-ulSn9S90wdUoffj_U5s,1379
+kernpy/core/harm_spine_importer.py,sha256=88JplRAtgKi39z0u4Mpv_QR0Di4YMPdapNHU6Nk7INY,1495
 kernpy/core/import_humdrum_old.py,sha256=NZTQgANQc2BCYJRbGG6AyxcGHxjyy8XXa9dz88zrnMk,35304
 kernpy/core/importer.py,sha256=Rg4vIvEl2yhMwYrN5NenhdQG4_2dNByCRvHCEBJgWI8,13684
 kernpy/core/importer_factory.py,sha256=mXYWaAMyd8q1o2hJg5m0gxpBHX-nX6KGPyWl-zjrX7o,1570
 kernpy/core/kern_spine_importer.py,sha256=9jKDTDDcH81U4Vws4x5msUDq89QzwbdEGYb6uHt-cDI,2581
 kernpy/core/mens_spine_importer.py,sha256=GGutGL3qeatIkdHZxiOJNcT5mc70acJ728xu-VVLkOU,692
-kernpy/core/mhxm_spine_importer.py,sha256=45QhuqIgXKkLMFkZYk17sdgOH8dfzK08mWE24fGZ-n4,1410
+kernpy/core/mhxm_spine_importer.py,sha256=uLcwfahqAhTWqW8-YOpRKgevh-uuklgv6EiCtbBVAS0,1502
 kernpy/core/pitch_models.py,sha256=TpSt1uxGYhbhXs0viB1dNo4jiGxyCOHWseyRHTpfH-0,9730
 kernpy/core/root_spine_importer.py,sha256=w7oFbMeXakHF9OAlgKWm_HaGJoNq6u7BE_JUnvLiLnU,1893
 kernpy/core/spine_importer.py,sha256=_VoWcVGfDMhxDn-hCu18g_29Aw2c2bjLnaXuvW2Fbsc,1430
-kernpy/core/text_spine_importer.py,sha256=mz1qX2nviJs0VsGsN87jk6BVWCIE7mmB9IxEx4RdhT8,1522
-kernpy/core/tokenizers.py,sha256=dJsaCdPStruzToaYA-RyM9beJ9tQyZRjM4qKLK0d0k4,7899
+kernpy/core/text_spine_importer.py,sha256=AhLeLC-obEEQ5cuuk8bpdQ-GlFVFIbeVzoW6thbxYlA,1637
+kernpy/core/tokenizers.py,sha256=-m-isPSvWbikTLj5pfzG-4UCLrjxPfqHJD7s4znLHCo,7898
 kernpy/core/tokens.py,sha256=0XhbYm6F9NDfYpP5gbrgzZL3qB_xWs74faTwAZghpyM,66515
 kernpy/core/transposer.py,sha256=EqsEy7Z3bWIXdxUTruBrg8z9sWdJ3Cd_KIzo_6ZuNNA,9352
 kernpy/core/generated/kernSpineLexer.interp,sha256=Y3_seqmHQu3czO9fHE8XtRwTrk8g4Txf0IKiMaHBqeg,28801
@@ -39,13 +39,13 @@ kernpy/core/generated/kernSpineParser.tokens,sha256=ciGB91o4t7VM-tgSLtGB-BUFzHNo
 kernpy/core/generated/kernSpineParserListener.py,sha256=fcOPxYkGbNbIw8Qk9aqYXnuTB48WWJCn_ZYxTdoFtpY,40015
 kernpy/core/generated/kernSpineParserVisitor.py,sha256=YSEdcb4kuIi-E4rAFhDKsCVu0lyPdlqrCMtQAeVDhZY,23821
 kernpy/io/__init__.py,sha256=9103cE_Ao1tu2mi4EaFni-YmnCnwtJqQqFqgeFuT7rA,147
-kernpy/io/public.py,sha256=p5f22Vse54vU3QYDBn5Z5iJXhRLrF9OTG1MYD8QhdGc,13782
+kernpy/io/public.py,sha256=ge7A2N77efVG5dMpLDH2jxtkVi_2EOq-TnhDggyg9vQ,13861
 kernpy/polish_scores/__init__.py,sha256=EjILt5N5T9cKKdhWwflS_MU4TXuLhdHOYhW8txaicrI,217
 kernpy/polish_scores/download_polish_dataset.py,sha256=VqACpxDiM1yQYDs4QHHlZ87ZW2gkjl6R_bxYh5E67K4,14633
 kernpy/polish_scores/iiif.py,sha256=yITdrQbMsGmm8qag8TSousjCjS2io148Jk9xwEKa3B4,1526
 kernpy/util/__init__.py,sha256=dfW3nRSMkGQS7p7YebM271_0H8_pVw4IiDKeINs_LI8,152
 kernpy/util/helpers.py,sha256=Xbj3nWNyErdOpLHYd4uVyfwleXvmC5_FlYEhxaeTtS8,1549
 kernpy/util/store_cache.py,sha256=AA7SFCGQ6ryy-c02wbLy8gIQ2C-VdM1JSWFoWIK_KLA,1186
-kernpy-1.0.1.dist-info/METADATA,sha256=1SpNbdsg1KHO4FnR7vSeabwlsueGoA8y-4RDN-v2vZk,15096
-kernpy-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-kernpy-1.0.1.dist-info/RECORD,,
+kernpy-1.0.3.dist-info/METADATA,sha256=NIFoh9en6RHm59sa5VvC5EPez0TnltGLHL7xXjx9ttM,15357
+kernpy-1.0.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kernpy-1.0.3.dist-info/RECORD,,