kernpy-1.0.1-py3-none-any.whl → kernpy-1.0.2-py3-none-any.whl

kernpy/core/exporter.py CHANGED
@@ -153,7 +153,7 @@ class HeaderTokenGenerator:
     """
     HeaderTokenGenerator class.
 
-    This class is used to translate the HeaderTokens to the specific tokenizer format.
+    This class is used to translate the HeaderTokens to the specific encoding format.
     """
     @classmethod
     def new(cls, *, token: HeaderToken, type: Encoding):
@@ -162,7 +162,7 @@ class HeaderTokenGenerator:
 
         Args:
            token (HeaderToken): The HeaderToken to be translated.
-           type (Encoding): The tokenizer to be used.
+           type (Encoding): The encoding to be used.
 
        Examples:
            >>> header = HeaderToken('**kern', 0)
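For reference, the renamed keyword in use — a minimal sketch, assuming `HeaderToken`, `Encoding`, and `HeaderTokenGenerator` are importable from `kernpy.core` (an assumption based on this file living in `kernpy/core/`):

```python
# Sketch of the keyword-only signature shown above; the import path is an
# assumption, not confirmed by the diff.
from kernpy.core import Encoding, HeaderToken, HeaderTokenGenerator

header = HeaderToken('**kern', 0)  # as in the doctest above
ekern_header = HeaderTokenGenerator.new(token=header, type=Encoding.eKern)
```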
kernpy/core/tokenizers.py CHANGED
@@ -18,7 +18,7 @@ class Encoding(Enum): # TODO: Eventually, polymorphism will be used to export d
     >>> doc, _ = kp.load('path/to/file.krn')
     >>>
     >>> # Save the file using the specified encoding
-    >>> exported_content = kp.dumps(tokenizer=kp.Encoding.normalizedKern)
+    >>> exported_content = kp.dumps(encoding=kp.Encoding.normalizedKern)
     """
     eKern = 'ekern'
     normalizedKern = 'kern'
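The enum members themselves are unchanged; only the keyword that selects them was renamed. A minimal sketch of the new call shape, mirroring the README examples further down (the input path is hypothetical):

```python
import kernpy as kp

doc, _ = kp.load('path/to/file.krn')  # load returns (Document, errors)

# 1.0.1: kp.dumps(doc, tokenizer=kp.Encoding.eKern)  <- old keyword, removed
# 1.0.2: the keyword is now `encoding`
ekern_content = kp.dumps(doc, encoding=kp.Encoding.eKern)
```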
kernpy/io/public.py CHANGED
@@ -12,7 +12,9 @@ from kernpy import Encoding
 from kernpy.core import (
     Document, Importer, Exporter, ExportOptions, GraphvizExporter,
     generic,
-    TokenCategoryHierarchyMapper)
+    TokenCategoryHierarchyMapper,
+    TokenCategory,
+)
 
 
 def load(fp: Union[str, Path], *, raise_on_errors: Optional[bool] = False, **kwargs) -> (Document, List[str]):
@@ -84,13 +86,13 @@ def loads(s, *, raise_on_errors: Optional[bool] = False, **kwargs) -> (Document,
 
 
 def dump(document: Document, fp: Union[str, Path], *,
-         spine_types: [] = None,
-         include: [] = None,
-         exclude: [] = None,
+         spine_types: [str] = None,
+         include: [TokenCategory] = None,
+         exclude: [TokenCategory] = None,
          from_measure: int = None,
          to_measure: int = None,
-         tokenizer: Encoding = None,
-         instruments: [] = None,
+         encoding: Encoding = None,
+         instruments: [str] = None,
          show_measure_numbers: bool = None,
          spine_ids: [int] = None
          ) -> None:
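Renaming a keyword-only parameter is a breaking change for existing callers. A minimal migration sketch (file paths are hypothetical; the argument names are the ones in the signature above):

```python
import kernpy as kp

doc, _ = kp.load('score.krn')  # hypothetical input file

# 1.0.1 -- raises TypeError in 1.0.2, since the keyword no longer exists:
# kp.dump(doc, 'score.ekrn', tokenizer=kp.Encoding.eKern)

# 1.0.2:
kp.dump(doc, 'score.ekrn', encoding=kp.Encoding.eKern)
```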
@@ -104,7 +106,7 @@ def dump(document: Document, fp: Union[str, Path], *,
         exclude (Iterable): The token categories to exclude from the exported file. When None, no token categories will be excluded.
         from_measure (int): The measure to start exporting. When None, the exporter will start from the beginning of the file. The first measure is 1
         to_measure (int): The measure to end exporting. When None, the exporter will end at the end of the file.
-        tokenizer (Encoding): The type of the **kern file to export.
+        encoding (Encoding): The type of the **kern file to export.
         instruments (Iterable): The instruments to export. If None, all the instruments will be exported.
         show_measure_numbers (Bool): Show the measure numbers in the exported file.
         spine_ids (Iterable): The ids of the spines to export. When None, all the spines will be exported. \
@@ -130,7 +132,7 @@ def dump(document: Document, fp: Union[str, Path], *,
         exclude=exclude,
         from_measure=from_measure,
         to_measure=to_measure,
-        kern_type=tokenizer,
+        kern_type=encoding,
         instruments=instruments,
         show_measure_numbers=show_measure_numbers,
         spine_ids=spine_ids
@@ -144,13 +146,13 @@ def dump(document: Document, fp: Union[str, Path], *,
 
 
 def dumps(document: Document, *,
-          spine_types: [] = None,
-          include: [] = None,
-          exclude: [] = None,
+          spine_types: [str] = None,
+          include: [TokenCategory] = None,
+          exclude: [TokenCategory] = None,
           from_measure: int = None,
           to_measure: int = None,
-          tokenizer: Encoding = None,
-          instruments: [] = None,
+          encoding: Encoding = None,
+          instruments: [str] = None,
           show_measure_numbers: bool = None,
           spine_ids: [int] = None
          ) -> str:
@@ -164,7 +166,7 @@ def dumps(document: Document, *,
         exclude (Iterable): The token categories to exclude from the exported file. When None, no token categories will be excluded.
         from_measure (int): The measure to start exporting. When None, the exporter will start from the beginning of the file. The first measure is 1
         to_measure (int): The measure to end exporting. When None, the exporter will end at the end of the file.
-        tokenizer (Encoding): The type of the **kern file to export.
+        encoding (Encoding): The type of the **kern file to export.
         instruments (Iterable): The instruments to export. If None, all the instruments will be exported.
         show_measure_numbers (Bool): Show the measure numbers in the exported file.
         spine_ids (Iterable): The ids of the spines to export. When None, all the spines will be exported. \
@@ -189,7 +191,7 @@ def dumps(document: Document, *,
         exclude=exclude,
         from_measure=from_measure,
         to_measure=to_measure,
-        kern_type=tokenizer,
+        kern_type=encoding,
         instruments=instruments,
         show_measure_numbers=show_measure_numbers,
         spine_ids=spine_ids
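In both `dump` and `dumps`, the public `encoding` keyword is simply forwarded to the exporter's `kern_type` option; only the public name changed. A sketch of that mapping, assuming `ExportOptions` accepts the keywords that appear in the calls above (an inference from this diff, not the full signature):

```python
from kernpy import Encoding
from kernpy.core import ExportOptions

# Keyword set inferred from the dump()/dumps() bodies in this diff;
# treat it as an assumption, not documented API.
options = ExportOptions(
    spine_types=['**kern'],
    kern_type=Encoding.eKern,  # receives the value passed as `encoding=`
)
```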
kernpy-1.0.1.dist-info/METADATA → kernpy-1.0.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kernpy
-Version: 1.0.1
+Version: 1.0.2
 Summary: Python Humdrum **kern and **mens utilities
 Project-URL: Homepage, https://github.com/OMR-PRAIG-UA-ES/kernpy
 Project-URL: Documentation, https://github.com/OMR-PRAIG-UA-ES/kernpy#readme
@@ -174,32 +174,32 @@ kp.dump(document, "newfile_custom.krn",
 print(kp.BEKERN_CATEGORIES)
 ```
 
-- Use `tokenizer` to select how the categories are split. By default, the `normalizedKern` tokenizer is used.
+- Use `encoding` to select how the categories are split. By default, the `normalizedKern` encoding is used.
 
 ```python
 import kernpy as kp
 
 kp.dump(document, "newfile_normalized.krn",
-        tokenizer=kp.Encoding.normalizedKern) # Default tokenizer
+        encoding=kp.Encoding.normalizedKern) # Default encoding
 ```
-Select the proper Humdrum **kern tokenizer:
+Select the proper Humdrum **kern encoding:
 
-`kernpy` provides different tokenizers to export the content each symbol in different formats.
+`kernpy` provides different encodings to export the content each symbol in different formats.
 
-| Encoding | Tokenized    | Description                            |
+| Encoding | Output       | Description                            |
 |----------|--------------|----------------------------------------|
 | kern     | 2.bb-_L      | Traditional Humdrum **kern encoding    |
 | ekern    | 2@.@bb@-·_·L | Extended Humdrum **kern encoding       |
 
-Use the `Encoding` enum class to select the tokenizer:
+Use the `Encoding` enum class to select the encoding:
 
 ```python
 import kernpy as kp
 
 doc, _ = kp.load('resource_dir/legacy/chor048.krn')
 
-kern_content = kp.dumps(doc, tokenizer=kp.Encoding.normalizedKern)
-ekern_content = kp.dumps(doc, tokenizer=kp.Encoding.eKern)
+kern_content = kp.dumps(doc, encoding=kp.Encoding.normalizedKern)
+ekern_content = kp.dumps(doc, encoding=kp.Encoding.eKern)
 ```
 
 - Use `from_measure` and `to_measure` to select the measures to export. By default, all the measures are exported.
@@ -236,7 +236,7 @@ kp.dump(document, "newfile.krn",
         spine_types=['**kern'], # Export only the **kern spines
         include=kp.BEKERN_CATEGORIES, # Token categories to include
         exclude={kp.TokenCategory.PITCH}, # Token categories to exclude
-        tokenizer=kp.Encoding.eKern, # Kern encoding
+        encoding=kp.Encoding.eKern, # Kern encoding
         from_measure=1, # First from measure 1
         to_measure=10, # Last measure exported
         spine_ids=[0, 1], # Export only the first and the second spine
@@ -339,7 +339,7 @@ for page_label, bounding_box_measure in doc.page_bounding_boxes.items():
     kp.dump(doc, f"foo_{page_label}.ekrn",
             spine_types=['**kern'],
             token_categories=kp.BEKERN_CATEGORIES,
-            tokenizer=kp.Encoding.eKern,
+            encoding=kp.Encoding.eKern,
             from_measure=bounding_box_measure.from_measure,
             to_measure=bounding_box_measure.to_measure - 1 # TODO: Check bounds
     )
kernpy-1.0.1.dist-info/RECORD → kernpy-1.0.2.dist-info/RECORD CHANGED
@@ -11,7 +11,7 @@ kernpy/core/document.py,sha256=Y_Wpwc-t1GkaGr2QVU0hqLRFJv9wJ_Nm6UitgwAlT8w,34892
 kernpy/core/dyn_importer.py,sha256=dKgZqVIdEELUu8HY63ClLJAMQXXItjHmaYxxOwFZRiY,1025
 kernpy/core/dynam_spine_importer.py,sha256=crg4LwsWr56VZ6GLYAO1CDnLuum9f1tlX_pXCqKemEc,1450
 kernpy/core/error_listener.py,sha256=bTlAZvVIQx_6BLZ3oRut_pn24JnV8_7GnNONiwoMMPU,1516
-kernpy/core/exporter.py,sha256=5MWOk1JmPtpXfjN_VBv5jxUhYu8hhAZwt9eB5u9f1eM,21371
+kernpy/core/exporter.py,sha256=t2Ep-qe_r1CvEmPb3KoMpzHd2W3SfEcjY8XhFOUqGxs,21369
 kernpy/core/fing_spine_importer.py,sha256=5GLPPakmZVRcNbbaoCn1p1cQdat5QttflbpKu3AytQU,1382
 kernpy/core/generic.py,sha256=9OTDE2t49laVk6fgtydK5yZh7SCfZwViP_dZUJlskA8,10967
 kernpy/core/gkern.py,sha256=nRYFLFJ0uOLJwio0DKxQ0YIqLixy6-qCM-eY8DNdxfM,16332
@@ -27,7 +27,7 @@ kernpy/core/pitch_models.py,sha256=TpSt1uxGYhbhXs0viB1dNo4jiGxyCOHWseyRHTpfH-0,9
 kernpy/core/root_spine_importer.py,sha256=w7oFbMeXakHF9OAlgKWm_HaGJoNq6u7BE_JUnvLiLnU,1893
 kernpy/core/spine_importer.py,sha256=_VoWcVGfDMhxDn-hCu18g_29Aw2c2bjLnaXuvW2Fbsc,1430
 kernpy/core/text_spine_importer.py,sha256=mz1qX2nviJs0VsGsN87jk6BVWCIE7mmB9IxEx4RdhT8,1522
-kernpy/core/tokenizers.py,sha256=dJsaCdPStruzToaYA-RyM9beJ9tQyZRjM4qKLK0d0k4,7899
+kernpy/core/tokenizers.py,sha256=-m-isPSvWbikTLj5pfzG-4UCLrjxPfqHJD7s4znLHCo,7898
 kernpy/core/tokens.py,sha256=0XhbYm6F9NDfYpP5gbrgzZL3qB_xWs74faTwAZghpyM,66515
 kernpy/core/transposer.py,sha256=EqsEy7Z3bWIXdxUTruBrg8z9sWdJ3Cd_KIzo_6ZuNNA,9352
 kernpy/core/generated/kernSpineLexer.interp,sha256=Y3_seqmHQu3czO9fHE8XtRwTrk8g4Txf0IKiMaHBqeg,28801
@@ -39,13 +39,13 @@ kernpy/core/generated/kernSpineParser.tokens,sha256=ciGB91o4t7VM-tgSLtGB-BUFzHNo
 kernpy/core/generated/kernSpineParserListener.py,sha256=fcOPxYkGbNbIw8Qk9aqYXnuTB48WWJCn_ZYxTdoFtpY,40015
 kernpy/core/generated/kernSpineParserVisitor.py,sha256=YSEdcb4kuIi-E4rAFhDKsCVu0lyPdlqrCMtQAeVDhZY,23821
 kernpy/io/__init__.py,sha256=9103cE_Ao1tu2mi4EaFni-YmnCnwtJqQqFqgeFuT7rA,147
-kernpy/io/public.py,sha256=p5f22Vse54vU3QYDBn5Z5iJXhRLrF9OTG1MYD8QhdGc,13782
+kernpy/io/public.py,sha256=ge7A2N77efVG5dMpLDH2jxtkVi_2EOq-TnhDggyg9vQ,13861
 kernpy/polish_scores/__init__.py,sha256=EjILt5N5T9cKKdhWwflS_MU4TXuLhdHOYhW8txaicrI,217
 kernpy/polish_scores/download_polish_dataset.py,sha256=VqACpxDiM1yQYDs4QHHlZ87ZW2gkjl6R_bxYh5E67K4,14633
 kernpy/polish_scores/iiif.py,sha256=yITdrQbMsGmm8qag8TSousjCjS2io148Jk9xwEKa3B4,1526
 kernpy/util/__init__.py,sha256=dfW3nRSMkGQS7p7YebM271_0H8_pVw4IiDKeINs_LI8,152
 kernpy/util/helpers.py,sha256=Xbj3nWNyErdOpLHYd4uVyfwleXvmC5_FlYEhxaeTtS8,1549
 kernpy/util/store_cache.py,sha256=AA7SFCGQ6ryy-c02wbLy8gIQ2C-VdM1JSWFoWIK_KLA,1186
-kernpy-1.0.1.dist-info/METADATA,sha256=1SpNbdsg1KHO4FnR7vSeabwlsueGoA8y-4RDN-v2vZk,15096
-kernpy-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-kernpy-1.0.1.dist-info/RECORD,,
+kernpy-1.0.2.dist-info/METADATA,sha256=Uj7S-i6JIAMSEA4eyc-PTACGQX-hch51Ze2fnOJH1C4,15085
+kernpy-1.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kernpy-1.0.2.dist-info/RECORD,,