partitura 1.3.0__py3-none-any.whl → 1.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. partitura/directions.py +3 -0
  2. partitura/display.py +0 -1
  3. partitura/io/__init__.py +41 -35
  4. partitura/io/exportmatch.py +52 -10
  5. partitura/io/exportmidi.py +37 -19
  6. partitura/io/exportmusicxml.py +6 -92
  7. partitura/io/exportparangonada.py +18 -19
  8. partitura/io/importkern.py +2 -4
  9. partitura/io/importmatch.py +121 -39
  10. partitura/io/importmei.py +161 -34
  11. partitura/io/importmidi.py +23 -14
  12. partitura/io/importmusic21.py +0 -1
  13. partitura/io/importmusicxml.py +48 -63
  14. partitura/io/importparangonada.py +0 -1
  15. partitura/io/matchfile_base.py +0 -21
  16. partitura/io/matchfile_utils.py +29 -17
  17. partitura/io/matchlines_v0.py +0 -22
  18. partitura/io/matchlines_v1.py +8 -42
  19. partitura/io/musescore.py +68 -41
  20. partitura/musicanalysis/__init__.py +1 -1
  21. partitura/musicanalysis/note_array_to_score.py +147 -92
  22. partitura/musicanalysis/note_features.py +66 -51
  23. partitura/musicanalysis/performance_codec.py +140 -96
  24. partitura/musicanalysis/performance_features.py +190 -129
  25. partitura/musicanalysis/pitch_spelling.py +0 -2
  26. partitura/musicanalysis/tonal_tension.py +0 -6
  27. partitura/musicanalysis/voice_separation.py +1 -22
  28. partitura/performance.py +178 -5
  29. partitura/score.py +154 -74
  30. partitura/utils/__init__.py +1 -1
  31. partitura/utils/generic.py +3 -7
  32. partitura/utils/misc.py +0 -1
  33. partitura/utils/music.py +108 -66
  34. partitura/utils/normalize.py +75 -35
  35. partitura/utils/synth.py +1 -7
  36. {partitura-1.3.0.dist-info → partitura-1.4.0.dist-info}/METADATA +2 -2
  37. partitura-1.4.0.dist-info/RECORD +51 -0
  38. {partitura-1.3.0.dist-info → partitura-1.4.0.dist-info}/WHEEL +1 -1
  39. partitura-1.3.0.dist-info/RECORD +0 -51
  40. {partitura-1.3.0.dist-info → partitura-1.4.0.dist-info}/LICENSE +0 -0
  41. {partitura-1.3.0.dist-info → partitura-1.4.0.dist-info}/top_level.txt +0 -0

partitura/musicanalysis/performance_features.py

@@ -10,36 +10,39 @@ import types
  from typing import Union, List
  import warnings
  import numpy as np
- import matplotlib.pyplot as plt
- from scipy import stats
- from scipy.optimize import least_squares
  from scipy.signal import find_peaks
  import numpy.lib.recfunctions as rfn
  from partitura.score import ScoreLike
  from partitura.performance import PerformanceLike, PerformedPart
  from partitura.utils.generic import interp1d
- from partitura.musicanalysis.performance_codec import to_matched_score, onsetwise_to_notewise, encode_tempo
+ from partitura.musicanalysis.performance_codec import (
+     to_matched_score,
+     onsetwise_to_notewise,
+     encode_tempo,
+ )


  __all__ = [
      "make_performance_features",
  ]

- # ordinal
+ # ordinal
  OLS = ["ppp", "pp", "p", "mp", "mf", "f", "ff", "fff"]


  class InvalidPerformanceFeatureException(Exception):
      pass

- def make_performance_features(score: ScoreLike,
-                               performance: PerformanceLike,
-                               alignment: list,
-                               feature_functions: Union[List, str],
-                               add_idx: bool = True
-                               ):
+
+ def make_performance_features(
+     score: ScoreLike,
+     performance: PerformanceLike,
+     alignment: list,
+     feature_functions: Union[List, str],
+     add_idx: bool = True,
+ ):
      """
-     Compute the performance features. This function is defined in the same
+     Compute the performance features. This function is defined in the same
      style of note_features.make_note_features

      Parameters
@@ -53,8 +56,8 @@ def make_performance_features(score: ScoreLike,
      feature_functions : list or str
          A list of performance feature functions. Elements of the list can be either
          the functions themselves or the names of a feature function as
-         strings (or a mix).
-         currently implemented:
+         strings (or a mix).
+         currently implemented:
          asynchrony_feature, articulation_feature, dynamics_feature, pedal_feature
      add_idx: bool
          add score note idx column to feature array
@@ -63,7 +66,9 @@ def make_performance_features(score: ScoreLike,
      -------
      performance_features : structured array
      """
-     m_score, unique_onset_idxs, snote_ids = compute_matched_score(score, performance, alignment)
+     m_score, unique_onset_idxs, snote_ids = compute_matched_score(
+         score, performance, alignment
+     )

      acc = []
      if isinstance(feature_functions, str) and feature_functions == "all":
@@ -105,28 +110,38 @@ def make_performance_features(score: ScoreLike,
              raise InvalidPerformanceFeatureException(msg)

          # prefix feature names by function name
-         feature_names = ["{}.{}".format(func.__name__, n) for n in features.dtype.names]
-         features = rfn.rename_fields(features, dict(zip(features.dtype.names, feature_names)))
+         feature_names = [
+             "{}.{}".format(func.__name__, n) for n in features.dtype.names
+         ]
+         features = rfn.rename_fields(
+             features, dict(zip(features.dtype.names, feature_names))
+         )

          acc.append(features)

      if add_idx:
-         acc.append(np.array([(idx) for idx in snote_ids], dtype = [("id","U256")]))
+         acc.append(np.array([(idx) for idx in snote_ids], dtype=[("id", "U256")]))

      performance_features = rfn.merge_arrays(acc, flatten=True, usemask=False)
      full_performance_features = rfn.join_by("id", performance_features, m_score)
      full_performance_features = full_performance_features.data

-     sort_idx = np.lexsort((full_performance_features["duration"],
-                            full_performance_features["pitch"],
-                            full_performance_features["onset"]))
+     sort_idx = np.lexsort(
+         (
+             full_performance_features["duration"],
+             full_performance_features["pitch"],
+             full_performance_features["onset"],
+         )
+     )
      full_performance_features = full_performance_features[sort_idx]
-     return full_performance_features
+     return full_performance_features


- def compute_matched_score(score: ScoreLike,
-                           performance: PerformanceLike,
-                           alignment: list,):
+ def compute_matched_score(
+     score: ScoreLike,
+     performance: PerformanceLike,
+     alignment: list,
+ ):
      """
      Compute the matched score and add the score features

@@ -145,18 +160,29 @@ def compute_matched_score(score: ScoreLike,
      unique_onset_idxs : list
      """

-     m_score, snote_ids = to_matched_score(score, performance, alignment, include_score_markings=True)
+     m_score, snote_ids = to_matched_score(
+         score, performance, alignment, include_score_markings=True
+     )
      (time_params, unique_onset_idxs) = encode_tempo(
          score_onsets=m_score["onset"],
          performed_onsets=m_score["p_onset"],
          score_durations=m_score["duration"],
          performed_durations=m_score["p_duration"],
          return_u_onset_idx=True,
-         tempo_smooth="average"
+         tempo_smooth="average",
+     )
+     m_score = rfn.append_fields(
+         m_score,
+         ["beat_period", "timing", "articulation_log", "id"],
+         [
+             time_params["beat_period"],
+             time_params["timing"],
+             time_params["articulation_log"],
+             snote_ids,
+         ],
+         ["f4", "f4", "f4", "U256"],
+         usemask=False,
      )
-     m_score = rfn.append_fields(m_score, ["beat_period", "id"],
-                                 [time_params['beat_period'], snote_ids],
-                                 ["f4", "U256"], usemask=False,)
      return m_score, unique_onset_idxs, snote_ids

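For orientation (not part of the diff), a minimal usage sketch of the two functions above. It assumes a match file on disk and that partitura.load_match(..., create_score=True) returns a performance, an alignment, and a score, in that order; the file name is hypothetical.

    import partitura as pt
    from partitura.musicanalysis.performance_features import make_performance_features

    # Hypothetical input; the return order of load_match is an assumption here.
    performance, alignment, score = pt.load_match("example.match", create_score=True)

    # One structured-array row per matched note: feature columns prefixed with the
    # feature function name (e.g. "asynchrony_feature.delta"), joined on "id" with
    # the matched-score fields added in compute_matched_score above.
    feats = make_performance_features(
        score, performance, alignment, feature_functions="all"
    )
    print(feats.dtype.names)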
 
@@ -205,98 +231,113 @@ def print_performance_feats_functions():
      )


-
  # alias
  list_performance_feature_functions = list_performance_feats_functions
  print_performance_feature_functions = print_performance_feats_functions

  ### Asynchrony

- def asynchrony_feature(m_score: np.ndarray,
-                        unique_onset_idxs: list,
-                        performance: PerformanceLike):
+
+ def asynchrony_feature(
+     m_score: np.ndarray, unique_onset_idxs: list, performance: PerformanceLike
+ ):
      """
-     Compute the asynchrony attributes from the alignment.
+     Compute the asynchrony attributes from the alignment.

      Parameters
      ----------
      m_score : list
-         correspondance between score and performance notes, with score markings.
+         correspondance between score and performance notes, with score markings.
      unique_onset_idxs : list
          a list of arrays with the note indexes that have the same onset
      performance: PerformedPart
          The original PerformedPart object
-
+
      Returns
      -------
      async_ : structured array
          structured array (broadcasted to the note level) with the following fields
-         delta [0, 1]: the largest time difference (in seconds) between onsets in this group
-         pitch_cor [-1, 1]: correlation between timing and pitch, min-scaling
+         delta [0, 1]: the largest time difference (in seconds) between onsets in this group
+         pitch_cor [-1, 1]: correlation between timing and pitch, min-scaling
          vel_cor [-1, 1]: correlation between timing and velocity, min-scaling
-         voice_std [0, 1]: std of the avg timing (in seconds) of each voice in this group
+         voice_std [0, 1]: std of the avg timing (in seconds) of each voice in this group
      """
-
-     async_ = np.zeros(len(unique_onset_idxs), dtype=[(
-         "delta", "f4"), ("pitch_cor", "f4"), ("vel_cor", "f4"), ("voice_std", "f4")])
-     for i, onset_idxs in enumerate(unique_onset_idxs):

+     async_ = np.zeros(
+         len(unique_onset_idxs),
+         dtype=[
+             ("delta", "f4"),
+             ("pitch_cor", "f4"),
+             ("vel_cor", "f4"),
+             ("voice_std", "f4"),
+         ],
+     )
+     for i, onset_idxs in enumerate(unique_onset_idxs):
          note_group = m_score[onset_idxs]

-         onset_times = note_group['p_onset']
+         onset_times = note_group["p_onset"]
          delta = min(onset_times.max() - onset_times.min(), 1)
-         async_[i]['delta'] = delta
+         async_[i]["delta"] = delta

-         midi_pitch = note_group['pitch']
-         midi_pitch = midi_pitch - midi_pitch.min() # min-scaling
+         midi_pitch = note_group["pitch"]
+         midi_pitch = midi_pitch - midi_pitch.min()  # min-scaling
          onset_times = onset_times - onset_times.min()
-         cor = (-1) * np.corrcoef(midi_pitch, onset_times)[0, 1] if (
-             len(midi_pitch) > 1 and sum(midi_pitch) != 0 and sum(onset_times) != 0) else 0
+         cor = (
+             (-1) * np.corrcoef(midi_pitch, onset_times)[0, 1]
+             if (len(midi_pitch) > 1 and sum(midi_pitch) != 0 and sum(onset_times) != 0)
+             else 0
+         )
          # cor=nan if there is only one note in the group
-         async_[i]['pitch_cor'] = cor
+         async_[i]["pitch_cor"] = cor

-         assert not np.isnan(cor)
+         assert not np.isnan(cor)

-         midi_vel = note_group['velocity'].astype(float)
+         midi_vel = note_group["velocity"].astype(float)
          midi_vel = midi_vel - midi_vel.min()
-         cor = (-1) * np.corrcoef(midi_vel, onset_times)[0, 1] if (
-             sum(midi_vel) != 0 and sum(onset_times) != 0) else 0
-         async_[i]['vel_cor'] = cor
+         cor = (
+             (-1) * np.corrcoef(midi_vel, onset_times)[0, 1]
+             if (sum(midi_vel) != 0 and sum(onset_times) != 0)
+             else 0
+         )
+         async_[i]["vel_cor"] = cor

-         assert not np.isnan(cor)
+         assert not np.isnan(cor)

-         voices = np.unique(note_group['voice'])
+         voices = np.unique(note_group["voice"])
          voices_onsets = []
          for voice in voices:
-             note_in_voice = note_group[note_group['voice'] == voice]
-             voices_onsets.append(note_in_voice['p_onset'].mean())
-         async_[i]['voice_std'] = min(np.std(np.array(voices_onsets)), 1)
-
+             note_in_voice = note_group[note_group["voice"] == voice]
+             voices_onsets.append(note_in_voice["p_onset"].mean())
+         async_[i]["voice_std"] = min(np.std(np.array(voices_onsets)), 1)
+
      return onsetwise_to_notewise(async_, unique_onset_idxs)


- ### Dynamics
+ ### Dynamics


  ### Articulation

- def articulation_feature(m_score : np.ndarray,
-                          unique_onset_idxs: list,
-                          performance: PerformanceLike,
-                          return_mask=False):
+
+ def articulation_feature(
+     m_score: np.ndarray,
+     unique_onset_idxs: list,
+     performance: PerformanceLike,
+     return_mask=False,
+ ):
      """
      Compute the articulation attributes (key overlap ratio) from the alignment.
      Key overlap ratio is the ratio between key overlap time (KOT) and IOI, result in a value between (-1, inf)
      -1 is the dummy value. For normalization purposes we empirically cap the maximum to 5.
-
+
      References
      ----------
      .. [1] B.Repp: Acoustics, Perception, and Production of Legato Articulation on a Digital Piano
-
+
      Parameters
      ----------
      m_score : list
-         correspondance between score and performance notes, with score markings.
+         correspondance between score and performance notes, with score markings.
      unique_onset_idxs : list
          a list of arrays with the note indexes that have the same onset
      performance: PerformedPart
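A standalone sketch of the pitch_cor idea above, with illustrative numbers and plain NumPy arrays rather than the structured m_score rows: onset times and pitches within one chord are min-shifted and correlated, and the sign is flipped.

    import numpy as np

    # One score onset played as a small low-to-high roll (made-up values).
    p_onset = np.array([0.00, 0.01, 0.02])           # seconds
    pitch = np.array([48.0, 60.0, 72.0])             # MIDI pitches

    delta = min(p_onset.max() - p_onset.min(), 1)    # chord spread, capped at 1 s
    onsets = p_onset - p_onset.min()                 # min-shifted timing
    pitches = pitch - pitch.min()                    # min-shifted pitch

    # Sign-flipped Pearson correlation: -1 for a perfectly low-to-high roll,
    # +1 for high-to-low, near 0 when timing and pitch are unrelated.
    pitch_cor = -np.corrcoef(pitches, onsets)[0, 1]
    print(delta, pitch_cor)                          # 0.02 -1.0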
@@ -308,41 +349,52 @@ def articulation_feature(m_score : np.ndarray,
      -------
      kor_ : structured array (1, n_notes)
          structured array on the note level with fields kor (-1, 5]
-     """
-
-     m_score = rfn.append_fields(m_score, "offset", m_score['onset'] + m_score['duration'], usemask=False)
-     m_score = rfn.append_fields(m_score, "p_offset", m_score['p_onset'] + m_score['p_duration'], usemask=False)
+     """
+
+     m_score = rfn.append_fields(
+         m_score, "offset", m_score["onset"] + m_score["duration"], usemask=False
+     )
+     m_score = rfn.append_fields(
+         m_score, "p_offset", m_score["p_onset"] + m_score["p_duration"], usemask=False
+     )

      kor_ = np.full(len(m_score), -1, dtype=[("kor", "f4")])
      if return_mask:
-         mask = np.full(len(m_score), False, dtype=[("legato", "?"), ("staccato", "?"), ("repeated", "?")])
+         mask = np.full(
+             len(m_score),
+             False,
+             dtype=[("legato", "?"), ("staccato", "?"), ("repeated", "?")],
+         )

      # consider the note transition by each voice
-     for voice in np.unique(m_score['voice']):
-         match_voiced = m_score[m_score['voice'] == voice]
+     for voice in np.unique(m_score["voice"]):
+         match_voiced = m_score[m_score["voice"] == voice]
          for _, note_info in enumerate(match_voiced):
-
-             if note_info['onset'] == match_voiced['onset'].max(): # last beat
+             if note_info["onset"] == match_voiced["onset"].max():  # last beat
                  break
-             next_note_info = get_next_note(note_info, match_voiced) # find most plausible transition
+             next_note_info = get_next_note(
+                 note_info, match_voiced
+             )  # find most plausible transition

-             if next_note_info: # in some cases no meaningful transition
+             if next_note_info:  # in some cases no meaningful transition
                  j = np.where(m_score == note_info)[0].item() # original position

-                 if (note_info['offset'] == next_note_info['onset']):
-                     kor_[j]['kor'] = get_kor(note_info, next_note_info)
+                 if note_info["offset"] == next_note_info["onset"]:
+                     kor_[j]["kor"] = get_kor(note_info, next_note_info)

-                 if return_mask: # return the
-                     if (note_info['slur_feature.slur_incr'] > 0) or (note_info['slur_feature.slur_decr'] > 0):
-                         mask[j]['legato'] = True
+                 if return_mask:  # return the
+                     if (note_info["slur_feature.slur_incr"] > 0) or (
+                         note_info["slur_feature.slur_decr"] > 0
+                     ):
+                         mask[j]["legato"] = True

-                     if note_info['articulation'] == 'staccato':
-                         mask[j]['staccato'] = True
+                     if note_info["articulation"] == "staccato":
+                         mask[j]["staccato"] = True
+
+                     # KOR for repeated notes
+                     if note_info["pitch"] == next_note_info["pitch"]:
+                         mask[j]["repeated"] = True

-                     # KOR for repeated notes
-                     if (note_info['pitch'] == next_note_info['pitch']):
-                         mask[j]['repeated'] = True
-
      if return_mask:
          return kor_, mask
      else:
@@ -369,21 +421,21 @@ def get_kor(e1, e2):
          Key overlap ratio

      """
-     kot = e1['p_offset'] - e2['p_onset']
-     ioi = e2['p_onset'] - e1['p_onset']
+     kot = e1["p_offset"] - e2["p_onset"]
+     ioi = e2["p_onset"] - e1["p_onset"]

      if ioi <= 0:
          kor = 0

      kor = kot / ioi
-
+
      return min(kor, 5)


  def get_next_note(note_info, match_voiced):
      """
-     get the next note in the same voice that's a reasonable transition
-
+     get the next note in the same voice that's a reasonable transition
+
      Parameters
      ----------
      note_info : np.ndarray
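To make the key overlap ratio concrete, a small worked sketch using plain floats instead of the structured-array rows that get_kor receives: if a note sounds from 1.00 s to 1.55 s and the next note in its voice starts at 1.50 s, then KOT = 1.55 − 1.50 = 0.05 s, IOI = 1.50 − 1.00 = 0.50 s, and KOR = 0.1 (a slight legato overlap). A gap before the next onset gives a negative KOR, and values are capped at 5.

    # Illustrative recomputation of the ratio on plain floats (not the diff's API).
    def kor(p_onset_1, p_offset_1, p_onset_2, cap=5):
        kot = p_offset_1 - p_onset_2      # key overlap time
        ioi = p_onset_2 - p_onset_1       # inter-onset interval
        if ioi <= 0:
            return 0
        return min(kot / ioi, cap)

    print(kor(1.00, 1.55, 1.50))  # 0.1  -> slight legato overlap
    print(kor(1.00, 1.40, 1.50))  # -0.2 -> detached (staccato-like) transition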
@@ -397,37 +449,36 @@ def get_next_note(note_info, match_voiced):
          the next note
      """

-     next_position = min(o for o in match_voiced['onset'] if o > note_info['onset'])
+     next_position = min(o for o in match_voiced["onset"] if o > note_info["onset"])

      # if the next note is not immediate successor of the previous one...
-     if next_position != note_info['onset'] + note_info['duration']:
+     if next_position != note_info["onset"] + note_info["duration"]:
          return None
-
-     next_position_notes = match_voiced[match_voiced['onset'] == next_position]
+
+     next_position_notes = match_voiced[match_voiced["onset"] == next_position]

      # from the notes in the next position, find the one that's closest pitch-wise.
-     closest_idx = np.abs((next_position_notes['pitch'] - note_info['pitch'])).argmin()
+     closest_idx = np.abs((next_position_notes["pitch"] - note_info["pitch"])).argmin()

      return next_position_notes[closest_idx]


- ### Pedals
+ ### Pedals
+

- def pedal_feature(m_score : list,
-                   unique_onset_idxs: list,
-                   performance: PerformanceLike):
+ def pedal_feature(m_score: list, unique_onset_idxs: list, performance: PerformanceLike):
      """
-     Compute the pedal features.
+     Compute the pedal features.

      Parameters
      ----------
      m_score : list
-         correspondance between score and performance notes, with score markings.
+         correspondance between score and performance notes, with score markings.
      unique_onset_idxs : list
          a list of arrays with the note indexes that have the same onset
      performance: PerformedPart
          The original PerformedPart object
-
+
      Returns
      -------
      pedal_ : structured array (4, n_notes) with fields
@@ -436,8 +487,8 @@ def pedal_feature(m_score : list,
          to_prev_release [0, 10]: delta time from note onset to the previous pedal release 'peak'
          to_next_release [0, 10]: delta time from note offset to the next pedal release 'peak'
          (think about something relates to the real duration)
-     """
-
+     """
+
      onset_offset_pedals, ramp_func = pedal_ramp(performance.performedparts[0], m_score)

      x = np.linspace(0, 100, 200)
@@ -446,24 +497,29 @@ def pedal_feature(m_score : list,
      peaks, _ = find_peaks(-y, prominence=10)
      peak_timepoints = x[peaks]

-     release_times = np.zeros(len(m_score), dtype=[("to_prev_release", "f4"), ("to_next_release", "f4")])
+     release_times = np.zeros(
+         len(m_score), dtype=[("to_prev_release", "f4"), ("to_next_release", "f4")]
+     )
      for i, note in enumerate(m_score):
-         peaks_before = peak_timepoints[note['p_onset'] >= peak_timepoints]
-         peaks_after = peak_timepoints[(note['p_onset'] + note['p_duration']) <= peak_timepoints]
+         peaks_before = peak_timepoints[note["p_onset"] >= peak_timepoints]
+         peaks_after = peak_timepoints[
+             (note["p_onset"] + note["p_duration"]) <= peak_timepoints
+         ]
          if len(peaks_before):
-             release_times[i]["to_prev_release"] = min(note['p_onset'] - peaks_before.max(), 10)
+             release_times[i]["to_prev_release"] = min(
+                 note["p_onset"] - peaks_before.max(), 10
+             )
          if len(peaks_after):
-             release_times[i]["to_next_release"] = min(peaks_after.min() - (note['p_onset'] + note['p_duration']), 10)
-
-     # plt.plot(x[peaks], y[peaks], "x")
-     # plt.plot(x, y)
-     # plt.show()
+             release_times[i]["to_next_release"] = min(
+                 peaks_after.min() - (note["p_onset"] + note["p_duration"]), 10
+             )

-     return rfn.merge_arrays([onset_offset_pedals, release_times], flatten=True, usemask=False)
+     return rfn.merge_arrays(
+         [onset_offset_pedals, release_times], flatten=True, usemask=False
+     )


- def pedal_ramp(ppart: PerformedPart,
-                m_score: np.ndarray):
+ def pedal_ramp(ppart: PerformedPart, m_score: np.ndarray):
      """Pedal ramp in the same shape as the m_score.

      Returns:
@@ -472,13 +528,13 @@ def pedal_ramp(ppart: PerformedPart,
      """
      pedal_controls = ppart.controls
      W = np.zeros((len(m_score), 2))
-     onset_timepoints = m_score['p_onset']
-     offset_timepoints = m_score['p_onset'] + m_score['p_duration']
+     onset_timepoints = m_score["p_onset"]
+     offset_timepoints = m_score["p_onset"] + m_score["p_duration"]

-     timepoints = [control['time'] for control in pedal_controls]
-     values = [control['value'] for control in pedal_controls]
+     timepoints = [control["time"] for control in pedal_controls]
+     values = [control["value"] for control in pedal_controls]

-     if len(timepoints) <= 1: # the case there is no pedal
+     if len(timepoints) <= 1:  # the case there is no pedal
          timepoints, values = [0, 0], [0, 0]

      agg_ramp_func = interp1d(timepoints, values, bounds_error=False, fill_value=0)
@@ -488,7 +544,12 @@ def pedal_ramp(ppart: PerformedPart,
      # Filter out NaN values
      W[np.isnan(W)] = 0.0

-     return np.array([tuple(i) for i in W], dtype=[("onset_value", "f4"), ("offset_value", "f4")]), agg_ramp_func
+     return (
+         np.array(
+             [tuple(i) for i in W], dtype=[("onset_value", "f4"), ("offset_value", "f4")]
+         ),
+         agg_ramp_func,
+     )


  ### Phrasing
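The pedal features above reduce the sustain-pedal controller curve to a piecewise-linear ramp, sample it at note onsets and offsets, and treat minima of the curve as release "peaks". A self-contained sketch of that idea with SciPy, using made-up controller data rather than partitura objects:

    import numpy as np
    from scipy.interpolate import interp1d
    from scipy.signal import find_peaks

    # Synthetic sustain-pedal (CC 64) messages: (time in seconds, value 0-127).
    times = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
    values = [0, 100, 100, 5, 110, 110, 0]

    ramp = interp1d(times, values, bounds_error=False, fill_value=0)

    # Sample the ramp at note onsets/offsets to get onset_value / offset_value.
    note_onsets = np.array([0.6, 1.6])
    print(ramp(note_onsets))              # pedal depth at each onset

    # Release "peaks" are minima of the ramp, i.e. peaks of the negated curve.
    grid = np.linspace(0, 3, 300)
    peaks, _ = find_peaks(-ramp(grid), prominence=10)
    print(grid[peaks])                    # approximate release time (about 1.5 s here)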

partitura/musicanalysis/pitch_spelling.py

@@ -171,7 +171,6 @@ def compute_chroma_vector_array(chroma_array, K_pre, K_post):


  def compute_morph_array(chroma_array, chroma_vector_array):
-
      n = len(chroma_array)
      # Line 1: Initialize morph array
      morph_array = np.empty(n, dtype=int)
@@ -233,7 +232,6 @@ def compute_morph_array(chroma_array, chroma_vector_array):


  def compute_ocm_chord_list(sorted_ocp, chroma_array, morph_array):
-
      # Lines 1-3
      ocm_array = np.column_stack((sorted_ocp[:, 0], chroma_array, morph_array)).astype(
          int

partitura/musicanalysis/tonal_tension.py

@@ -249,7 +249,6 @@ class CloudDiameter(TonalTension):
      """

      def compute_tension(self, cloud, scale_factor=SCALE_FACTOR, **kwargs):
-
          if len(cloud) > 1:
              return cloud_diameter(cloud) * scale_factor
          else:
@@ -264,13 +263,11 @@ class TensileStrain(TonalTension):
      def __init__(
          self, tonic_idx=0, mode="major", w=DEFAULT_WEIGHTS, alpha=ALPHA, beta=BETA
      ):
-
          self.update_key(tonic_idx, mode, w, alpha, beta)

      def compute_tension(
          self, cloud, duration, scale_factor=SCALE_FACTOR, *args, **kwargs
      ):
-
          if duration.sum() == 0:
              return 0

@@ -279,7 +276,6 @@ class TensileStrain(TonalTension):
          return e_distance(cloud_ce, self.key_ce) * scale_factor

      def update_key(self, tonic_idx, mode, w=DEFAULT_WEIGHTS, alpha=ALPHA, beta=BETA):
-
          if mode in ("major", None, 1):
              self.key_ce = major_key(tonic_idx, w=w)
          elif mode in ("minor", -1):
@@ -297,7 +293,6 @@ class CloudMomentum(TonalTension):
      def compute_tension(
          self, cloud, duration, reset=False, scale_factor=SCALE_FACTOR, *args, **kwargs
      ):
-
          if duration.sum() == 0:
              return 0

@@ -328,7 +323,6 @@ def notes_to_idx(note_array):


  def prepare_note_array(note_info):
-
      note_array = ensure_notearray(
          note_info, include_pitch_spelling=True, include_key_signature=True
      )
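For reference (not part of the diff), a minimal sketch of the note-array preparation that prepare_note_array builds on. It assumes ensure_notearray is importable from partitura.utils and accepts a ScoreLike object plus the extension flags shown in the hunk above; the input file name is hypothetical.

    import partitura as pt
    from partitura.utils import ensure_notearray

    # Hypothetical input; any ScoreLike object should work here.
    score = pt.load_musicxml("example.musicxml")
    na = ensure_notearray(
        score, include_pitch_spelling=True, include_key_signature=True
    )
    # Onset/duration/pitch fields plus the pitch-spelling and key-signature columns.
    print(na.dtype.names)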