sonusai 0.17.0__py3-none-any.whl → 0.17.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. sonusai/audiofe.py +22 -51
  2. sonusai/calc_metric_spenh.py +206 -213
  3. sonusai/doc/doc.py +1 -1
  4. sonusai/mixture/__init__.py +2 -0
  5. sonusai/mixture/audio.py +12 -0
  6. sonusai/mixture/datatypes.py +11 -3
  7. sonusai/mixture/mixdb.py +101 -0
  8. sonusai/mixture/soundfile_audio.py +39 -0
  9. sonusai/mixture/speaker_metadata.py +35 -0
  10. sonusai/mixture/torchaudio_audio.py +22 -0
  11. sonusai/mkmanifest.py +1 -1
  12. sonusai/onnx_predict.py +114 -410
  13. sonusai/queries/queries.py +1 -1
  14. sonusai/speech/__init__.py +3 -0
  15. sonusai/speech/l2arctic.py +116 -0
  16. sonusai/speech/librispeech.py +99 -0
  17. sonusai/speech/mcgill.py +70 -0
  18. sonusai/speech/textgrid.py +100 -0
  19. sonusai/speech/timit.py +135 -0
  20. sonusai/speech/types.py +12 -0
  21. sonusai/speech/vctk.py +52 -0
  22. sonusai/speech/voxceleb2.py +86 -0
  23. sonusai/utils/__init__.py +2 -1
  24. sonusai/utils/asr_manifest_functions/__init__.py +0 -1
  25. sonusai/utils/asr_manifest_functions/data.py +0 -8
  26. sonusai/utils/asr_manifest_functions/librispeech.py +1 -1
  27. sonusai/utils/asr_manifest_functions/mcgill_speech.py +1 -1
  28. sonusai/utils/asr_manifest_functions/vctk_noisy_speech.py +1 -1
  29. sonusai/utils/braced_glob.py +7 -3
  30. sonusai/utils/onnx_utils.py +110 -106
  31. sonusai/utils/path_info.py +7 -0
  32. {sonusai-0.17.0.dist-info → sonusai-0.17.2.dist-info}/METADATA +2 -1
  33. {sonusai-0.17.0.dist-info → sonusai-0.17.2.dist-info}/RECORD +35 -30
  34. {sonusai-0.17.0.dist-info → sonusai-0.17.2.dist-info}/WHEEL +1 -1
  35. sonusai/calc_metric_spenh-save.py +0 -1334
  36. sonusai/onnx_predict-old.py +0 -240
  37. sonusai/onnx_predict-save.py +0 -487
  38. sonusai/ovino_predict.py +0 -508
  39. sonusai/ovino_query_devices.py +0 -47
  40. sonusai/torchl_onnx-old.py +0 -216
  41. {sonusai-0.17.0.dist-info → sonusai-0.17.2.dist-info}/entry_points.txt +0 -0
sonusai/onnx_predict-save.py (deleted)
@@ -1,487 +0,0 @@
- """sonusai onnx_predict
-
- usage: onnx_predict [-hvlwr] [--include GLOB] [-i MIXID] MODEL DATA ...
-
- options:
-     -h, --help
-     -v, --verbose                Be verbose.
-     -l, --list-device-details   List details of all OpenVINO available devices
-     -i MIXID, --mixid MIXID     Mixture ID(s) to generate if input is a mixture database. [default: *].
-     --include GLOB              Search only files whose base name matches GLOB. [default: *.{wav,flac}].
-     -w, --write-wav             Calculate inverse transform of prediction and write .wav files
-     -r, --reset                 Reset model between each file.
-
- Run prediction (inference) using an onnx model on a SonusAI mixture dataset or audio files from a regex path.
- The OnnxRuntime (ORT) inference engine is used to execute the inference.
-
- Inputs:
-     MODEL   ONNX model .onnx file of a trained model (weights are expected to be in the file).
-
-     DATA    The input data must be one of the following:
-             * WAV
-               Using the given model, generate feature data and run prediction. A model file must be
-               provided. The MIXID is ignored.
-
-             * directory
-               Using the given SonusAI mixture database directory, generate feature and truth data if not found.
-               Run prediction. The MIXID is required.
-
-
- Note there are multiple ways to process model prediction over multiple audio data files:
- 1. TSE (timestep single extension): mixture transform frames are fit into the timestep dimension and the model run as
-    a single inference call. If batch_size is > 1 then run multiple mixtures in one call with shorter mixtures
-    zero-padded to the size of the largest mixture.
- 2. TME (timestep multi-extension): mixture is split into multiple timesteps, i.e. batch[0] is starting timesteps, ...
-    Note that batches are run independently, thus sequential state from one set of timesteps to the next will not be
-    maintained, thus results for such models (i.e. conv, LSTMs, in the tstep dimension) would not match using TSE mode.
-
- TBD not sure below make sense, need to continue ??
- 2. BSE (batch single extension): mixture transform frames are fit into the batch dimension. This make sense only if
-    independent predictions are made on each frame w/o considering previous frames (i.e.
-    timesteps=1 or there is no timestep dimension in the model (timesteps=0).
- 3. classification
-
- Outputs the following to ovpredict-<TIMESTAMP> directory:
-     <id>.h5
-         dataset: predict
-     onnx_predict.log
-
- """
-
- from sonusai import logger
- from typing import Any, List, Optional, Tuple
- from os.path import basename, splitext, exists, isfile
- import onnxruntime as ort
- import onnx
- from onnx import ValueInfoProto
-
-
- def load_ort_session(model_path, providers=['CPUExecutionProvider']):
-     if exists(model_path) and isfile(model_path):
-         model_basename = basename(model_path)
-         model_root = splitext(model_basename)[0]
-         logger.info(f'Importing model from {model_basename}')
-         try:
-             session = ort.InferenceSession(model_path, providers=providers)
-             options = ort.SessionOptions()
-         except Exception as e:
-             logger.exception(f'Error: could not load onnx model from {model_path}: {e}')
-             raise SystemExit(1)
-     else:
-         logger.exception(f'Error: model file does not exist: {model_path}')
-         raise SystemExit(1)
-
-     logger.info(f'Opened session with provider options: {session._provider_options}.')
-     try:
-         meta = session.get_modelmeta()
-         hparams = eval(meta.custom_metadata_map["hparams"])
-         logger.info(f'Sonusai hyper-parameter metadata was found in model with {len(hparams)} parameters, '
-                     f'checking for required ones ...')
-         # Print to log here will fail if required parameters not available.
-         logger.info(f'feature {hparams["feature"]}')
-         logger.info(f'batch_size {hparams["batch_size"]}')
-         logger.info(f'timesteps {hparams["timesteps"]}')
-         logger.info(f'flatten, add1ch {hparams["flatten"]}, {hparams["add1ch"]}')
-         logger.info(f'truth_mutex {hparams["truth_mutex"]}')
-     except:
-         hparams = None
-         logger.warning(f'Warning: onnx model does not have required Sonusai hyper-parameters.')
-
-     in_names = [n.name for n in session.get_inputs()]
-     out_names = [n.name for n in session.get_outputs()]
-
-     return session, options, model_root, hparams, in_names, out_names,
-
-
- def main() -> None:
-     from docopt import docopt
-
-     import sonusai
-     from sonusai.utils import trim_docstring
-
-     args = docopt(trim_docstring(__doc__), version=sonusai.__version__, options_first=True)
-
-     verbose = args['--verbose']
-     listdd = args['--list-device-details']
-     writewav = args['--write-wav']
-     mixids = args['--mixid']
-     reset = args['--reset']
-     include = args['--include']
-     model_path = args['MODEL']
-     datapaths = args['DATA']
-
-     providers = ort.get_available_providers()
-     logger.info(f'Loaded Onnx runtime, available providers: {providers}.')
-
-     session, options, model_root, hparams, in_names, out_names = load_ort_session(model_path)
-     if hparams is None:
-         logger.error(f'Error: onnx model does not have required Sonusai hyper-parameters, can not proceed.')
-         raise SystemExit(1)
-
-     from os.path import join, dirname, isdir, normpath, realpath, abspath
-     from sonusai.utils.asr_manifest_functions import PathInfo
-     from sonusai.utils import braced_iglob
-     from sonusai.mixture import MixtureDatabase
-
-     mixdb_path = None
-     entries = None
-     if len(datapaths) == 1 and isdir(datapaths[0]):  # Assume it's a single path to sonusai mixdb subdir
-         in_basename = basename(normpath(datapaths[0]))
-         mixdb_path = datapaths[0]
-         logger.debug(f'Attempting to load mixture database from {mixdb_path}')
-         mixdb = MixtureDatabase(mixdb_path)
-         logger.debug(f'Sonusai mixture db load success: found {mixdb.num_mixtures} mixtures with {mixdb.num_classes} classes')
-         p_mixids = mixdb.mixids_to_list(mixids)
-         if len(p_mixids) != mixdb.num_mixtures:
-             logger.info(f'Processing a subset of {p_mixids} from available mixtures.')
-     else:  # search all datapaths for .wav, .flac (or whatever is specified in include)
-         in_basename = ''
-         entries: list[PathInfo] = []
-         for p in datapaths:
-             location = join(realpath(abspath(p)), '**', include)
-             logger.debug(f'Processing {location}')
-             for file in braced_iglob(pathname=location, recursive=True):
-                 name = file
-                 entries.append(PathInfo(abs_path=file, audio_filepath=name))
-
-     from sonusai.utils import create_ts_name
-     from os import makedirs
-     from sonusai import create_file_handler
-     from sonusai import initial_log_messages
-     from sonusai import update_console_handler
-     output_dir = create_ts_name('opredict-' + in_basename)
-     makedirs(output_dir, exist_ok=True)
-     # Setup logging file
-     create_file_handler(join(output_dir, 'onnx-predict.log'))
-     update_console_handler(verbose)
-     initial_log_messages('onnx_predict')
-     # Reprint some info messages
-     logger.info(f'Loaded OnnxRuntime, available providers: {providers}.')
-     logger.info(f'Read and compiled onnx model from {model_path}.')
-     if len(datapaths) == 1 and isdir(datapaths[0]):  # Assume it's a single path to sonusai mixdb subdir
-         logger.info(f'Loaded mixture database from {datapaths}')
-         logger.info(f'Sonusai mixture db: found {mixdb.num_mixtures} mixtures with {mixdb.num_classes} classes')
-     else:
-         logger.info(f'{len(datapaths)} data paths specified, found {len(entries)} audio files.')
-
-
-     if mixdb_path is not None:  # mixdb input
-         # Assume (of course) that mixdb feature, etc. is what model expects
-         if hparams["feature"] != mixdb.feature:
-             logger.warning(f'Mixture feature does not match model feature, this inference run may fail.')
-         feature_mode = mixdb.feature  # no choice, can't use hparams["feature"] since it's different than the mixdb
-
-         # if hparams["num_classes"] != mixdb.num_classes:  # needs to be i.e. mixdb.feature_parameters
-         # if mixdb.num_classes != model_num_classes:
-         #     logger.error(f'Feature parameters in mixture db {mixdb.num_classes} does not match num_classes in model {inp0shape[-1]}')
-         #     raise SystemExit(1)
-
-         from sonusai.utils import reshape_inputs
-         from sonusai.utils import reshape_outputs
-         from sonusai.mixture import get_audio_from_feature
-         from sonusai.utils import write_wav
-         import numpy as np
-         import h5py
-         if hparams["batch_size"] == 1:
-             for mixid in p_mixids:
-                 feature, _ = mixdb.mixture_ft(mixid)  # frames x stride x feature_params
-                 if hparams["timesteps"] == 0:
-                     tsteps = 0  # no timestep dim, reshape will take care
-                 else:
-                     tsteps = feature.shape[0]  # fit frames into timestep dimension (TSE mode)
-                 feature, _ = reshape_inputs(feature=feature,
-                                             batch_size=1,
-                                             timesteps=tsteps,
-                                             flatten=hparams["flatten"],
-                                             add1ch=hparams["add1ch"])
-                 # run inference, ort session wants i.e. batch x tsteps x feat_params, outputs numpy BxTxFP or BxFP
-                 predict = session.run(out_names, {in_names[0]: feature})[0]
-                 # predict, _ = reshape_outputs(predict=predict[0], timesteps=frames)  # frames x feat_params
-                 output_name = join(output_dir, mixdb.mixtures[mixid].name)
-                 with h5py.File(output_name, 'a') as f:
-                     if 'predict' in f:
-                         del f['predict']
-                     f.create_dataset('predict', data=predict)
-                 if writewav:  # note only makes sense if model is predicting audio, i.e. tstep dimension exists
-                     # predict_audio wants [frames, channels, feature_parameters] equiv. to tsteps, batch, bins
-                     predict = np.transpose(predict, [1, 0, 2])
-                     predict_audio = get_audio_from_feature(feature=predict, feature_mode=feature_mode)
-                     owav_name = splitext(output_name)[0] + '_predict.wav'
-                     write_wav(owav_name, predict_audio)
-
-
- #
- # # sampler = None
- # # p_datagen = TorchFromMixtureDatabase(mixdb=mixdb,
- # #                                      mixids=p_mixids,
- # #                                      batch_size=batch_size,
- # #                                      cut_len=0,
- # #                                      flatten=model.flatten,
- # #                                      add1ch=model.add1ch,
- # #                                      random_cut=False,
- # #                                      sampler=sampler,
- # #                                      drop_last=False,
- # #                                      num_workers=dlcpu)
- #
- # # Info needed to set up inverse transform
- # half = model.num_classes // 2
- # fg = FeatureGenerator(feature_mode=feature,
- #                       num_classes=model.num_classes,
- #                       truth_mutex=model.truth_mutex)
- # itf = TorchInverseTransform(N=fg.itransform_N,
- #                             R=fg.itransform_R,
- #                             bin_start=fg.bin_start,
- #                             bin_end=fg.bin_end,
- #                             ttype=fg.itransform_ttype)
- #
- # enable_truth_wav = False
- # enable_mix_wav = False
- # if wavdbg:
- #     if mixdb.target_files[0].truth_settings[0].function == 'target_mixture_f':
- #         enable_mix_wav = True
- #         enable_truth_wav = True
- #     elif mixdb.target_files[0].truth_settings[0].function == 'target_f':
- #         enable_truth_wav = True
- #
- # if reset:
- #     logger.info(f'Running {mixdb.num_mixtures} mixtures individually with model reset ...')
- #     for idx, val in enumerate(p_datagen):
- #         # truth = val[1]
- #         feature = val[0]
- #         with torch.no_grad():
- #             ypred = model(feature)
- #         output_name = join(output_dir, mixdb.mixtures[idx].name)
- #         pdat = ypred.detach().numpy()
- #         if timesteps > 0:
- #             logger.debug(f'In and out tsteps: {feature.shape[1]},{pdat.shape[1]}')
- #         logger.debug(f'Writing predict shape {pdat.shape} to {output_name}')
- #         with h5py.File(output_name, 'a') as f:
- #             if 'predict' in f:
- #                 del f['predict']
- #             f.create_dataset('predict', data=pdat)
- #
- #         if wavdbg:
- #             owav_base = splitext(output_name)[0]
- #             tmp = torch.complex(ypred[..., :half], ypred[..., half:]).permute(2, 0, 1).detach()
- #             itf.reset()
- #             predwav, _ = itf.execute_all(tmp)
- #             # predwav, _ = calculate_audio_from_transform(tmp.numpy(), itf, trim=True)
- #             write_wav(owav_base + '.wav', predwav.permute([1, 0]).numpy(), 16000)
- #             if enable_truth_wav:
- #                 # Note this support truth type target_f and target_mixture_f
- #                 tmp = torch.complex(val[0][..., :half], val[0][..., half:2 * half]).permute(2, 0, 1).detach()
- #                 itf.reset()
- #                 truthwav, _ = itf.execute_all(tmp)
- #                 write_wav(owav_base + '_truth.wav', truthwav.permute([1, 0]).numpy(), 16000)
- #
- #             if enable_mix_wav:
- #                 tmp = torch.complex(val[0][..., 2 * half:3 * half], val[0][..., 3 * half:]).permute(2, 0, 1)
- #                 itf.reset()
- #                 mixwav, _ = itf.execute_all(tmp.detach())
- #                 write_wav(owav_base + '_mix.wav', mixwav.permute([1, 0]).numpy(), 16000)
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- # from os import makedirs
- # from os.path import isdir
- # from os.path import join
- # from os.path import splitext
- #
- # import h5py
- # import onnxruntime as rt
- # import numpy as np
- #
- # from sonusai.mixture import Feature
- # from sonusai.mixture import Predict
- # from sonusai.utils import SonusAIMetaData
- # from sonusai import create_file_handler
- # from sonusai import initial_log_messages
- # from sonusai import update_console_handler
- # from sonusai.mixture import MixtureDatabase
- # from sonusai.mixture import get_feature_from_audio
- # from sonusai.mixture import read_audio
- # from sonusai.utils import create_ts_name
- # from sonusai.utils import get_frames_per_batch
- # from sonusai.utils import get_sonusai_metadata
- #
- # output_dir = create_ts_name('ovpredict')
- # makedirs(output_dir, exist_ok=True)
- #
- #
- #
- # model = rt.InferenceSession(model_name, providers=['CPUExecutionProvider'])
- # model_metadata = get_sonusai_metadata(model)
- #
- # batch_size = model_metadata.input_shape[0]
- # if model_metadata.timestep:
- #     timesteps = model_metadata.input_shape[1]
- # else:
- #     timesteps = 0
- # num_classes = model_metadata.output_shape[-1]
- #
- # frames_per_batch = get_frames_per_batch(batch_size, timesteps)
- #
- # logger.info('')
- # logger.info(f'feature {model_metadata.feature}')
- # logger.info(f'num_classes {num_classes}')
- # logger.info(f'batch_size {batch_size}')
- # logger.info(f'timesteps {timesteps}')
- # logger.info(f'flatten {model_metadata.flattened}')
- # logger.info(f'add1ch {model_metadata.channel}')
- # logger.info(f'truth_mutex {model_metadata.mutex}')
- # logger.info(f'input_shape {model_metadata.input_shape}')
- # logger.info(f'output_shape {model_metadata.output_shape}')
- # logger.info('')
- #
- # if splitext(entries)[1] == '.wav':
- #     # Convert WAV to feature data
- #     logger.info('')
- #     logger.info(f'Run prediction on {entries}')
- #     audio = read_audio()
- #     feature = get_feature_from_audio(audio=audio, feature_mode=model_metadata.feature)
- #
- #     predict = pad_and_predict(feature=feature,
- #                               model_name=model_name,
- #                               model_metadata=model_metadata,
- #                               frames_per_batch=frames_per_batch,
- #                               batch_size=batch_size,
- #                               timesteps=timesteps,
- #                               reset=reset)
- #
- #     output_name = splitext()[0] + '.h5'
- #     with h5py.File(output_name, 'a') as f:
- #         if 'feature' in f:
- #             del f['feature']
- #         f.create_dataset(name='feature', data=feature)
- #
- #         if 'predict' in f:
- #             del f['predict']
- #         f.create_dataset(name='predict', data=predict)
- #
- #     logger.info(f'Saved results to {output_name}')
- #     return
- #
- # if not isdir():
- #     logger.exception(f'Do not know how to process input from {entries}')
- #     raise SystemExit(1)
- #
- # mixdb = MixtureDatabase()
- #
- # if mixdb.feature != model_metadata.feature:
- #     logger.exception(f'Feature in mixture database does not match feature in model')
- #     raise SystemExit(1)
- #
- # mixids = mixdb.mixids_to_list(mixids)
- # if reset:
- #     # reset mode cycles through each file one at a time
- #     for mixid in mixids:
- #         feature, _ = mixdb.mixture_ft(mixid)
- #
- #         predict = pad_and_predict(feature=feature,
- #                                   model_name=model_name,
- #                                   model_metadata=model_metadata,
- #                                   frames_per_batch=frames_per_batch,
- #                                   batch_size=batch_size,
- #                                   timesteps=timesteps,
- #                                   reset=reset)
- #
- #         output_name = join(output_dir, mixdb.mixtures[mixid].name)
- #         with h5py.File(output_name, 'a') as f:
- #             if 'predict' in f:
- #                 del f['predict']
- #             f.create_dataset(name='predict', data=predict)
- # else:
- #     features: list[Feature] = []
- #     file_indices: list[slice] = []
- #     total_frames = 0
- #     for mixid in mixids:
- #         current_feature, _ = mixdb.mixture_ft(mixid)
- #         current_frames = current_feature.shape[0]
- #         features.append(current_feature)
- #         file_indices.append(slice(total_frames, total_frames + current_frames))
- #         total_frames += current_frames
- #     feature = np.vstack([features[i] for i in range(len(features))])
- #
- #     predict = pad_and_predict(feature=feature,
- #                               model_name=model_name,
- #                               model_metadata=model_metadata,
- #                               frames_per_batch=frames_per_batch,
- #                               batch_size=batch_size,
- #                               timesteps=timesteps,
- #                               reset=reset)
- #
- #     # Write data to separate files
- #     for idx, mixid in enumerate(mixids):
- #         output_name = join(output_dir, mixdb.mixtures[mixid].name)
- #         with h5py.File(output_name, 'a') as f:
- #             if 'predict' in f:
- #                 del f['predict']
- #             f.create_dataset('predict', data=predict[file_indices[idx]])
- #
- # logger.info(f'Saved results to {output_dir}')
- #
-
- # def pad_and_predict(feature: Feature,
- #                     model_name: str,
- #                     model_metadata: SonusAIMetaData,
- #                     frames_per_batch: int,
- #                     batch_size: int,
- #                     timesteps: int,
- #                     reset: bool) -> Predict:
- #     import onnxruntime as rt
- #     import numpy as np
- #
- #     from sonusai.utils import reshape_inputs
- #     from sonusai.utils import reshape_outputs
- #
- #     frames = feature.shape[0]
- #     padding = frames_per_batch - frames % frames_per_batch
- #     feature = np.pad(array=feature, pad_width=((0, padding), (0, 0), (0, 0)))
- #     feature, _ = reshape_inputs(feature=feature,
- #                                 batch_size=batch_size,
- #                                 timesteps=timesteps,
- #                                 flatten=model_metadata.flattened,
- #                                 add1ch=model_metadata.channel)
- #     sequences = feature.shape[0] // model_metadata.input_shape[0]
- #     feature = np.reshape(feature, [sequences, *model_metadata.input_shape])
- #
- #     model = rt.InferenceSession(model_name, providers=['CPUExecutionProvider'])
- #     output_names = [n.name for n in model.get_outputs()]
- #     input_names = [n.name for n in model.get_inputs()]
- #
- #     predict = []
- #     for sequence in range(sequences):
- #         predict.append(model.run(output_names, {input_names[0]: feature[sequence]}))
- #         if reset:
- #             model = rt.InferenceSession(model_name, providers=['CPUExecutionProvider'])
- #
- #     predict_arr = np.vstack(predict)
- #     # Combine [sequences, batch_size, ...] into [frames, ...]
- #     predict_shape = predict_arr.shape
- #     predict_arr = np.reshape(predict_arr, [predict_shape[0] * predict_shape[1], *predict_shape[2:]])
- #     predict_arr, _ = reshape_outputs(predict=predict_arr, timesteps=timesteps)
- #     predict_arr = predict_arr[:frames, :]
- #
- #     return predict_arr
-
-
- if __name__ == '__main__':
-     try:
-         main()
-     except KeyboardInterrupt:
-         logger.info('Canceled due to keyboard interrupt')
-         raise SystemExit(0)
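
For context, the core flow the deleted script implemented is: open an ONNX Runtime session, read the SonusAI hyper-parameters stored in the model's custom metadata, reshape one mixture's feature into batch x timesteps x feature_params, and run a single inference call per mixture (TSE mode, per the docstring above). The following is a minimal, self-contained sketch of that flow under stated assumptions, not the package's actual implementation: load_session and predict_tse are hypothetical names, ast.literal_eval is swapped in for the original's eval, and the inline reshape is a simplified stand-in for SonusAI's reshape_inputs helper (the add1ch path is omitted).

import ast
from os.path import exists, isfile

import numpy as np
import onnxruntime as ort


def load_session(model_path: str):
    # Mirrors load_ort_session above: open a CPU session and pull the
    # "hparams" dict that SonusAI stores in the ONNX custom metadata.
    if not (exists(model_path) and isfile(model_path)):
        raise SystemExit(f'model file does not exist: {model_path}')
    session = ort.InferenceSession(model_path, providers=['CPUExecutionProvider'])
    meta = session.get_modelmeta()
    hparams = ast.literal_eval(meta.custom_metadata_map['hparams'])  # safer stand-in for eval()
    in_names = [n.name for n in session.get_inputs()]
    out_names = [n.name for n in session.get_outputs()]
    return session, hparams, in_names, out_names


def predict_tse(session, hparams, in_names, out_names, feature: np.ndarray) -> np.ndarray:
    # TSE mode: all transform frames of one mixture go into the timestep
    # dimension and the model runs as one inference call.
    # feature arrives as [frames, stride, feature_params], per mixture_ft().
    frames, stride, params = feature.shape
    if hparams['flatten']:
        feature = feature.reshape(frames, stride * params)  # simplified reshape_inputs
    x = feature[np.newaxis, ...].astype(np.float32)  # -> [batch=1, timesteps, ...]
    return session.run(out_names, {in_names[0]: x})[0]

Note that the active code in the deleted file only handles batch_size == 1; the batch_size > 1 path described in the docstring (zero-padding shorter mixtures to the longest before stacking) was never implemented there.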