tictacsync 0.8a0.tar.gz → 0.9a0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tictacsync might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: tictacsync
3
- Version: 0.8a0
3
+ Version: 0.9a0
4
4
  Summary: command for syncing audio video recordings
5
5
  Home-page: https://tictacsync.org/
6
6
  Author: Raymond Lutz
@@ -21,7 +21,7 @@ setup(
21
21
  'numpy>=1.24.3',
22
22
  'rich>=10.12.0',
23
23
  'lmfit',
24
- 'skimage',
24
+ 'scikit-image',
25
25
  'scipy>=1.10.1',
26
26
  ],
27
27
  python_requires='>=3.10',
@@ -32,7 +32,7 @@ setup(
32
32
  'multi2polywav = tictacsync.multi2polywav:main',
33
33
  ]
34
34
  },
35
- version = '0.8a',
35
+ version = '0.9a',
36
36
  description = "command for syncing audio video recordings",
37
37
  long_description_content_type='text/markdown',
38
38
  long_description = long_descr,
@@ -72,6 +72,9 @@ def process_files(medias):
72
72
  def process_single(file, args):
73
73
  # argument is a single file
74
74
  m = device_scanner.media_at_path(None, Path(file))
75
+ if args.plotting:
76
+ print('\nPlots can be zoomed and panned...')
77
+ print('Close window for next one.')
75
78
  a_rec = yaltc.Recording(m, do_plots=args.plotting)
76
79
  time = a_rec.get_start_time()
77
80
  # time = a_rec.get_start_time(plots=args.plotting)
@@ -97,10 +100,10 @@ def process_single(file, args):
97
100
  def main():
98
101
  parser = argparse.ArgumentParser()
99
102
  parser.add_argument(
100
- "directory",
103
+ "path",
101
104
  type=str,
102
105
  nargs=1,
103
- help="path of media directory"
106
+ help="directory_name or media_file"
104
107
  )
105
108
  # parser.add_argument("directory", nargs="?", help="path of media directory")
106
109
  # parser.add_argument('-v', action='store_true')
@@ -108,29 +111,29 @@ def main():
108
111
  dest='verbose_output',
109
112
  help='Set verbose ouput')
110
113
  parser.add_argument('-o', nargs=1,
111
- help='where to write synced clips')
114
+ help='Where to write the SyncedMedia folder [default to "path" ]')
112
115
  parser.add_argument('-p', action='store_true', default=False,
113
116
  dest='plotting',
114
- help='Make plots')
117
+ help='Produce plots')
115
118
  parser.add_argument('--isos', action='store_true', default=False,
116
119
  dest='write_ISOs',
117
120
  help='Write ISO sound files')
118
121
  parser.add_argument('--nosync', action='store_true',
119
122
  dest='nosync',
120
- help='just scan and decode')
123
+ help='Just scan and decode')
121
124
  parser.add_argument('--terse', action='store_true',
122
125
  dest='terse',
123
- help='terse output')
126
+ help='Terse output')
124
127
  args = parser.parse_args()
125
128
  if args.verbose_output:
126
129
  logger.add(sys.stderr, level="DEBUG")
127
130
  # logger.add(sys.stdout, filter="__main__")
128
131
  # logger.add(sys.stdout, filter="yaltc")
129
- # logger.add(sys.stdout, filter=lambda r: r["function"] == "_read_sound_find_TicTacCode")
132
+ # logger.add(sys.stdout, filter=lambda r: r["function"] == "build_audio_and_write_video")
130
133
  # logger.add(sys.stdout, filter=lambda r: r["function"] == "_detect_sync_pulse_position")
131
134
  # logger.add(sys.stdout, filter=lambda r: r["function"] == "_get_device_mix")
132
135
  # logger.add(sys.stdout, filter=lambda r: r["function"] == "_sox_mix_files")
133
- top_dir = args.directory[0]
136
+ top_dir = args.path[0]
134
137
  if os.path.isfile(top_dir):
135
138
  file = top_dir
136
139
  process_single(file, args)
@@ -888,6 +888,8 @@ class AudioStitcherVideoMerger:
888
888
  self.videoclip.synced_audio = \
889
889
  _sox_keep(concatenate_audio_file, [sox_kept_channel])
890
890
  self._merge_audio_and_video()
891
+ if asked_ISOs:
892
+ print('WARNING: you asked for ISO files but found one audio channel only...')
891
893
  return #########################################################
892
894
  #
893
895
  # if not returned yet from fct, either multitracks and/or multi
@@ -916,6 +918,7 @@ class AudioStitcherVideoMerger:
916
918
  # [(dev1, [mono1_ch1, mono1_ch2]), (dev2, [mono2_ch1, mono2_ch2)]] in
917
919
  # devices_and_monofiles:
918
920
  if asked_ISOs:
921
+ logger.debug('will output ISO files...')
919
922
  devices_and_monofiles = [(device, _split_channels(multi_chan_audio))
920
923
  for device, multi_chan_audio
921
924
  in merged_audio_files_by_device]
@@ -29,8 +29,9 @@ try:
29
29
  except:
30
30
  import device_scanner
31
31
 
32
- TEENSY_MAX_LAG = 128/44100 # sec, duration of a default length audio block
32
+ TEENSY_MAX_LAG = 1.01*128/44100 # sec, duration of a default length audio block
33
33
 
34
+ # see extract_seems_TicTacCode() for duration criterion values
34
35
 
35
36
  CACHING = True
36
37
  DEL_TEMP = False
@@ -266,7 +267,7 @@ class Decoder:
266
267
  # extra_window_duration = SOUND_EXTRACT_LENGTH - 1
267
268
  # eff_w = total_w - extra_window_duration
268
269
  # logger.debug('effective_word_duration %f (two regions)'%eff_w)
269
- if not 0.5 < total_w < 0.655:
270
+ if not 0.5 < total_w < 0.656:
270
271
  failing_comment = 'two regions duration %f not in [0.50 0.655]\n%s'%(total_w, widths)
271
272
  # fig, ax = plt.subplots()
272
273
  # p(ax, sound_extract_one_bit)
@@ -276,13 +277,18 @@ class Decoder:
276
277
 
277
278
  def _plot_extract(self):
278
279
  fig, ax = plt.subplots()
279
- ax.plot(self.sound_extract, marker='o', markersize='1',
280
+ start = self.sound_extract_position
281
+ i_samples = np.arange(start, start + len(self.sound_extract))
282
+ yt = ax.get_yaxis_transform()
283
+ ax.hlines(0, 0, 1,
284
+ transform=yt, alpha=0.3,
285
+ linewidth=2, colors='black')
286
+ ax.plot(i_samples, self.sound_extract, marker='o', markersize='1',
280
287
  linewidth=1.5,alpha=0.3, color='blue' )
281
- ax.plot(self.sound_extract_one_bit*np.max(np.abs(self.sound_extract)),
288
+ ax.plot(i_samples, self.sound_extract_one_bit*np.max(np.abs(self.sound_extract)),
282
289
  marker='o', markersize='1',
283
290
  linewidth=1.5,alpha=0.3,color='red')
284
291
  xt = ax.get_xaxis_transform()
285
- yt = ax.get_yaxis_transform()
286
292
  ax.hlines(self.pulse_detection_level, 0, 1,
287
293
  transform=yt, alpha=0.3,
288
294
  linewidth=2, colors='green')
@@ -295,7 +301,8 @@ class Decoder:
295
301
  custom_lines,
296
302
  'detection level, signal, detected region'.split(','),
297
303
  loc='lower right')
298
- ax.set_title('Finding word and sync pulse')
304
+ ax.set_title('Finding word + sync pulse')
305
+ plt.xlabel("Position in file (samples)")
299
306
  plt.show()
300
307
 
301
308
  def get_time_in_sound_extract(self):
@@ -449,14 +456,7 @@ class Decoder:
449
456
  start = round(0.5*symbol_length) # half symbol
450
457
  end = start + symbol_length
451
458
  word_begining = whole_word[start:]
452
- # word_one_bit = np.abs(word_begining)>self.pulse_detection_level
453
- # N_ones = round(1.5*SYMBOL_LENGTH*1e-3*self.samplerate) # so it includes sync pulse
454
- # word_one_bit = closing(word_one_bit, np.ones(N_ones))
455
459
  gt_detection_level = np.argwhere(np.abs(word_begining)>self.pulse_detection_level)
456
- # print(gt_detection_level)
457
- # plt.plot(word_one_bit)
458
- # plt.plot(word_begining/abs(np.max(word_begining)))
459
- # plt.show()
460
460
  word_start = gt_detection_level[0][0]
461
461
  word_end = gt_detection_level[-1][0]
462
462
  self.effective_word_duration = (word_end - word_start)/self.samplerate
@@ -473,7 +473,10 @@ class Decoder:
473
473
  1e3*TEENSY_MAX_LAG))
474
474
  logger.debug('relative audio_block gap %.2f'%(relative_gap))
475
475
  if relative_gap > 1:
476
- print('bug with relative_gap')
476
+ print('Warning: gap between spike and word is too big for %s'%self.rec)
477
+ print('Audio update() gap between sync pulse and word start: ')
478
+ print('%.2f ms (max value %.2f)'%(1e3*gap/self.samplerate,
479
+ 1e3*TEENSY_MAX_LAG))
477
480
  symbol_width_samples_theor = self.samplerate*SYMBOL_LENGTH*1e-3
478
481
  symbol_width_samples_eff = self.effective_word_duration * \
479
482
  self.samplerate/(N_SYMBOLS - 1)
@@ -486,14 +489,23 @@ class Decoder:
486
489
  symbols_indices = symbol_positions.round().astype(int)
487
490
  if self.do_plots:
488
491
  fig, ax = plt.subplots()
489
- ax.plot(whole_word, marker='o', markersize='1',
492
+ ax.hlines(0, 0, 1,
493
+ transform=ax.get_yaxis_transform(), alpha=0.3,
494
+ linewidth=2, colors='black')
495
+ start = self.sound_extract_position
496
+ i_samples = np.arange(start, start + len(whole_word))
497
+ ax.plot(i_samples, whole_word, marker='o', markersize='1',
490
498
  linewidth=1.5,alpha=0.3, color='blue' )
491
499
  xt = ax.get_xaxis_transform()
492
500
  for x in symbols_indices:
493
- ax.vlines(x, 0, 1,
501
+ ax.vlines(x + start, 0, 1,
494
502
  transform=xt,
495
503
  linewidth=0.6, colors='green')
496
504
  ax.set_title('Slicing the 34 bits word:')
505
+ plt.xlabel("Position in file (samples)")
506
+ ax.vlines(start, 0, 1,
507
+ transform=xt,
508
+ linewidth=0.6, colors='red')
497
509
  plt.show()
498
510
  slice_width = round(SYMBOL_LENGTH*1e-3*self.samplerate)
499
511
  slices = [whole_word[i:i+slice_width] for i in symbols_indices]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: tictacsync
3
- Version: 0.8a0
3
+ Version: 0.9a0
4
4
  Summary: command for syncing audio video recordings
5
5
  Home-page: https://tictacsync.org/
6
6
  Author: Raymond Lutz
@@ -5,5 +5,5 @@ matplotlib>=3.7.1
5
5
  numpy>=1.24.3
6
6
  rich>=10.12.0
7
7
  lmfit
8
- skimage
8
+ scikit-image
9
9
  scipy>=1.10.1
File without changes
File without changes
File without changes