tictacsync 0.1a9-py2-none-any.whl → 0.1a11-py2-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tictacsync might be problematic.

tictacsync/device_scanner.py CHANGED
@@ -173,11 +173,11 @@ class Scanner:
                 continue
             devices = [m['dev UID'] for m in media_same_length]
             if len(set(devices)) !=1:
-                print('files with same length but diff device?')
+                print('There are files with same length but from different devices?')
                 for media in media_same_length:
-                    print('   %s'%media['path'])
-                print('put the offending file in its own folder and')
-                print('rerun. Quitting...')
+                    print('   [gold1]%s[/gold1]'%media['path'])
+                print('Please put the offending file in its own folder and rerun.')
+                print('Quitting...')
                 quit()
             self.found_multifiles.append(media_same_length)
         self.found_media_files = unifile_recordings
@@ -234,7 +234,7 @@ class Scanner:
             _pathname(out_path),
             combine_type='merge')
         logger.debug('sox.build status: %s'%status)
-        print('Merged poly WAV [gold1]%s [/gold1]from [gold1]%s[/gold1]'%(
+        print('Built poly WAV [gold1]%s [/gold1]from multi file recordings [gold1]%s[/gold1]'%(
             _pathname(out_path), multi))
         logger.debug('new Scanner.found_media_files: %s'%self.found_media_files)

@@ -326,6 +326,13 @@ class Scanner:

        Returns nothing
        """
+        def _list_duplicates(seq):
+            seen = set()
+            seen_add = seen.add
+            # adds all elements it doesn't know yet to seen and all other to seen_twice
+            seen_twice = set( x for x in seq if x in seen or seen_add(x) )
+            # turn the set into a list (as requested)
+            return list( seen_twice )
        folder_key = lambda m: m['path'].parent
        medias = sorted(self.found_media_files, key=folder_key)
        # build lists for multiple reference of iterators
@@ -335,9 +342,14 @@ class Scanner:
        name_of_folders = [p.name for p in complete_path_folders]
        logger.debug('complete_path_folders with media files %s'%complete_path_folders)
        logger.debug('name_of_folders with media files %s'%name_of_folders)
-        unique_folder_names = set(name_of_folders)
-        if len(unique_folder_names) != len(name_of_folders):
-            print('There is conflicts for some folder names:')
+        # unique_folder_names = set(name_of_folders)
+        repeated_folders = _list_duplicates(name_of_folders)
+        logger.debug('repeated_folders %s'%repeated_folders)
+        if repeated_folders:
+            print('There are conflicts for some repeated folder names:')
+            for f in [str(p) for p in repeated_folders]:
+                print('   [gold1]%s[/gold1]'%f)
+            print('Here are the complete paths:')
            for f in [str(p) for p in complete_path_folders]:
                print('   [gold1]%s[/gold1]'%f)
            print('please rename and rerun. Quitting..')
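
The _list_duplicates helper added above works because set.add returns None, which is falsy: the test x in seen or seen_add(x) is true only when x was already seen, so seen_twice ends up holding exactly the repeated names. A standalone check of the same idiom (sample values are illustrative):

    def list_duplicates(seq):
        seen = set()
        seen_add = seen.add  # bound-method lookup hoisted out of the loop
        seen_twice = set(x for x in seq if x in seen or seen_add(x))
        return list(seen_twice)

    assert list_duplicates(['A001', 'B002', 'A001', 'C003']) == ['A001']

This lets the scanner name the offending folders before quitting instead of only reporting that some name clash exists.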
tictacsync/entry.py CHANGED
@@ -110,18 +110,15 @@ def main():
    # logger.add(sys.stdout, filter="device_scanner")
    # logger.add(sys.stdout, filter="yaltc")
    # logger.add(sys.stdout, filter="timeline")
-    # logger.add(sys.stdout, filter=lambda r: r["function"] == "get_start_time")
+    # logger.add(sys.stdout, filter=lambda r: r["function"] == "_get_word_envelope")
    # logger.add(sys.stdout, filter=lambda r: r["function"] == "get_timecode")
    # logger.add(sys.stdout, filter=lambda r: r["function"] == "_get_BFSK_symbols_boundaries")
    # logger.add(sys.stdout, filter=lambda r: r["function"] == "_get_BFSK_word_boundaries")
    top_dir = args.directory[0]
    if os.path.isfile(top_dir):
-        # argumnent is a single file: do plots
+        # argumnent is a single file
        m = device_scanner.media_dict_from_path(Path(top_dir))
        a_rec = yaltc.Recording(m)
-        # a_rec._read_sound_find_YaLTC(0.5, 1.56)
-        # a_rec.decoder.make_silence_analysis_plot(
-        #     title='word detection for\n%s'%(top_dir))
        time = a_rec.get_start_time(plots=args.plotting)
        if time != None:
            frac_time = int(time.microsecond / 1e2)
@@ -143,14 +140,8 @@ def main():
    print('\n\nFound [gold1]%i[/gold1] media files from [gold1]%i[/gold1] devices'%(
        len(scanner.found_media_files),
        scanner.get_devices_number()), end='')
-    # print( ' devices: [gold1]%s[/gold1]'%(
-    #     ' '.join(list(scanner.devices_names.values())),
-    #     ))
    print('\nThese recordings will be analysed for timestamps:\n')
    for m in (scanner.found_media_files):
-        # if len(p) > 1:
-        #     continue
-        # print('   ', '[gold1]%s[/gold1]'%file_alias)
        print('   ', '[gold1]%s[/gold1]'%m['path'].name)
    print()
    if args.verbose_output: # verbose, so no progress bars
@@ -166,6 +157,7 @@ def main():
    table.add_column("UTC times\nstart:end", justify="center", style='gold1')
    table.add_column("Clock drift\n(ppm)", justify="right", style='gold1')
    table.add_column("SN ratio\n(dB)", justify="center", style='gold1')
+    table.add_column("Date\n", justify="center", style='gold1')
    recordings_with_time = [
        rec
        for rec in rec_with_yaltc
@@ -181,6 +173,7 @@ def main():
    [print(rec, end=' ') for rec in rec_WO_time]
    print('\n')
    for r in recordings_with_time:
+        date = r.get_start_time().strftime("%y-%m-%d")
        start_HHMMSS = r.get_start_time().strftime("%Hh%Mm%Ss")
        end_MMSS = r.get_end_time().strftime("%Mm%Ss")
        times_range = start_HHMMSS + ':' + end_MMSS
@@ -192,9 +185,11 @@ def main():
            # '%.6f'%(r.true_samplerate/1e3),
            '%2i'%(r.get_samplerate_drift()),
            '%.0f'%r.decoder.SN_ratio,
+            date
            )
    console = Console()
    console.print(table)
+    print('\n')
    n_devices = scanner.get_devices_number()
    # if n_devices > 2:
    #     print('\nMerging for more than 2 devices is not implemented yet, quitting...')
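
In rich, every table.add_column() call must be matched by one more positional value in each table.add_row() call, which is why the new "Date" column comes with the extra date cell in the row above. A minimal sketch of the pattern (column names mirror the diff, the row values are made up):

    from rich.console import Console
    from rich.table import Table

    table = Table()
    table.add_column("SN ratio\n(dB)", justify="center", style="gold1")
    table.add_column("Date\n", justify="center", style="gold1")  # the added column
    table.add_row("42", "24-05-01")  # one cell per declared column
    Console().print(table)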
@@ -204,13 +199,17 @@ def main():
        quit()
    matcher = timeline.Matcher(recordings_with_time)
    matcher.scan_audio_for_each_ref_rec()
-    print()
-    for stitcher in track(matcher.video_mergers,
-            description="4/4 Merging sound to videos:"):
-        stitcher.build_audio_and_write_video(top_dir)
-
-    # [stitcher.build_audio_and_write_video(top_dir) for stitcher in matcher.video_mergers]
-    print()
+    if not matcher.video_mergers:
+        print('\nNothing to sync, bye.\n')
+        quit()
+    if args.verbose_output: # verbose, so no progress bars
+        for stitcher in matcher.video_mergers:
+            stitcher.build_audio_and_write_video(top_dir)
+    else:
+        for stitcher in track(matcher.video_mergers,
+                description="4/4 Merging sound to videos:"):
+            stitcher.build_audio_and_write_video(top_dir)
+    print("\n")
    for stitcher in matcher.video_mergers:
        print('[gold1]%s[/gold1]'%stitcher.ref_recording.AVpath.name, end='')
        for audio in stitcher.matched_audio_recordings:
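
rich.progress.track wraps an iterable and draws a progress bar while yielding its items; since a progress bar and verbose logging fight over the terminal, the rewrite above only uses track in non-verbose mode and otherwise iterates plainly. The same branching pattern in isolation (function and variable names here are illustrative, not from tictacsync):

    from rich.progress import track

    def merge_all(mergers, top_dir, verbose=False):
        iterable = mergers if verbose else track(mergers,
            description="4/4 Merging sound to videos:")
        for stitcher in iterable:
            stitcher.build_audio_and_write_video(top_dir)  # same per-item call as in the diff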
tictacsync/timeline.py CHANGED
@@ -446,12 +446,12 @@ class Matcher:

    def _rename_all_recs(self):
        """
-        Add _sncd to filenames of synced video files. Change stored name only:
+        Add _synced to filenames of synced video files. Change stored name only:
        files have yet to be written to.
        """
        for rec in self.recordings:
            rec_extension = rec.AVpath.suffix
-            rel_path_new_name = '%s%s%s'%(rec.AVpath.stem,'_sncd',rec_extension)
+            rel_path_new_name = '%s%s%s'%(rec.AVpath.stem,'_synced',rec_extension)
            rec.new_rec_name = Path(rel_path_new_name)
            logger.debug('for %s new name: %s'%(
                _pathname(rec.AVpath),
@@ -481,15 +481,17 @@ class Matcher:
                reference_tag,
                ref_rec))
            audio_stitch = AudioStitcherVideoMerger(ref_rec)
-            self.video_mergers.append(audio_stitch)
            for audio in audio_recs:
                if self._does_overlap(ref_rec, audio):
                    audio_stitch.add_matched_audio(audio)
                    logger.debug('recording %s overlaps,'%(audio))
                    # print('   recording [gold1]%s[/gold1] overlaps,'%(audio))
-            if len(audio_stitch.matched_audio_recordings) == 0:
+            if len(audio_stitch.matched_audio_recordings) > 0:
+                self.video_mergers.append(audio_stitch)
+            else:
                logger.debug('\n nothing\n')
-                # print('   none overlap?')
+                print('No overlap found for %s'%ref_rec.AVpath.name)
+                del audio_stitch
        logger.debug('%i video_mergers created'%len(self.video_mergers))

    def _does_overlap(self, ref_rec, audio_rec):
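
The _does_overlap body itself is outside this diff. For reference, a conventional implementation of such a test compares start and end times pairwise; this sketch is an assumption about the shape of the check, not the package's actual code:

    def does_overlap(a_start, a_end, b_start, b_end):
        # two intervals overlap iff each one starts before the other ends
        return a_start < b_end and b_start < a_end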
tictacsync/yaltc.py CHANGED
@@ -51,7 +51,8 @@ TRIAL_TIMES = [ # in seconds
    (3.5, -3.5),
    ]
SOUND_EXTRACT_LENGTH = 1.56 # second
-SYMBOL_LENGTH_TOLERANCE = 0.05 # relative
+SYMBOL_LENGTH_TOLERANCE = 0.06 # relative
+FSK_TOLERANCE = 60 # Hz
SAMD21_LATENCY = 63 # microseconds, for DAC conversion
YEAR_ZERO = 2021

@@ -62,6 +63,7 @@ SYMBOL_LENGTH = 14.286 # ms, from FSKfreqCalculator.py
N_SYMBOLS_SAMD21 = 35 # including sync pulse
##################

+BPF_LOW_FRQ, BPF_HIGH_FRQ = (0.5*F1, 2*F2)

try:
    layouts, _ = (
@@ -189,17 +191,18 @@ class Decoder:
    SN_ratio : float
        signal over noise ratio in dB.

-    pulse_level : float
+    pulse_detection_level : float
        level used to detect sync pulse

    silent_zone_indices : tuple of ints
        silent zone boundary positions relative to the start
        of self.sound_extract.
+
+    estimated_pulse_position : int
+        pulse position (samples) relative to the start of self.sound_extract

-    pulse_position : dict
-        key 'value' : positions relative to the start of self.sound_extract
-        of pulse;
-        key 'type' : "estimated" or "detected"
+    detected_pulse_position : int
+        pulse position (samples) relative to the start of self.sound_extract

    cached_convolution_fit : dict
        if _fit_triangular_signal_to_convoluted_env() has already been called,
@@ -222,10 +225,10 @@ class Decoder:
    def clear_decoder(self):
        self.sound_data_extract = None
        self.cached_convolution_fit = {'sound_extract_position': None}
-        self.pulse_level = None
+        self.pulse_detection_level = None
        self.silent_zone_indices = None
-        self.pulse_position = {'value': None}
-
+        self.detected_pulse_position = None
+        self.estimated_pulse_position = None

    def set_sound_extract_and_sr(self, extract, s_r, where):
        self.sound_extract = extract
@@ -253,21 +256,10 @@ class Decoder:

        """
        WINDOW_LENGTH, POLYORDER = (15, 3) # parameters found by experiment, hit and miss
-        abs_hil = np.abs(scipy.signal.hilbert(self.sound_extract))
-        envelope = scipy.signal.savgol_filter(abs_hil,
+        absolute_of_hilbert = np.abs(scipy.signal.hilbert(self.sound_extract))
+        envelope = scipy.signal.savgol_filter(absolute_of_hilbert,
            WINDOW_LENGTH, POLYORDER)
-
-        # plt.plot(self.sound_extract)
-        # plt.plot(abs_hil)
-        # plt.plot(envelope)
-        # plt.show()
-
-        # mean = envelope.mean()
-        # if mean: # in case of zero padding
-        #     factor = 0.5/mean # since 50% duty cycle
-        # else:
-        #     factor = 1
-        # return factor*envelope
+        logger.debug('self.sound_extract envelope length %i samples'%len(envelope))
        return envelope

    def _get_signal_level(self):
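
The envelope here is the magnitude of the analytic signal (scipy.signal.hilbert) smoothed by a Savitzky-Golay polynomial filter; the diff only renames a variable and swaps dead commented-out code for a debug line. The idiom on its own, with the same window parameters as above:

    import numpy as np
    import scipy.signal

    def envelope(x, window_length=15, polyorder=3):
        analytic = scipy.signal.hilbert(x)  # analytic signal; its magnitude is the envelope
        return scipy.signal.savgol_filter(np.abs(analytic), window_length, polyorder)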
@@ -276,30 +268,30 @@ class Decoder:

    def _get_pulse_position(self):
        # relative to extract beginning
-        # return precise value
-        if self.pulse_position['value'] is not None:
-            return self.pulse_position['value']
+        if self.detected_pulse_position:
+            logger.debug('returning detected value')
+            return self.detected_pulse_position
+        if self.estimated_pulse_position:
+            return self.estimated_pulse_position
        _, silence_center_x = self._fit_triangular_signal_to_convoluted_env()
        # symbol_width_samples = 1e-3*SYMBOL_LENGTH
-        pulse_relative_pos = int(0.5*(0.5 - 1e-3*SYMBOL_LENGTH)*self.samplerate)
-        approx_pulse_x = silence_center_x + pulse_relative_pos
-        self.pulse_position = {}
-        self.pulse_position['value'] = approx_pulse_x
-        self.pulse_position['type'] = 'estimated'
-        return self.pulse_position['value']
-
-    def _get_pulse_level(self):
+        self.estimated_pulse_position = silence_center_x + int(0.5*(
+            0.5 - 1e-3*SYMBOL_LENGTH)*self.samplerate)
+        logger.debug('returning estimated value from silence mid position')
+        return self.estimated_pulse_position
+
+    def _get_pulse_detection_level(self):
        # return the geometric mean between silence and BFSK levels
-        if self.pulse_level is None:
+        if self.pulse_detection_level is None:
            silence_floor = self._get_silence_floor()
            # lower_BFSK_level = silence_floor
-            pulse_position = self._get_pulse_position()
-            lower_BFSK_level = self._get_minimal_bfsk(pulse_position)
+            # pulse_position = self._get_pulse_position()
+            lower_BFSK_level = self._get_minimal_bfsk()
            value = math.sqrt(silence_floor * lower_BFSK_level)
-            self.pulse_level = value
+            self.pulse_detection_level = value
            return value
        else:
-            return self.pulse_level
+            return self.pulse_detection_level

    def _get_square_convolution(self):
        """
@@ -332,37 +324,50 @@ class Decoder:
        x = range(start, len(convol) + start)
        return [*x], convol

-    def _get_word_envelope(self, pulse_position):
+    def _get_word_envelope(self):
        """
        Chop the signal envelope keeping the word region and smooth it over the
        longest BFSK period
        """
-        max_period = int(self.samplerate*max(1/F1,1/F2))
-        logger.debug('max BFSK period %i samples'%max_period)
-        period_window = np.ones(max_period,dtype=int)/max_period
+        SR = self.samplerate
        envelope = self._get_envelope()
-        symbol_width_samples = 1e-3*SYMBOL_LENGTH*self.samplerate
+        pulse_position = self._get_pulse_position()
+        samples_to_end = len(self.sound_extract) - pulse_position
+        is_too_near_the_end = samples_to_end/SR < 0.5
+        logger.debug('pulse_position is_too_near_the_end %s'%
+            is_too_near_the_end)
+        if is_too_near_the_end:
+            pulse_position -= SR # one second sooner
+        symbol_width_samples = 1e-3*SYMBOL_LENGTH*SR
        word_start = int(pulse_position + 3*symbol_width_samples)
-        word_end = int(pulse_position + 0.5*self.samplerate)
-        word_end -= 2*symbol_width_samples # slide to the left a little
-        logger.debug('word start, end: %i %i'%(word_start, word_end))
-        w_envelope = envelope[word_start:int(word_end)]
-        # plt.plot(w_envelope)
-        # plt.show()
-        return w_envelope
-        # return np.convolve(w_envelope, period_window, mode='same')
+        word_end = int(pulse_position + 0.5*SR)
+        word_end -= int(2*symbol_width_samples) # slide to the left a little
+        logger.debug('word start, end: %i %i (in file)'%(
+            word_start + self.sound_extract_position,
+            word_end + self.sound_extract_position))
+        w_envelope = envelope[word_start : word_end]
+        word_envelope_truncated = word_end-word_start != len(w_envelope)
+        logger.debug('w_envelope is sliced out of bounds: %s'%(
+            str(word_envelope_truncated)))
+        logger.debug('word envelope length %i samples %f secs'%(
+            len(w_envelope), len(w_envelope)/SR))
+        max_period = int(self.samplerate*max(1/F1,1/F2))
+        logger.debug('max BFSK period %i in samples'%max_period)
+        period_window = np.ones(max_period,dtype=int)/max_period
+        # smooth over longest BFSK period
+        return np.convolve(w_envelope, period_window, mode='same')

-    def _get_minimal_bfsk(self, pulse_position):
+    def _get_minimal_bfsk(self):
        """
        because of non-flat frequency response, bfsk bits dont have the same
        amplitude. This returns the least of both by detecting a bimodal
        gaussian distribution

        """
-        # w_envelope = self._get_word_envelope(pulse_position)
+        # w_envelope = self._get_word_envelope()
        # word_start = int(min_position + shift + 0.3*self.samplerate)
        # word = w_envelope[word_start : int(word_start + 0.4*self.samplerate)]
-        word = self._get_word_envelope(pulse_position)
+        word = self._get_word_envelope()
        # plt.plot(word)
        # plt.show()
        n = len(word)
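
The word envelope now leaves _get_word_envelope smoothed by a normalized rectangular window one BFSK period long, i.e. a moving average done with np.convolve in 'same' mode so the output keeps the input's length. The idiom in isolation:

    import numpy as np

    def moving_average(signal, window_len):
        window = np.ones(window_len) / window_len  # normalized: signal levels are preserved
        return np.convolve(signal, window, mode='same')

Note that np.ones(max_period, dtype=int)/max_period in the diff produces the same float window, since dividing the integer array by a Python int promotes it to float.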
@@ -406,12 +411,11 @@ class Decoder:
            logger.debug('yes, fit values cached:')
            v1 = self.cached_convolution_fit['chi_square']
            v2 = self.cached_convolution_fit['minimum position']
-            logger.debug('chi_square: %s minimum position: %s'%(v1, v2))
+            v2_file = v2 + self.sound_extract_position
+            logger.debug('cached chi_sq: %s minimum position in file: %s'%(v1, v2_file))
            return (v1, v2)
            # cached!
        x_shifted, convolution = self._get_square_convolution()
-
-        shift = x_shifted[0] # convolution is shorter than sound envelope
        # see numpy.convolve(..., mode='valid')
        x = np.arange(len(convolution))
        trig_params = lmfit.Parameters()
@@ -440,12 +444,13 @@ class Decoder:
            args=(x,), kws={'signal_data': convolution}
            )
        chi_square = fit_trig.chisqr
-        min_position = int(fit_trig.params['min_position'].value)
-        logger.debug('chi_square %.1f minimum convolution position %i'%
-            (chi_square, min_position + shift))
+        shift = x_shifted[0] # convolution is shorter than sound envelope
+        min_position = int(fit_trig.params['min_position'].value) + shift
+        logger.debug('chi_square %.1f minimum convolution position %i in file'%
+            (chi_square, min_position + self.sound_extract_position))
        self.cached_convolution_fit['sound_extract_position'] = self.sound_extract_position
        self.cached_convolution_fit['chi_square'] = chi_square
-        self.cached_convolution_fit['minimum position'] = min_position + shift
+        self.cached_convolution_fit['minimum position'] = min_position

        return chi_square, min_position + shift

@@ -467,9 +472,7 @@ class Decoder:
    def _get_silent_zone_indices(self):
        """
        Returns silent zone boundary positions relative to the start
-        of self.sound_extract. Adjustment are made so a complete
-        0.5 second signal is at the left of the silent zone for word
-        decoding.
+        of self.sound_extract.

        Returns
        -------
@@ -479,7 +482,7 @@ class Decoder:
            right indice.

        """
-        if self.silent_zone_indices is not None:
+        if self.silent_zone_indices:
            return self.silent_zone_indices
        _, silence_center_position = self._fit_triangular_signal_to_convoluted_env()
        srate = self.samplerate
@@ -487,9 +490,10 @@ class Decoder:
        left_window_boundary = silence_center_position - half_window
        right_window_boundary = silence_center_position + half_window
        # margin = 0.75 * srate
-        logger.debug('silent zone, left: %i, right %i, center %i'%
-            (left_window_boundary, right_window_boundary,
-            silence_center_position))
+        values = np.array([left_window_boundary, right_window_boundary,
+            silence_center_position])
+        values += self.sound_extract_position # samples pos in file
+        logger.debug('silent zone, left: %i, right %i, center %i'%tuple(values))
        self.silent_zone_indices = (left_window_boundary, right_window_boundary)
        return self.silent_zone_indices

@@ -519,7 +523,7 @@ class Decoder:
        x_convolution, convolution = self._get_square_convolution()
        scaled_convo = self._get_signal_level()*convolution
        # since 0 < convolution < 1
-        trig_level = self._get_pulse_level()
+        trig_level = self._get_pulse_detection_level()
        sound_extract_position = self.sound_extract_position
        def x2f(nx):
            return nx + sound_extract_position
@@ -544,7 +548,7 @@ class Decoder:
            approx_pulse_x, 0.1, 0.9,
            transform=xt, linewidth=1, colors='yellow'
            )
-        bfsk_min = self._get_minimal_bfsk(approx_pulse_x)
+        bfsk_min = self._get_minimal_bfsk()
        ax.hlines(
            bfsk_min, 0, 1,
            transform=yt, linewidth=1, colors='red'
@@ -597,15 +601,18 @@ class Decoder:

    def _detect_sync_pulse_position(self):
        """
-        Determines noise level during silence period and use it to detect
-        the sync pulse position. Computes SN_ratio and stores it.
+        Determines noise level during silence period and use it to detect the
+        sync pulse position. Computes SN_ratio and stores it. Start searching
+        around end of silent zone. Adjustment are made so a complete 0.5
+        second signal is at the right of the starting search position so a
+        complete 0.5 s word is available for decoding.
+
        Returns the pulse position relative to the extract beginning.
        """
-        start_silent_zone, end_silent_zone = self._get_silent_zone_indices()
-        pulse_level = self._get_pulse_level()
+        pulse_detection_level = self._get_pulse_detection_level()
        abs_signal = abs(self.sound_extract)
        mean_during_word = 2*abs_signal.mean()
-        self.SN_ratio = 20*math.log10(mean_during_word/pulse_level)
+        self.SN_ratio = 20*math.log10(mean_during_word/pulse_detection_level)
        logger.debug('SN ratio: %f dB'%(self.SN_ratio))
        search_pulse_start_point = self._get_pulse_position()
        search_pulse_start_point -= 3*SYMBOL_LENGTH*1e-3*self.samplerate
@@ -623,17 +630,20 @@ class Decoder:
        logger.debug('search_pulse_start_point: %i in extract'%
            search_pulse_start_point)
        abs_signal_after_silence = abs_signal[search_pulse_start_point:]
+        # here the real searching with numpy.argmax()
        first_point = \
-            np.argmax(abs_signal_after_silence > pulse_level)
+            np.argmax(abs_signal_after_silence > pulse_detection_level)
        first_point += search_pulse_start_point
        logger.debug('found sync pulse at %i in extract'%first_point)
+        self.detected_pulse_position = first_point
        return first_point

-    def _get_word_width_parameters(self, pulse_position):
+    def _get_word_width_parameters(self):
        abs_signal = abs(self.sound_extract)
+        pulse_position = self._get_pulse_position()
        # half_amplitude = abs_signal.mean() # since 50% duty cycle OLD
        # params = {'word_width_threshold':WORDWIDTHFACTOR*half_amplitude} OLD
-        bfsk_min = self._get_minimal_bfsk(pulse_position)
+        bfsk_min = self._get_minimal_bfsk()
        params = {'word_width_threshold': 0.8*bfsk_min}
        sr = self.samplerate
        presumed_symbol_length = SYMBOL_LENGTH*1e-3*sr
@@ -648,10 +658,11 @@ class Decoder:
        params['presumed_symbol_length'] = presumed_symbol_length
        return params

-    def _get_BFSK_word_boundaries(self, pulse_position):
+    def _get_BFSK_word_boundaries(self):
        n_bits = N_SYMBOLS_SAMD21 - 1
        sr = self.samplerate
-        wwp = self._get_word_width_parameters(pulse_position)
+        wwp = self._get_word_width_parameters()
+        pulse_position = self._get_pulse_position()
        # search_start_position = wwp['search_start_position']
        search_end_position = wwp['search_end_position']
        word_width_threshold = wwp['word_width_threshold']
@@ -688,10 +699,11 @@ class Decoder:
        logger.debug('   relative discrepancy %.4f%%'%(abs(100*relative_error)))
        return status, left_boundary, right_boundary

-    def _get_BFSK_symbols_boundaries(self, pulse_position):
+    def _get_BFSK_symbols_boundaries(self):
        # returns indices of start of each slice and boundaries
+        pulse_position = self._get_pulse_position()
        boundaries_OK, left_boundary, right_boundary = \
-            self._get_BFSK_word_boundaries(pulse_position)
+            self._get_BFSK_word_boundaries()
        if left_boundary is None:
            return None, None, None
        symbol_width_samples = \
@@ -740,7 +752,8 @@ class Decoder:
        logger.debug('slicing intervals, word_intervals = %s'%
            word_intervals)
        # skip sample after pulse, start at BFSK word
-        slices = [self.sound_extract[slice(*pair)]
+        filtered_sound_extract = self._band_pass_filter(self.sound_extract)
+        slices = [filtered_sound_extract[slice(*pair)]
            for pair in word_intervals]
        np.set_printoptions(threshold=5)
        # logger.debug('data slices: \n%s'%pprint.pformat(slices))
@@ -755,13 +768,17 @@ class Decoder:
        freq_in_hertz = abs(freq * self.samplerate)
        return int(round(freq_in_hertz))

+    # def _get_bit_from_freq(self, freq):
+    #     if math.isclose(freq, F1, abs_tol=FSK_TOLERANCE):
+    #         return '0'
+    #     if math.isclose(freq, F2, abs_tol=FSK_TOLERANCE):
+    #         return '1'
+    #     else:
+    #         return None
+
    def _get_bit_from_freq(self, freq):
-        if math.isclose(freq, F1, rel_tol=0.05):
-            return '0'
-        if math.isclose(freq, F2, rel_tol=0.05):
-            return '1'
-        else:
-            return None
+        mid_FSK = 0.5*(F1 + F2)
+        return '1' if freq > mid_FSK else '0'

    def _get_int_from_binary_str(self, string_of_01s):
        return int(''.join(reversed(string_of_01s)),2)
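
The rewritten _get_bit_from_freq replaces tolerance matching with a midpoint comparator between the two FSK tones, so it always returns a bit where the old version could return None; the downstream "if bit == None" branch can no longer fire from this path. The decision rule in isolation, with made-up tone frequencies:

    F1, F2 = 1000.0, 2000.0  # illustrative tones, not the package's actual constants

    def bit_from_freq(freq):
        mid_fsk = 0.5 * (F1 + F2)
        return '1' if freq > mid_fsk else '0'

    assert bit_from_freq(1100.0) == '0'
    assert bit_from_freq(1900.0) == '1'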
@@ -790,15 +807,14 @@ class Decoder:
        # save figure in filename if set, otherwise
        # start an interactive plot, title is for matplotlib
        signal = self.sound_extract
+        # signal = self._band_pass_filter(signal)
        start = self.sound_extract_position
        x_signal_in_file = range(
            start,
            start + len(signal)
            )
+        wwp = self._get_word_width_parameters()
        start_silent_zone, end_silent_zone = self._get_silent_zone_indices()
-        # sync_pulse = self._detect_sync_pulse_position()
-        wwp = self._get_word_width_parameters(sync_pulse)
-        # search_start_position = wwp['search_start_position'] + start
        search_end_position = wwp['search_end_position'] + start
        fig, ax = plt.subplots()
        plt.title(title)
@@ -835,9 +851,8 @@ class Decoder:
            [end_silent_zone + start], [0],
            marker='<', markersize='10',
            linewidth=0.3, color='green', alpha=0.3)
-        # symbols_indices = self._get_BFSK_symbols_boundaries(sync_pulse)
        boundaries_OK, word_lft, word_rght = \
-            self._get_BFSK_word_boundaries(sync_pulse)
+            self._get_BFSK_word_boundaries()
        ax.vlines(
            word_lft + start, 0, 1,
            transform=ax.get_xaxis_transform(),
@@ -868,18 +883,27 @@ class Decoder:
                dpi=height/fig.get_size_inches()[1])
        plt.close()

+    def _band_pass_filter(self, data):
+        # return filtered data
+        def _bandpass(data: np.ndarray, edges: list[float], sample_rate: float, poles: int = 5):
+            sos = scipy.signal.butter(poles, edges, 'bandpass', fs=sample_rate, output='sos')
+            filtered_data = scipy.signal.sosfiltfilt(sos, data)
+            return filtered_data
+        sample_rate = self.samplerate
+        times = np.arange(len(data))/sample_rate
+        return _bandpass(data, [BPF_LOW_FRQ, BPF_HIGH_FRQ], sample_rate)
+
    def get_time_in_sound_extract(self, plots):
        if self.sound_extract is None:
            return None
        if plots:
            self.make_silence_analysis_plot()
-        start_silent_zone, end_silent_zone = self._get_silent_zone_indices()
        pulse_position = self._detect_sync_pulse_position()
        pulse_pos_in_file = pulse_position + self.sound_extract_position
        pulse_position_sec = pulse_pos_in_file/self.samplerate
        logger.debug('found sync pulse at sample %i in file'%pulse_pos_in_file)
        symbols_indices, word_lft, word_rght = \
-            self._get_BFSK_symbols_boundaries(pulse_position)
+            self._get_BFSK_symbols_boundaries()
        if plots:
            title = 'Bit slicing at %s, %.2f s'%(pulse_pos_in_file,
                pulse_position_sec)
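
The new _band_pass_filter designs a 5-pole Butterworth band-pass as second-order sections and applies it with scipy.signal.sosfiltfilt, which filters forward then backward so no phase delay is introduced that could shift bit boundaries. (The times array computed inside it is unused.) The same scipy idiom standalone, with illustrative band edges:

    import numpy as np
    import scipy.signal

    def band_pass(data, low_hz, high_hz, sample_rate, poles=5):
        sos = scipy.signal.butter(poles, [low_hz, high_hz], 'bandpass',
            fs=sample_rate, output='sos')
        return scipy.signal.sosfiltfilt(sos, data)  # zero-phase filtering

    sr = 48000
    noisy = np.random.default_rng(0).normal(size=sr)
    filtered = band_pass(noisy, 500.0, 4000.0, sr)  # hypothetical edges, not BPF_LOW_FRQ/BPF_HIGH_FRQ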
@@ -890,16 +914,27 @@ class Decoder:
        if symbols_indices is None:
            return None
        sliced_data = self._slice_sound_extract(symbols_indices)
+        # sliced_data = [self._band_pass_filter(data_slice)
+        #     for data_slice
+        #     in sliced_data
+        #     ]
        frequencies = [self._get_main_frequency(data_slice)
            for data_slice
            in sliced_data
            ]
        logger.debug('frequencies = %s'%frequencies)
-        bits = [self._get_bit_from_freq(f) for f in frequencies]
+        sr = self.samplerate
+        n_bits = N_SYMBOLS_SAMD21 - 1
+        eff_symbol_length = 1e3*(word_rght-word_lft)/(n_bits*sr)
+        length_ratio = eff_symbol_length / SYMBOL_LENGTH
+        logger.debug('symbol length_ratio (eff/supposed) %f'%length_ratio)
+        corrected_freq = np.array(frequencies)*length_ratio
+        logger.debug('corrected freq (using symbol length) = %s'%corrected_freq)
+        bits = [self._get_bit_from_freq(f) for f in corrected_freq]
        for i, bit in enumerate(bits):
            if bit == None:
                logger.warning('cant decode frequency %i for bit at %i-%i'%(
-                    frequencies[i],
+                    corrected_freq[i],
                    symbols_indices[i],
                    symbols_indices[i+1]))
        if None in bits:
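
The decoder now rescales each measured tone by length_ratio, the effective symbol length implied by the detected word width divided by the nominal SYMBOL_LENGTH. This compensates recorder clock drift: a slow clock stretches symbols and lowers apparent tone frequencies by the same factor, so multiplying restores them. A worked example with a hypothetical measurement:

    SYMBOL_LENGTH = 14.286  # ms, nominal, from the constants at the top of yaltc.py
    eff_symbol_length = 14.4  # ms, hypothetical value implied by the word width
    length_ratio = eff_symbol_length / SYMBOL_LENGTH  # ~1.008
    corrected = 992.0 * length_ratio  # a 992 Hz reading comes back to ~999.9 Hz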
@@ -915,8 +950,6 @@ class Decoder:
            time_values['clock source'] = 'GPS' \
                if time_values['clock source'] == 1 else 'RTC'
        if self._demod_values_are_OK(time_values):
-            self.pulse_position['value'] = pulse_position
-            self.pulse_position['type'] = 'detected'
            return time_values
        else:
            return None
@@ -932,10 +965,10 @@ class Recording:
    media_without_YaLTC : pathlib.path
        path of video+sound file stripped of YaLTC channel

-    device: str
+    device : str
        identifies the device used for the recording, set in __init__()

-    new_rec_name: str
+    new_rec_name : str
        built using the device name, ex: "CAM_A001"
        set by Timeline._rename_all_recs()

@@ -950,25 +983,25 @@ class Recording:
    decoder : yaltc.decoder
        associated decoder object, if file is audiovideo

-    true_samplerate: float
+    true_samplerate : float
        true sample rate using GPS time

-    start_time: datetime or str
+    start_time : datetime or str
        time and date of the first sample in the file, cached
        after a call to get_start_time(). Value on initialization
        is None.

-    sync_position: int
+    sync_position : int
        position of first detected syn pulse

-    is_reference: bool (True for ref rec only)
+    is_reference : bool (True for ref rec only)
        in multi recorders set-ups, user decides if a sound-only recording
        is the time reference for all other audio recordings. By
        default any video recording is the time reference for other audio,
        so this attribute is only relevant to sound recordings and is
        implicitly True for each video recordings (but not set)

-    device_relative_speed: float
+    device_relative_speed : float
        the ratio of the recording device clock speed relative to the
        reference_rec clock device, in order to correct clock drift with
        pysox tempo transform. If value < 1.0 then the recording is slower
@@ -977,7 +1010,7 @@ class Recording:
        sound). A mean is calculated for all recordings of the same device
        in Montage._get_concatenated_audiofile_for()

-    time_position: float
+    time_position : float
        The time (in seconds) at which the recording starts relative to the
        reference recording. Updated by each Montage instance so the value
        can change depending on the reference recording (a video or main
@@ -992,7 +1025,7 @@ class Recording:
        contains the path of audio only of self.final_synced_file. Absolute
        path to tempfile.

-    in_cam_audio_sync_error: int
+    in_cam_audio_sync_error : int
        in cam audio sync error, read in the camera folder. Negative value
        for lagging video (audio leads) positive value for lagging audio
        (video leads)
@@ -1067,18 +1100,19 @@ class Recording:
            # logger.warning('file has no audio')
            recording_init_fail = 'no audio in file'
        elif self.get_duration() < MINIMUM_LENGTH:
-            recording_init_fail = 'file too short, %f s'%self.get_duration()
-        if recording_init_fail == '':
+            recording_init_fail = 'file too short, %f s\n'%self.get_duration()
+        if recording_init_fail == '': # success
            self.decoder = Decoder(self)
            # self._set_multi_files_siblings()
+            self._check_for_camera_error_correction()
        else:
+            print('For file %s, '%self.AVpath)
            logger.warning('Recording init failed: %s'%recording_init_fail)
            print('Recording init failed: %s'%recording_init_fail)
            self.probe = None
            self.decoder = None
        logger.debug('ffprobe found: %s'%self.probe)
        logger.debug('n audio chan: %i'%self.get_audio_channels_nbr())
-        self._check_for_camera_error_correction()

    def __repr__(self):
        return 'Recording of %s'%_pathname(self.new_rec_name)
tictacsync-0.1a11.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: tictacsync
-Version: 0.1a9
+Version: 0.1a11
Summary: command for syncing audio video recordings
Home-page: https://sr.ht/~proflutz/TicTacSync/
Author: Raymond Lutz
tictacsync-0.1a11.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+tictacsync/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tictacsync/device_scanner.py,sha256=ybMGhSPbrfFvSWEjv0zYtwDSClVQRfGYQVJcakpuU54,18522
+tictacsync/entry.py,sha256=kboDNw_VA5-bOPMiJS6JMftqlTW0FsHxrsHMfW5ckp0,8689
+tictacsync/timeline.py,sha256=q0jhHqhj2cdjL4UIWv6_dAC7VfjQUb69Tu4VP5kULzg,34829
+tictacsync/yaltc.py,sha256=qWLhV2PP1KWoLmSVlSGGYCiCzImm6bnbPTVYilYrzzQ,69123
+tictacsync-0.1a11.dist-info/LICENSE,sha256=ZAOPXLh1zlQAnhHUd7oLslKM01YZ5UiAu3STYjwIxck,1068
+tictacsync-0.1a11.dist-info/METADATA,sha256=RvufymvGNYowOv8iwEREhXTHXT96VaqtyRn1iFui4Y0,4256
+tictacsync-0.1a11.dist-info/WHEEL,sha256=pqI-DBMA-Z6OTNov1nVxs7mwm6Yj2kHZGNp_6krVn1E,92
+tictacsync-0.1a11.dist-info/entry_points.txt,sha256=7Ih9Xas4RWMDqt2adwXpt7x9j2YtXwj_jl-jNhkIArg,54
+tictacsync-0.1a11.dist-info/top_level.txt,sha256=eaCWG-BsYTRR-gLTJbK4RfcaXajr0gjQ6wG97MkGRrg,11
+tictacsync-0.1a11.dist-info/RECORD,,
tictacsync-0.1a9.dist-info/RECORD DELETED
@@ -1,11 +0,0 @@
-tictacsync/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-tictacsync/device_scanner.py,sha256=ObgT6pb4kzMwgRPkp7aeoSiFm8mBZuMOO8cAXH5mj1o,17868
-tictacsync/entry.py,sha256=Wkz8JH2dOM8ZHQMspl6-wY-MvBwxK1s51QqJ21w5mMQ,8758
-tictacsync/timeline.py,sha256=99lG2aQ7Bia34pjZbX2OZb60Fb_VAZHg6hg3eHaV-VQ,34745
-tictacsync/yaltc.py,sha256=mgPq4IW2UBXnH9lb37Ed4qOxawknPm_pgRF-xWOKPZg,66994
-tictacsync-0.1a9.dist-info/LICENSE,sha256=ZAOPXLh1zlQAnhHUd7oLslKM01YZ5UiAu3STYjwIxck,1068
-tictacsync-0.1a9.dist-info/METADATA,sha256=J5NcFL0mBAoo8RL3xGXZKoXPWG02Hb1sexnUf1FSF90,4255
-tictacsync-0.1a9.dist-info/WHEEL,sha256=pqI-DBMA-Z6OTNov1nVxs7mwm6Yj2kHZGNp_6krVn1E,92
-tictacsync-0.1a9.dist-info/entry_points.txt,sha256=7Ih9Xas4RWMDqt2adwXpt7x9j2YtXwj_jl-jNhkIArg,54
-tictacsync-0.1a9.dist-info/top_level.txt,sha256=eaCWG-BsYTRR-gLTJbK4RfcaXajr0gjQ6wG97MkGRrg,11
-tictacsync-0.1a9.dist-info/RECORD,,