meeting-noter 0.7.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of meeting-noter might be problematic.
- meeting_noter/__init__.py +1 -1
- meeting_noter/audio/encoder.py +8 -3
- meeting_noter/cli.py +211 -27
- meeting_noter/config.py +11 -0
- meeting_noter/daemon.py +80 -1
- meeting_noter/mic_monitor.py +3 -0
- meeting_noter/transcription/live_transcription.py +250 -0
- meeting_noter/update_checker.py +65 -0
- {meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/METADATA +2 -1
- {meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/RECORD +13 -11
- {meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/entry_points.txt +1 -0
- {meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/WHEEL +0 -0
- {meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/top_level.txt +0 -0
meeting_noter/__init__.py
CHANGED
meeting_noter/audio/encoder.py
CHANGED
@@ -105,12 +105,17 @@ class MP3Encoder:

     def finalize(self) -> bytes:
         """Finalize encoding."""
-        if self._process is not None
+        if self._process is not None:
             try:
-                self._process.stdin
+                if self._process.stdin is not None:
+                    self._process.stdin.close()
             except Exception:
                 pass
-
+            try:
+                self._process.wait(timeout=5.0)
+            except subprocess.TimeoutExpired:
+                self._process.kill()
+                self._process.wait()
         self._process = None
         return b""

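Pieced together from the hunk above, the new shutdown sequence reads roughly as follows. This is a readability sketch only: the class scaffolding, type hints, and exact indentation are inferred from the diff rather than copied from the 1.1.0 source.

import subprocess
from typing import Optional


class MP3Encoder:
    """Sketch of the 1.1.0 shutdown behaviour; only the finalize() path is shown."""

    def __init__(self) -> None:
        self._process: Optional[subprocess.Popen] = None

    def finalize(self) -> bytes:
        """Finalize encoding: close stdin, wait briefly, then force-kill if needed."""
        if self._process is not None:
            try:
                # Closing stdin signals EOF so the encoder child can flush its output.
                if self._process.stdin is not None:
                    self._process.stdin.close()
            except Exception:
                pass
            try:
                # Give the child up to 5 seconds to exit on its own.
                self._process.wait(timeout=5.0)
            except subprocess.TimeoutExpired:
                # Otherwise kill it and reap it so no zombie process is left behind.
                self._process.kill()
                self._process.wait()
        self._process = None
        return b""

The net effect is that finalize() now waits for the encoder child with a timeout and force-kills it if it hangs, instead of only touching its stdin and moving on.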
meeting_noter/cli.py
CHANGED
@@ -104,10 +104,46 @@ def _launch_gui_background():
     click.echo("Meeting Noter GUI launched.")


+def _handle_version():
+    """Handle --version: show version, check for updates, auto-update if enabled."""
+    import subprocess
+
+    from meeting_noter.update_checker import check_for_update
+
+    config = get_config()
+
+    click.echo(f"meeting-noter {__version__}")
+
+    # Check for updates
+    click.echo("Checking for updates...", nl=False)
+    new_version = check_for_update()
+
+    if new_version:
+        click.echo(f" update available: {new_version}")
+
+        if config.auto_update:
+            click.echo(f"Auto-updating to {new_version}...")
+            result = subprocess.run(
+                ["pipx", "upgrade", "meeting-noter"],
+                capture_output=True,
+                text=True,
+            )
+            if result.returncode == 0:
+                click.echo(click.style(f"Updated to {new_version}", fg="green"))
+            else:
+                click.echo(click.style("Update failed. Run manually:", fg="yellow"))
+                click.echo(" pipx upgrade meeting-noter")
+        else:
+            click.echo("Run to update: pipx upgrade meeting-noter")
+            click.echo("Or enable auto-update: mn config auto-update true")
+    else:
+        click.echo(" up to date")
+
+
 @click.group(cls=SuggestGroup, invoke_without_command=True)
-@click.
+@click.option("--version", "-V", is_flag=True, help="Show version and check for updates")
 @click.pass_context
-def cli(ctx):
+def cli(ctx, version):
     """Meeting Noter - Offline meeting transcription.

     \b
@@ -124,6 +160,10 @@ def cli(ctx):
       meeting-noter config whisper-model base.en Set transcription model
       meeting-noter config auto-transcribe false Disable auto-transcribe
     """
+    if version:
+        _handle_version()
+        ctx.exit(0)
+
     if ctx.invoked_subcommand is None:
         # No subcommand - start background watcher
         ctx.invoke(watcher)
@@ -131,8 +171,9 @@ def cli(ctx):

 @cli.command()
 @click.argument("name", required=False)
+@click.option("--live", "-l", is_flag=True, help="Show live transcription in terminal")
 @require_setup
-def start(name: Optional[str]):
+def start(name: Optional[str], live: bool):
     """Start an interactive foreground recording session.

     NAME is the meeting name (optional). If not provided, uses a timestamp
@@ -141,11 +182,14 @@ def start(name: Optional[str]):
     Examples:
       meeting-noter start # Uses timestamp name
       meeting-noter start "Weekly Standup" # Uses custom name
+      meeting-noter start "Meeting" --live # With live transcription

     Press Ctrl+C to stop recording. The recording will be automatically
     transcribed if auto_transcribe is enabled in settings.
     """
     from meeting_noter.daemon import run_foreground_capture
+    import threading
+    import time

     config = get_config()
     output_dir = config.recordings_dir
@@ -154,14 +198,59 @@ def start(name: Optional[str]):
     # Use default timestamp name if not provided
     meeting_name = name if name else generate_meeting_name()

-
-
-
-
-
-
-
-
+    # Live transcription display thread
+    stop_live_display = threading.Event()
+
+    def display_live_transcript():
+        """Background thread to display live transcription."""
+        live_dir = output_dir / "live"
+        last_content = ""
+
+        # Wait for live file to appear
+        while not stop_live_display.is_set():
+            live_files = list(live_dir.glob("*.live.txt")) if live_dir.exists() else []
+            if live_files:
+                live_file = max(live_files, key=lambda p: p.stat().st_mtime)
+                break
+            time.sleep(0.5)
+        else:
+            return
+
+        # Tail the file
+        while not stop_live_display.is_set():
+            try:
+                content = live_file.read_text()
+                if len(content) > len(last_content):
+                    new_content = content[len(last_content):]
+                    for line in new_content.splitlines():
+                        # Only show timestamp lines (transcriptions)
+                        if line.strip() and line.startswith("["):
+                            click.echo(click.style(line, fg="cyan"))
+                    last_content = content
+            except Exception:
+                pass
+            time.sleep(0.5)
+
+    # Start live display thread if requested
+    live_thread = None
+    if live:
+        click.echo(click.style("Live transcription enabled", fg="cyan"))
+        live_thread = threading.Thread(target=display_live_transcript, daemon=True)
+        live_thread.start()
+
+    try:
+        run_foreground_capture(
+            output_dir=output_dir,
+            meeting_name=meeting_name,
+            auto_transcribe=config.auto_transcribe,
+            whisper_model=config.whisper_model,
+            transcripts_dir=config.transcripts_dir,
+            silence_timeout_minutes=config.silence_timeout,
+        )
+    finally:
+        stop_live_display.set()
+        if live_thread:
+            live_thread.join(timeout=1.0)


 @cli.command(hidden=True)  # Internal command used by watcher
@@ -355,7 +444,13 @@ def logs(follow: bool, lines: int):
 )
 @require_setup
 def list_recordings(output_dir: Optional[str], limit: int):
-    """List recent meeting recordings.
+    """List recent meeting recordings.
+
+    \b
+    Examples:
+      meeting-noter list # Show last 10 recordings
+      meeting-noter list -n 20 # Show last 20 recordings
+    """
     from meeting_noter.output.writer import list_recordings as _list_recordings

     config = get_config()
@@ -377,28 +472,116 @@ def list_recordings(output_dir: Optional[str], limit: int):
     default=None,
     help="Whisper model size (overrides config)",
 )
-@click.option(
-    "--live", "-l",
-    is_flag=True,
-    help="Real-time transcription of current recording",
-)
 @require_setup
-def transcribe(file: Optional[str], output_dir: Optional[str], model: Optional[str]
+def transcribe(file: Optional[str], output_dir: Optional[str], model: Optional[str]):
     """Transcribe a meeting recording.

-
-
+    \b
+    Examples:
+      meeting-noter transcribe # Transcribe latest recording
+      meeting-noter transcribe recording.mp3 # Transcribe specific file
+      meeting-noter transcribe -m base.en # Use larger model for accuracy
     """
-    from meeting_noter.transcription.engine import transcribe_file
+    from meeting_noter.transcription.engine import transcribe_file

     config = get_config()
     output_path = Path(output_dir) if output_dir else config.recordings_dir
     whisper_model = model or config.whisper_model

-
-
-
-
+    transcribe_file(file, output_path, whisper_model, config.transcripts_dir)
+
+
+@cli.command()
+@require_setup
+def live():
+    """Show live transcription of an active recording.
+
+    Displays the real-time transcript as it's being generated.
+    Use in a separate terminal while recording with 'meeting-noter start'.
+
+    \b
+    Examples:
+      # Terminal 1: Start recording
+      meeting-noter start "Team Meeting"
+
+      # Terminal 2: Watch live transcript
+      meeting-noter live
+
+    Or use 'meeting-noter start "name" --live' to see both in one terminal.
+    """
+    import time
+
+    config = get_config()
+    live_dir = config.recordings_dir / "live"
+
+    # Find the most recent .live.txt file in the live/ subfolder
+    if not live_dir.exists():
+        click.echo(click.style("No live transcript found.", fg="yellow"))
+        click.echo("Start a recording with: meeting-noter start")
+        return
+
+    live_files = sorted(
+        live_dir.glob("*.live.txt"),
+        key=lambda p: p.stat().st_mtime,
+        reverse=True,
+    )
+
+    if not live_files:
+        click.echo(click.style("No live transcript found.", fg="yellow"))
+        click.echo("Start a recording with: meeting-noter start")
+        return
+
+    live_file = live_files[0]
+
+    # Check if file is actively being written (modified in last 30 seconds)
+    file_age = time.time() - live_file.stat().st_mtime
+    if file_age > 30:
+        click.echo(click.style("No active recording found.", fg="yellow"))
+        click.echo(f"Most recent transcript ({live_file.name}) is {int(file_age)}s old.")
+        click.echo("Start a recording with: meeting-noter start")
+        return
+
+    click.echo(click.style("Live Transcription", fg="cyan", bold=True))
+    click.echo(f"Source: {live_file.name.replace('.live.txt', '.mp3')}")
+    click.echo("Press Ctrl+C to stop watching.\n")
+    click.echo("-" * 40)
+
+    # Tail the file
+    try:
+        last_content = ""
+        no_update_count = 0
+
+        while True:
+            try:
+                with open(live_file, "r") as f:
+                    content = f.read()
+
+                # Print only new content
+                if len(content) > len(last_content):
+                    new_content = content[len(last_content):]
+                    # Print line by line for better formatting
+                    for line in new_content.splitlines():
+                        if line.strip():
+                            click.echo(line)
+                    last_content = content
+                    no_update_count = 0
+                else:
+                    no_update_count += 1
+
+                # Check if file hasn't been updated for 30+ seconds (recording likely ended)
+                file_age = time.time() - live_file.stat().st_mtime
+                if file_age > 30 and no_update_count > 5:
+                    click.echo("\n" + click.style("Recording ended.", fg="yellow"))
+                    break
+
+            except FileNotFoundError:
+                click.echo("\n" + click.style("Live transcript file removed.", fg="yellow"))
+                break
+
+            time.sleep(1)
+
+    except KeyboardInterrupt:
+        click.echo("\n" + click.style("Stopped watching.", fg="cyan"))


 @cli.command()
@@ -463,6 +646,7 @@ CONFIG_KEYS = {
     "transcripts-dir": ("transcripts_dir", "path", "Directory for transcripts"),
     "whisper-model": ("whisper_model", "choice:tiny.en,base.en,small.en,medium.en,large-v3", "Whisper model for transcription"),
     "auto-transcribe": ("auto_transcribe", "bool", "Auto-transcribe after recording"),
+    "auto-update": ("auto_update", "bool", "Auto-update when running --version"),
     "silence-timeout": ("silence_timeout", "int", "Minutes of silence before auto-stop"),
     "capture-system-audio": ("capture_system_audio", "bool", "Capture meeting participants via ScreenCaptureKit"),
 }
@@ -553,7 +737,7 @@ def config(key: Optional[str], value: Optional[str]):
 WATCHER_PID_FILE = Path.home() / ".meeting-noter-watcher.pid"


-@cli.command()
+@cli.command(hidden=True)
 @click.option(
     "--foreground", "-f",
     is_flag=True,
@@ -665,7 +849,7 @@ def _run_watcher_loop():
     stop_daemon(DEFAULT_PID_FILE)


-@cli.command()
+@cli.command(hidden=True)
 @require_setup
 def watch():
     """Watch for meetings interactively (foreground with prompts).

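The --version handling added above uses a standard click pattern: a flag on the group callback that runs before any subcommand is dispatched and then exits. A minimal, self-contained sketch of that pattern (the `mytool` group and its output strings are hypothetical, not meeting-noter code):

import click


@click.group(invoke_without_command=True)
@click.option("--version", "-V", is_flag=True, help="Show version and exit")
@click.pass_context
def cli(ctx: click.Context, version: bool) -> None:
    """Hypothetical tool demonstrating the --version short-circuit."""
    if version:
        click.echo("mytool 1.0.0")
        # Exit before any subcommand (or the default action) runs.
        ctx.exit(0)
    if ctx.invoked_subcommand is None:
        click.echo("No subcommand given; running the default action.")


@cli.command()
def hello() -> None:
    """Example subcommand."""
    click.echo("hello")


if __name__ == "__main__":
    cli()

In meeting-noter the same branch additionally calls check_for_update() against PyPI and, when the auto_update setting is enabled, shells out to `pipx upgrade meeting-noter`.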
meeting_noter/config.py
CHANGED
@@ -32,6 +32,7 @@ DEFAULT_CONFIG = {
     "transcripts_dir": str(Path.home() / "meetings"),
     "whisper_model": "tiny.en",
     "auto_transcribe": True,
+    "auto_update": True,  # Auto-update when running --version
     "silence_timeout": 5,  # Minutes of silence before stopping recording
     "capture_system_audio": True,  # Capture other participants via ScreenCaptureKit
     "show_menubar": False,
@@ -139,6 +140,16 @@ class Config:
         """Set show menubar setting."""
         self._data["show_menubar"] = value

+    @property
+    def auto_update(self) -> bool:
+        """Get auto-update setting."""
+        return self._data.get("auto_update", True)
+
+    @auto_update.setter
+    def auto_update(self, value: bool) -> None:
+        """Set auto-update setting."""
+        self._data["auto_update"] = value
+
     @property
     def setup_complete(self) -> bool:
         """Check if setup has been completed."""

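A small sketch of how the new setting behaves at runtime, using a hypothetical stand-in class with the same property logic (the real Config also loads and saves the settings file, which is omitted here):

class ConfigSketch:
    """Stand-in mirroring the new auto_update property."""

    def __init__(self, data: dict) -> None:
        self._data = data

    @property
    def auto_update(self) -> bool:
        # Missing keys default to True, so configs written before 1.1.0 opt in.
        return self._data.get("auto_update", True)

    @auto_update.setter
    def auto_update(self, value: bool) -> None:
        self._data["auto_update"] = value


cfg = ConfigSketch({})
print(cfg.auto_update)   # True (default for pre-1.1.0 config files)
cfg.auto_update = False  # what `meeting-noter config auto-update false` ends up storing
print(cfg.auto_update)   # False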
meeting_noter/daemon.py
CHANGED
@@ -157,7 +157,11 @@ def run_daemon(
     remove_pid_file(pid_file)


-def _run_capture_loop(
+def _run_capture_loop(
+    output_dir: Path,
+    meeting_name: Optional[str] = None,
+    enable_live_transcription: bool = True,
+):
     """Main capture loop.

     Audio imports happen HERE, safely AFTER the fork.
@@ -171,6 +175,16 @@ def _run_capture_loop(output_dir: Path, meeting_name: Optional[str] = None):

     config = get_config()

+    # Live transcription (imported here to avoid loading Whisper before fork)
+    live_transcriber = None
+    if enable_live_transcription:
+        try:
+            from meeting_noter.transcription.live_transcription import LiveTranscriber
+            LiveTranscriber  # Just verify import works, create later
+        except ImportError as e:
+            print(f"Live transcription not available: {e}")
+            enable_live_transcription = False
+
     print(f"Meeting Noter daemon started. Saving to {output_dir}")
     sys.stdout.flush()
     if meeting_name:
@@ -253,15 +267,42 @@ def _run_capture_loop(output_dir: Path, meeting_name: Optional[str] = None):
                 print(f"Recording started: {filepath.name}")
                 recording_started = True
                 audio_detected = True
+
+                # Start live transcription
+                if enable_live_transcription:
+                    try:
+                        from meeting_noter.transcription.live_transcription import LiveTranscriber
+                        live_transcriber = LiveTranscriber(
+                            output_path=filepath,
+                            sample_rate=sample_rate,
+                            channels=channels,
+                            window_seconds=5.0,
+                            slide_seconds=2.0,
+                            model_size=config.whisper_model,
+                        )
+                        live_transcriber.start()
+                        print(f"Live transcription: {live_transcriber.live_file_path.name}")
+                    except Exception as e:
+                        print(f"Failed to start live transcription: {e}")
+                        live_transcriber = None
             else:
                 # Currently recording
                 session.write(audio)

+                # Feed audio to live transcriber
+                if live_transcriber is not None:
+                    live_transcriber.write(audio)
+
                 if has_audio:
                     audio_detected = True

             # Check for extended silence (meeting ended)
             if is_silence and audio_detected:
+                # Stop live transcription first
+                if live_transcriber is not None:
+                    live_transcriber.stop()
+                    live_transcriber = None
+
                 filepath, duration = session.stop()
                 if filepath:
                     print(f"Recording saved: {filepath.name} ({duration:.1f}s)")
@@ -275,6 +316,10 @@ def _run_capture_loop(output_dir: Path, meeting_name: Optional[str] = None):
     finally:
         capture.stop()

+        # Stop live transcription
+        if live_transcriber is not None:
+            live_transcriber.stop()
+
         # Save any ongoing recording
         if 'session' in locals() and session.is_active:
             filepath, duration = session.stop()
@@ -377,6 +422,7 @@ def run_foreground_capture(
     whisper_model: str = "tiny.en",
     transcripts_dir: Optional[Path] = None,
     silence_timeout_minutes: int = 5,
+    enable_live_transcription: bool = True,
 ) -> Optional[Path]:
     """Run audio capture in foreground with a named meeting.

@@ -390,6 +436,7 @@ def run_foreground_capture(
         whisper_model: Whisper model to use for transcription
         transcripts_dir: Directory for transcripts
         silence_timeout_minutes: Stop after this many minutes of silence
+        enable_live_transcription: Whether to enable real-time transcription

     Returns:
         Path to the saved recording, or None if recording was too short
@@ -401,6 +448,9 @@ def run_foreground_capture(

     config = get_config()

+    # Initialize live transcriber
+    live_transcriber = None
+
     # Check audio device
     if not check_audio_available():
         click.echo(click.style("Error: ", fg="red") + "No audio input device found.")
@@ -461,6 +511,27 @@ def run_foreground_capture(
     filepath = session.start()
     click.echo(click.style("Recording: ", fg="green") + filepath.name)

+    # Start live transcription
+    if enable_live_transcription:
+        try:
+            from meeting_noter.transcription.live_transcription import LiveTranscriber
+            live_transcriber = LiveTranscriber(
+                output_path=filepath,
+                sample_rate=sample_rate,
+                channels=channels,
+                window_seconds=5.0,
+                slide_seconds=2.0,
+                model_size=whisper_model,
+            )
+            live_transcriber.start()
+            click.echo(
+                click.style("Live transcript: ", fg="cyan") +
+                str(live_transcriber.live_file_path)
+            )
+        except Exception as e:
+            click.echo(click.style(f"Live transcription not available: {e}", fg="yellow"))
+            live_transcriber = None
+
     while not _stop_event.is_set():
         audio = capture.get_audio(timeout=0.5)
         if audio is None:
@@ -472,6 +543,10 @@ def run_foreground_capture(

         session.write(audio)

+        # Feed audio to live transcriber
+        if live_transcriber is not None:
+            live_transcriber.write(audio)
+
         # Check for extended silence
         if silence_detector.update(audio):
             click.echo("\n" + click.style("Stopped: ", fg="yellow") + "silence timeout reached")
@@ -487,6 +562,10 @@ def run_foreground_capture(
     except Exception as e:
         click.echo(click.style(f"\nError: {e}", fg="red"))
     finally:
+        # Stop live transcription
+        if live_transcriber is not None:
+            live_transcriber.stop()
+
         capture.stop()

         # Save recording

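Both capture paths wire LiveTranscriber in with the same lifecycle: construct it when a recording starts, feed it every captured chunk next to session.write(audio), and stop it on silence timeout or shutdown. A minimal sketch of that lifecycle, assuming meeting-noter 1.1.0 is installed (faster-whisper is needed for actual text output); the path and the zero-filled chunks are stand-ins for real captured audio:

from pathlib import Path

import numpy as np

from meeting_noter.transcription.live_transcription import LiveTranscriber

recording = Path("/tmp/demo-meeting.mp3")  # hypothetical recording path
transcriber = LiveTranscriber(
    output_path=recording,       # live text goes to /tmp/live/demo-meeting.live.txt
    sample_rate=48000,
    channels=2,
    window_seconds=5.0,          # context window, as the daemon configures it
    slide_seconds=2.0,           # transcribe roughly every 2 s of new audio
    model_size="tiny.en",
)
transcriber.start()
try:
    # Feed chunks exactly where session.write(audio) happens in the capture loop.
    for _ in range(50):
        chunk = np.zeros(4800, dtype=np.float32)  # ~50 ms of interleaved stereo silence
        transcriber.write(chunk)
finally:
    transcriber.stop()           # joins the worker thread and releases the model
print("Live transcript at:", transcriber.live_file_path)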
meeting_noter/mic_monitor.py
CHANGED
@@ -169,6 +169,9 @@ def is_meeting_app_active() -> Optional[str]:

         # Browser-based meetings (Google Meet, etc.)
         if any(browser in owner_lower for browser in ["chrome", "safari", "firefox", "edge", "brave", "arc"]):
+            # Skip video streaming sites (YouTube, Vimeo, etc.)
+            if any(x in title_lower for x in ["youtube", "vimeo", "twitch", "netflix"]):
+                continue
             if title_lower.startswith("meet -") or "meet.google.com" in title_lower:
                 return "Google Meet"
             if " meeting" in title_lower and any(x in title_lower for x in ["zoom", "teams", "webex"]):

meeting_noter/transcription/live_transcription.py
ADDED
@@ -0,0 +1,250 @@
+"""Live transcription during recording.
+
+Buffers audio chunks and transcribes them in a background thread,
+writing segments to a .live.txt file that can be tailed by the CLI.
+
+Uses overlapping windows for lower latency: keeps a 5-second context window
+but transcribes every 2 seconds, only outputting new content.
+"""
+
+from __future__ import annotations
+
+import sys
+import numpy as np
+from collections import deque
+from pathlib import Path
+from queue import Queue, Empty
+from threading import Thread, Event
+from typing import Optional
+from datetime import datetime
+
+
+class LiveTranscriber:
+    """Transcribes audio in real-time during recording.
+
+    Uses overlapping windows approach:
+    - Maintains a rolling window of audio (default 5 seconds)
+    - Transcribes every `slide_seconds` (default 2 seconds)
+    - Only outputs new segments to avoid duplicates
+    """
+
+    def __init__(
+        self,
+        output_path: Path,
+        sample_rate: int = 48000,
+        channels: int = 2,
+        window_seconds: float = 5.0,
+        slide_seconds: float = 2.0,
+        model_size: str = "tiny.en",
+    ):
+        """Initialize the live transcriber.
+
+        Args:
+            output_path: Path to write live transcript (will use .live.txt suffix in live/ subfolder)
+            sample_rate: Audio sample rate
+            channels: Number of audio channels
+            window_seconds: Size of the context window for transcription
+            slide_seconds: How often to transcribe (lower = more responsive, higher CPU)
+            model_size: Whisper model to use (tiny.en recommended for speed)
+        """
+        # Put live transcripts in a 'live/' subfolder to keep recordings folder clean
+        live_dir = output_path.parent / "live"
+        live_dir.mkdir(exist_ok=True)
+        self.output_path = live_dir / (output_path.stem + ".live.txt")
+        self.sample_rate = sample_rate
+        self.channels = channels
+        self.window_seconds = window_seconds
+        self.slide_seconds = slide_seconds
+        self.model_size = model_size
+
+        self._audio_queue: Queue[np.ndarray] = Queue()
+        self._stop_event = Event()
+        self._thread: Optional[Thread] = None
+        self._model = None
+        self._start_time: Optional[datetime] = None
+        self._recording_offset = 0.0  # Current position in recording (seconds)
+        self._last_output_end = 0.0  # End time of last outputted segment
+
+    def start(self):
+        """Start the live transcription thread."""
+        self._stop_event.clear()
+        self._start_time = datetime.now()
+        self._recording_offset = 0.0
+        self._last_output_end = 0.0
+
+        # Create/clear the output file
+        with open(self.output_path, "w") as f:
+            f.write(f"Live Transcription - {self._start_time.strftime('%Y-%m-%d %H:%M:%S')}\n")
+            f.write("-" * 40 + "\n\n")
+
+        self._thread = Thread(target=self._transcribe_loop, daemon=True)
+        self._thread.start()
+
+    def write(self, audio: np.ndarray):
+        """Add audio chunk to the transcription queue.
+
+        Args:
+            audio: Audio data (float32, -1 to 1)
+        """
+        if not self._stop_event.is_set():
+            self._audio_queue.put(audio.copy())
+
+    def stop(self):
+        """Stop the live transcription thread."""
+        self._stop_event.set()
+        if self._thread is not None:
+            self._thread.join(timeout=10.0)
+            self._thread = None
+        self._model = None
+
+    def _load_model(self):
+        """Load the Whisper model (lazy loading)."""
+        if self._model is not None:
+            return
+
+        try:
+            from faster_whisper import WhisperModel
+
+            # Check for bundled model
+            bundled_path = None
+            try:
+                from meeting_noter_models import get_model_path
+                bundled_path = get_model_path()
+                if not (bundled_path.exists() and (bundled_path / "model.bin").exists()):
+                    bundled_path = None
+            except ImportError:
+                pass
+
+            if bundled_path and self.model_size == "tiny.en":
+                self._model = WhisperModel(
+                    str(bundled_path),
+                    device="cpu",
+                    compute_type="int8",
+                )
+            else:
+                self._model = WhisperModel(
+                    self.model_size,
+                    device="cpu",
+                    compute_type="int8",
+                )
+        except Exception as e:
+            print(f"Failed to load Whisper model: {e}", file=sys.stderr)
+            self._model = None
+
+    def _transcribe_loop(self):
+        """Main transcription loop with overlapping windows."""
+        # Rolling buffer using deque for efficient sliding
+        window_samples = int(self.window_seconds * self.sample_rate)
+        slide_samples = int(self.slide_seconds * self.sample_rate)
+
+        # Buffer holds raw audio samples
+        rolling_buffer: deque[float] = deque(maxlen=window_samples)
+        samples_since_last_transcribe = 0
+
+        # Load model on first use
+        self._load_model()
+        if self._model is None:
+            return
+
+        while not self._stop_event.is_set():
+            try:
+                # Collect audio chunks
+                try:
+                    chunk = self._audio_queue.get(timeout=0.5)
+
+                    # Add samples to rolling buffer
+                    for sample in chunk:
+                        rolling_buffer.append(sample)
+
+                    samples_since_last_transcribe += len(chunk)
+                    self._recording_offset += len(chunk) / self.sample_rate
+
+                except Empty:
+                    if self._stop_event.is_set():
+                        break
+                    continue
+
+                # Transcribe every slide_seconds
+                if samples_since_last_transcribe >= slide_samples and len(rolling_buffer) >= slide_samples:
+                    self._transcribe_window(rolling_buffer)
+                    samples_since_last_transcribe = 0
+
+            except Exception as e:
+                print(f"Live transcription error: {e}", file=sys.stderr)
+
+        # Final transcription on stop
+        if len(rolling_buffer) > 0:
+            self._transcribe_window(rolling_buffer)
+
+    def _transcribe_window(self, rolling_buffer: deque):
+        """Transcribe the current window and output new segments."""
+        if not rolling_buffer or self._model is None:
+            return
+
+        try:
+            # Convert deque to numpy array (ensure float32 for Whisper)
+            audio = np.array(list(rolling_buffer), dtype=np.float32)
+
+            # Convert stereo to mono if needed
+            if self.channels == 2 and len(audio) % 2 == 0:
+                audio = audio.reshape(-1, 2).mean(axis=1).astype(np.float32)
+
+            # Resample to 16kHz if needed (Whisper expects 16kHz)
+            if self.sample_rate != 16000:
+                audio = self._resample(audio, self.sample_rate, 16000).astype(np.float32)
+
+            # Calculate window timing
+            window_duration = len(rolling_buffer) / self.sample_rate
+            window_start = self._recording_offset - window_duration
+
+            # Transcribe using faster-whisper
+            segments, _ = self._model.transcribe(
+                audio,
+                beam_size=1,  # Fastest
+                vad_filter=True,
+                vad_parameters=dict(min_silence_duration_ms=200),
+            )
+
+            # Write only NEW segments to file
+            with open(self.output_path, "a") as f:
+                for segment in segments:
+                    # Calculate absolute timestamp
+                    abs_start = window_start + segment.start
+                    abs_end = window_start + segment.end
+
+                    # Only output if this segment is new (starts after last output)
+                    if abs_start >= self._last_output_end - 0.5:  # 0.5s tolerance for overlap
+                        text = segment.text.strip()
+                        if text:
+                            timestamp = self._format_timestamp(abs_start)
+                            f.write(f"{timestamp} {text}\n")
+                            f.flush()
+                        self._last_output_end = abs_end
+
+        except Exception as e:
+            print(f"Transcription error: {e}", file=sys.stderr)
+
+    @staticmethod
+    def _resample(audio: np.ndarray, orig_sr: int, target_sr: int) -> np.ndarray:
+        """Simple resampling using linear interpolation."""
+        if orig_sr == target_sr:
+            return audio
+
+        duration = len(audio) / orig_sr
+        target_length = int(duration * target_sr)
+
+        # Use numpy interpolation (returns float64, so cast back)
+        indices = np.linspace(0, len(audio) - 1, target_length)
+        return np.interp(indices, np.arange(len(audio)), audio).astype(np.float32)
+
+    @staticmethod
+    def _format_timestamp(seconds: float) -> str:
+        """Format seconds as [MM:SS]."""
+        minutes = int(seconds // 60)
+        secs = int(seconds % 60)
+        return f"[{minutes:02d}:{secs:02d}]"
+
+    @property
+    def live_file_path(self) -> Path:
+        """Get the path to the live transcript file."""
+        return self.output_path

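A worked example of the timing arithmetic in _transcribe_window(): segment times reported by faster-whisper are relative to the rolling window, so they are shifted by window_start = recording_offset - window_duration, and a segment is written only if its absolute start falls no more than 0.5 s before the end of the last segment already written. The numbers below are illustrative:

window_seconds = 5.0
recording_offset = 12.0          # 12 s of audio consumed so far
window_duration = 5.0            # buffer is full
window_start = recording_offset - window_duration   # -> 7.0 s

last_output_end = 10.4           # end of the last segment already written

# A segment reported at 2.8-4.1 s *within the window* maps to 9.8-11.1 s absolute.
abs_start = window_start + 2.8
print(abs_start >= last_output_end - 0.5)            # False: 9.8 < 9.9, already covered

# A segment reported at 3.5-5.0 s starts at 10.5 s absolute, so it is written,
# and last_output_end then advances to 7.0 + 5.0 = 12.0 s.
print(window_start + 3.5 >= last_output_end - 0.5)   # True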
meeting_noter/update_checker.py
ADDED
@@ -0,0 +1,65 @@
+"""Check for updates from PyPI."""
+
+import threading
+import urllib.request
+import json
+from typing import Optional, Tuple
+
+from meeting_noter import __version__
+
+
+PYPI_URL = "https://pypi.org/pypi/meeting-noter/json"
+
+
+def parse_version(version: str) -> Tuple[int, ...]:
+    """Parse version string into tuple for comparison."""
+    try:
+        return tuple(int(x) for x in version.split("."))
+    except ValueError:
+        return (0, 0, 0)
+
+
+def get_latest_version() -> Optional[str]:
+    """Fetch the latest version from PyPI."""
+    try:
+        req = urllib.request.Request(
+            PYPI_URL,
+            headers={"Accept": "application/json", "User-Agent": "meeting-noter"}
+        )
+        with urllib.request.urlopen(req, timeout=5) as response:
+            data = json.loads(response.read().decode())
+            return data.get("info", {}).get("version")
+    except Exception:
+        return None
+
+
+def check_for_update() -> Optional[str]:
+    """Check if an update is available.
+
+    Returns the new version string if an update is available, None otherwise.
+    """
+    latest = get_latest_version()
+    if not latest:
+        return None
+
+    current = parse_version(__version__)
+    latest_parsed = parse_version(latest)
+
+    if latest_parsed > current:
+        return latest
+    return None
+
+
+def check_for_update_async(callback):
+    """Check for updates in a background thread.
+
+    Args:
+        callback: Function to call with the new version string (or None if no update).
+    """
+    def _check():
+        new_version = check_for_update()
+        if new_version:
+            callback(new_version)
+
+    thread = threading.Thread(target=_check, daemon=True)
+    thread.start()

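A short usage sketch of the new module, assuming meeting-noter 1.1.0 is importable; only the version comparison is exercised here, so no network access is needed:

from meeting_noter.update_checker import parse_version

print(parse_version("1.1.0"))                            # (1, 1, 0)
print(parse_version("1.1.0") > parse_version("0.7.0"))   # True -> an update would be offered

# Any non-numeric component falls back to (0, 0, 0), so a pre-release string
# such as "1.2.0rc1" would never be reported as newer than a plain release.
print(parse_version("1.2.0rc1"))                         # (0, 0, 0)

# check_for_update() wraps get_latest_version() (a 5-second PyPI JSON request)
# and returns the newer version string, or None when offline or already up to date.

The CLI's --version handler calls check_for_update() synchronously; check_for_update_async() is the fire-and-forget variant for callers that cannot block.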
{meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: meeting-noter
-Version:
+Version: 1.1.0
 Summary: Offline meeting transcription for macOS with automatic meeting detection
 Author: Victor
 License: MIT
@@ -40,6 +40,7 @@ Requires-Dist: meeting-noter-models>=0.1.0; extra == "offline"
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
+Requires-Dist: pytest-mock; extra == "dev"
 Requires-Dist: black; extra == "dev"
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: mypy; extra == "dev"

{meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/RECORD
CHANGED
@@ -1,14 +1,15 @@
-meeting_noter/__init__.py,sha256=
+meeting_noter/__init__.py,sha256=OuitWUFUeSb7eg41mnnDD6QiJrcGVpXpdhDzyxtCmmU,103
 meeting_noter/__main__.py,sha256=6sSOqH1o3jvgvkVzsVKmF6-xVGcUAbNVQkRl2CrygdE,120
-meeting_noter/cli.py,sha256=
-meeting_noter/config.py,sha256=
-meeting_noter/daemon.py,sha256=
+meeting_noter/cli.py,sha256=BAoUnQ-FPgwTp6-ON6WQkO-7Biwk1mYhwFaGdchHGHQ,34267
+meeting_noter/config.py,sha256=_Jy-gZDTCN3TwH5fiLEWk-qFhoSGISO2xDQmACV_zRc,6334
+meeting_noter/daemon.py,sha256=u9VrYe94o3lxabuIS9MDVPHSH7MqKqzTqGTuA7TNAIc,19767
 meeting_noter/meeting_detector.py,sha256=St0qoMkvUERP4BaxnXO1M6fZDJpWqBf9In7z2SgWcWg,10564
 meeting_noter/menubar.py,sha256=Gn6p8y5jA_HCWf1T3ademxH-vndpONHkf9vUlKs6XEo,14379
-meeting_noter/mic_monitor.py,sha256=
+meeting_noter/mic_monitor.py,sha256=P8vF4qaZcGrEzzJyVos78Vuf38NXHGNRREDsD-HyBHc,16211
+meeting_noter/update_checker.py,sha256=sMmIiiZJL6K7wqLWE64Aj4hS8uspjUOirr6BG_IlL1I,1701
 meeting_noter/audio/__init__.py,sha256=O7PU8CxHSHxMeHbc9Jdwt9kePLQzsPh81GQU7VHCtBY,44
 meeting_noter/audio/capture.py,sha256=fDrT5oXfva8vdFlht9cv60NviKbksw2QeJ8eOtI19uE,6469
-meeting_noter/audio/encoder.py,sha256=
+meeting_noter/audio/encoder.py,sha256=OBsgUmlZPz-YZQZ7Rp8MAlMRaQxTsccjuTgCtvRebmc,6573
 meeting_noter/audio/system_audio.py,sha256=jbHGjNCerI19weXap0a90Ik17lVTCT1hCEgRKYke-p8,13016
 meeting_noter/gui/__init__.py,sha256=z5GxxaeXyjqyEa9ox0dQxuL5u_BART0bi7cI6rfntEI,103
 meeting_noter/gui/__main__.py,sha256=A2HWdYod0bTgjQQIi21O7XpmgxLH36e_X0aygEUZLls,146
@@ -32,8 +33,9 @@ meeting_noter/resources/icon_512.png,sha256=o7X3ngYcppcIAAk9AcfPx94MUmrsPRp0qBTp
 meeting_noter/resources/icon_64.png,sha256=TqG7Awx3kK8YdiX1e_z1odZonosZyQI2trlkNZCzUoI,607
 meeting_noter/transcription/__init__.py,sha256=7GY9diP06DzFyoli41wddbrPv5bVDzH35bmnWlIJev4,29
 meeting_noter/transcription/engine.py,sha256=G9NcSS6Q-UhW7PlQ0E85hQXn6BWao64nIvyw4NR2yxI,7208
-meeting_noter
-meeting_noter-
-meeting_noter-
-meeting_noter-
-meeting_noter-
+meeting_noter/transcription/live_transcription.py,sha256=AslB1T1_gxu7eSp7xc79_2SdfGrNJq7L_8bA1t6YoU4,9277
+meeting_noter-1.1.0.dist-info/METADATA,sha256=bGwwBq9AeFcxYKf0vpD3WtENmibBNLl4mzf1fLDyiVs,6995
+meeting_noter-1.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+meeting_noter-1.1.0.dist-info/entry_points.txt,sha256=osZoOmm-UBPCJ4b6DGH6JOAm7mofM2fK06eK6blplmg,83
+meeting_noter-1.1.0.dist-info/top_level.txt,sha256=9Tuq04_0SXM0OXOHVbOHkHkB5tG3fqkrMrfzCMpbLpY,14
+meeting_noter-1.1.0.dist-info/RECORD,,

{meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/WHEEL
File without changes

{meeting_noter-0.7.0.dist-info → meeting_noter-1.1.0.dist-info}/top_level.txt
File without changes