python-voiceio 0.3.11__tar.gz → 0.3.13__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {python_voiceio-0.3.11/python_voiceio.egg-info → python_voiceio-0.3.13}/PKG-INFO +1 -1
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/pyproject.toml +1 -1
- {python_voiceio-0.3.11 → python_voiceio-0.3.13/python_voiceio.egg-info}/PKG-INFO +1 -1
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/python_voiceio.egg-info/SOURCES.txt +1 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_app_wiring.py +1 -1
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_autocorrect.py +210 -0
- python_voiceio-0.3.13/tests/test_cli.py +264 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_llm_api.py +45 -1
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_postprocess.py +48 -1
- python_voiceio-0.3.13/voiceio/__init__.py +1 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/app.py +4 -1
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/autocorrect.py +196 -20
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/cli.py +140 -7
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/config.py +2 -1
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/llm_api.py +24 -6
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/postprocess.py +5 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/streaming.py +3 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/wizard.py +27 -0
- python_voiceio-0.3.11/voiceio/__init__.py +0 -1
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/LICENSE +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/README.md +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/python_voiceio.egg-info/dependency_links.txt +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/python_voiceio.egg-info/entry_points.txt +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/python_voiceio.egg-info/requires.txt +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/python_voiceio.egg-info/top_level.txt +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/setup.cfg +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_backend_probes.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_clipboard_read.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_commands.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_config.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_corrections.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_fallback.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_health.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_hints.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_history.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_ibus_typer.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_llm.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_numbers.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_platform.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_prebuffer.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_prompt.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_recorder_integration.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_robustness.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_streaming.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_transcriber.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_tts.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_vad.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_vocabulary.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/tests/test_wordfreq.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/__main__.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/backends.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/clipboard_read.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/commands.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/corrections.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/demo.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/feedback.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/health.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/hints.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/history.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/hotkeys/__init__.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/hotkeys/base.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/hotkeys/chain.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/hotkeys/evdev.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/hotkeys/pynput_backend.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/hotkeys/socket_backend.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/ibus/__init__.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/ibus/engine.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/llm.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/models/__init__.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/models/silero_vad.onnx +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/numbers.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/pidlock.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/platform.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/prompt.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/recorder.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/service.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/sounds/__init__.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/sounds/commit.wav +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/sounds/start.wav +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/sounds/stop.wav +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/transcriber.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tray/__init__.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tray/_icons.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tray/_indicator.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tray/_pystray.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tts/__init__.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tts/base.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tts/chain.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tts/edge_engine.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tts/espeak.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tts/piper_engine.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/tts/player.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/__init__.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/base.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/chain.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/clipboard.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/ibus.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/pynput_type.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/wtype.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/xdotool.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/typers/ydotool.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/vad.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/vocabulary.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/wordfreq.py +0 -0
- {python_voiceio-0.3.11 → python_voiceio-0.3.13}/voiceio/worker.py +0 -0
|
@@ -76,7 +76,7 @@ class TestHotkeyDebounce:
|
|
|
76
76
|
"""Verify that duplicate hotkey events are properly debounced."""
|
|
77
77
|
|
|
78
78
|
def test_rapid_duplicate_ignored(self):
|
|
79
|
-
"""Two on_hotkey calls within
|
|
79
|
+
"""Two on_hotkey calls within the debounce window should only trigger once."""
|
|
80
80
|
vio, _, _ = _make_vio()
|
|
81
81
|
|
|
82
82
|
vio.on_hotkey()
|
|
@@ -5,6 +5,7 @@ import json
|
|
|
5
5
|
from unittest.mock import MagicMock, patch
|
|
6
6
|
|
|
7
7
|
from voiceio.autocorrect import (
|
|
8
|
+
_REVIEW_BATCH_SIZE,
|
|
8
9
|
ReviewResult,
|
|
9
10
|
SuspiciousWord,
|
|
10
11
|
_find_similar_common,
|
|
@@ -12,6 +13,8 @@ from voiceio.autocorrect import (
|
|
|
12
13
|
_parse_review_response,
|
|
13
14
|
cluster_variants,
|
|
14
15
|
find_suspicious_words,
|
|
16
|
+
rank_review_items,
|
|
17
|
+
rank_review_score,
|
|
15
18
|
review_suspicious,
|
|
16
19
|
)
|
|
17
20
|
from voiceio.config import AutocorrectConfig, Config, LLMConfig
|
|
@@ -220,6 +223,53 @@ def test_parse_flat_array_fallback():
|
|
|
220
223
|
assert result.ask_user[0]["wrong"] == "foo"
|
|
221
224
|
|
|
222
225
|
|
|
226
|
+
def test_parse_ask_user_keeps_empty_right_with_reason():
|
|
227
|
+
"""ask_user items must survive even when LLM has no clean correction."""
|
|
228
|
+
response = json.dumps({
|
|
229
|
+
"auto_fix": [],
|
|
230
|
+
"ask_user": [
|
|
231
|
+
{"wrong": "tridle", "right": "", "reason": "unclear"},
|
|
232
|
+
{"wrong": "kaiki", "right": "kaiki", "reason": "uncertain"},
|
|
233
|
+
],
|
|
234
|
+
"vocabulary": [],
|
|
235
|
+
})
|
|
236
|
+
result = _parse_review_response(response)
|
|
237
|
+
assert len(result.ask_user) == 2
|
|
238
|
+
assert result.ask_user[0]["wrong"] == "tridle"
|
|
239
|
+
assert result.ask_user[0]["reason"] == "unclear"
|
|
240
|
+
# right == wrong gets normalized to empty string (no useful suggestion)
|
|
241
|
+
assert result.ask_user[1]["wrong"] == "kaiki"
|
|
242
|
+
assert result.ask_user[1]["right"] == ""
|
|
243
|
+
assert result.ask_user[1]["reason"] == "uncertain"
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def test_parse_ask_user_drops_empty_wrong():
|
|
247
|
+
"""ask_user items with no `wrong` are still useless and get dropped."""
|
|
248
|
+
response = json.dumps({
|
|
249
|
+
"auto_fix": [],
|
|
250
|
+
"ask_user": [{"wrong": "", "right": "something", "reason": "noise"}],
|
|
251
|
+
"vocabulary": [],
|
|
252
|
+
})
|
|
253
|
+
result = _parse_review_response(response)
|
|
254
|
+
assert result.ask_user == []
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def test_parse_auto_fix_still_strict():
|
|
258
|
+
"""auto_fix bucket still requires `right != wrong` — case-only change dropped."""
|
|
259
|
+
response = json.dumps({
|
|
260
|
+
"auto_fix": [
|
|
261
|
+
{"wrong": "hello", "right": "Hello"}, # case-only → drop
|
|
262
|
+
{"wrong": "olamma", "right": "Ollama"}, # legit → keep
|
|
263
|
+
{"wrong": "foo", "right": "", "reason": "?"}, # missing right → drop
|
|
264
|
+
],
|
|
265
|
+
"ask_user": [],
|
|
266
|
+
"vocabulary": [],
|
|
267
|
+
})
|
|
268
|
+
result = _parse_review_response(response)
|
|
269
|
+
assert len(result.auto_fix) == 1
|
|
270
|
+
assert result.auto_fix[0]["wrong"] == "olamma"
|
|
271
|
+
|
|
272
|
+
|
|
223
273
|
def test_parse_partial_buckets():
|
|
224
274
|
"""Missing buckets should default to empty."""
|
|
225
275
|
response = json.dumps({"auto_fix": [{"wrong": "a", "right": "b"}]})
|
|
@@ -262,3 +312,163 @@ def test_review_api_failure_falls_back(mock_chat):
|
|
|
262
312
|
words = [SuspiciousWord(word="foo", count=1)]
|
|
263
313
|
result = review_suspicious(cfg, words)
|
|
264
314
|
assert result == ReviewResult()
|
|
315
|
+
|
|
316
|
+
|
|
317
|
+
@patch("voiceio.llm_api.chat")
|
|
318
|
+
def test_review_batches_large_input(mock_chat):
|
|
319
|
+
"""Large suspicious lists are split across multiple chat() calls."""
|
|
320
|
+
n = _REVIEW_BATCH_SIZE * 3 + 7 # forces 4 batches
|
|
321
|
+
words = [SuspiciousWord(word=f"word{i}", count=1) for i in range(n)]
|
|
322
|
+
# Each call returns a unique vocab entry so we can verify all batches ran.
|
|
323
|
+
call_counter = {"i": 0}
|
|
324
|
+
|
|
325
|
+
def fake_chat(*_a, **_kw):
|
|
326
|
+
call_counter["i"] += 1
|
|
327
|
+
return json.dumps({
|
|
328
|
+
"auto_fix": [], "ask_user": [],
|
|
329
|
+
"vocabulary": [f"batch{call_counter['i']}"],
|
|
330
|
+
})
|
|
331
|
+
|
|
332
|
+
mock_chat.side_effect = fake_chat
|
|
333
|
+
cfg = Config(autocorrect=AutocorrectConfig(api_key="test-key"))
|
|
334
|
+
result = review_suspicious(cfg, words)
|
|
335
|
+
|
|
336
|
+
assert mock_chat.call_count == 4
|
|
337
|
+
# Batches run in parallel — order is non-deterministic, so compare as set.
|
|
338
|
+
assert set(result.vocabulary) == {"batch1", "batch2", "batch3", "batch4"}
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
@patch("voiceio.llm_api.chat")
|
|
342
|
+
def test_review_progress_callback(mock_chat):
|
|
343
|
+
"""on_progress is invoked once per batch and tops out at the total."""
|
|
344
|
+
# Vocab keeps the result non-empty so review_suspicious doesn't fall back.
|
|
345
|
+
mock_chat.return_value = json.dumps(
|
|
346
|
+
{"auto_fix": [], "ask_user": [], "vocabulary": ["x"]},
|
|
347
|
+
)
|
|
348
|
+
n = _REVIEW_BATCH_SIZE * 2 + 5
|
|
349
|
+
words = [SuspiciousWord(word=f"w{i}", count=1) for i in range(n)]
|
|
350
|
+
cfg = Config(autocorrect=AutocorrectConfig(api_key="test-key"))
|
|
351
|
+
|
|
352
|
+
progress: list[tuple[int, int]] = []
|
|
353
|
+
review_suspicious(cfg, words, on_progress=lambda d, t: progress.append((d, t)))
|
|
354
|
+
|
|
355
|
+
# Three batches → three progress calls. Order may vary (parallel),
|
|
356
|
+
# but the final call must report `total = n` and totals must be
|
|
357
|
+
# monotonically non-decreasing.
|
|
358
|
+
assert len(progress) == 3
|
|
359
|
+
assert all(t == n for _, t in progress)
|
|
360
|
+
assert progress[-1][0] == n
|
|
361
|
+
assert all(progress[i][0] <= progress[i + 1][0] for i in range(len(progress) - 1))
|
|
362
|
+
|
|
363
|
+
|
|
364
|
+
@patch("voiceio.autocorrect._REVIEW_OVERALL_TIMEOUT_PER_BATCH", 0.2)
|
|
365
|
+
@patch("voiceio.autocorrect._REVIEW_MAX_WORKERS", 2)
|
|
366
|
+
@patch("voiceio.llm_api.chat")
|
|
367
|
+
def test_review_overall_timeout_returns_partial(mock_chat):
|
|
368
|
+
"""A hung batch beyond the overall deadline doesn't stall the whole review."""
|
|
369
|
+
import time as _t
|
|
370
|
+
|
|
371
|
+
good_response = json.dumps({
|
|
372
|
+
"auto_fix": [], "ask_user": [],
|
|
373
|
+
"vocabulary": ["fast"],
|
|
374
|
+
})
|
|
375
|
+
call_count = {"i": 0}
|
|
376
|
+
|
|
377
|
+
def slow_or_fast(*_a, **_kw):
|
|
378
|
+
call_count["i"] += 1
|
|
379
|
+
# First call returns immediately; second one hangs past the overall budget.
|
|
380
|
+
if call_count["i"] == 1:
|
|
381
|
+
return good_response
|
|
382
|
+
_t.sleep(5.0)
|
|
383
|
+
return good_response
|
|
384
|
+
|
|
385
|
+
mock_chat.side_effect = slow_or_fast
|
|
386
|
+
n = _REVIEW_BATCH_SIZE * 2 # forces 2 batches
|
|
387
|
+
words = [SuspiciousWord(word=f"w{i}", count=1) for i in range(n)]
|
|
388
|
+
cfg = Config(autocorrect=AutocorrectConfig(api_key="test-key"))
|
|
389
|
+
|
|
390
|
+
started = _t.monotonic()
|
|
391
|
+
result = review_suspicious(cfg, words)
|
|
392
|
+
elapsed = _t.monotonic() - started
|
|
393
|
+
|
|
394
|
+
# Fast batch landed; slow batch was abandoned.
|
|
395
|
+
assert result.vocabulary == ["fast"]
|
|
396
|
+
# Overall budget is rounds(1) * 0.2 * 2 = 0.4s — must terminate well below 5s.
|
|
397
|
+
assert elapsed < 2.0
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
@patch("voiceio.llm_api.chat")
|
|
401
|
+
def test_review_partial_batch_failure_keeps_others(mock_chat):
|
|
402
|
+
"""One failed batch doesn't lose results from successful batches."""
|
|
403
|
+
good = json.dumps({
|
|
404
|
+
"auto_fix": [{"wrong": "olamma", "right": "Ollama"}],
|
|
405
|
+
"ask_user": [], "vocabulary": [],
|
|
406
|
+
})
|
|
407
|
+
mock_chat.side_effect = [good, None, good]
|
|
408
|
+
n = _REVIEW_BATCH_SIZE * 2 + 1
|
|
409
|
+
words = [SuspiciousWord(word=f"w{i}", count=1) for i in range(n)]
|
|
410
|
+
cfg = Config(autocorrect=AutocorrectConfig(api_key="test-key"))
|
|
411
|
+
|
|
412
|
+
result = review_suspicious(cfg, words)
|
|
413
|
+
# Two successful batches × one auto_fix each
|
|
414
|
+
assert len(result.auto_fix) == 2
|
|
415
|
+
|
|
416
|
+
|
|
417
|
+
# ── ranking tests ─────────────────────────────────────────────────────────
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
def test_rank_score_llm_suggestion_dominates():
|
|
421
|
+
"""An LLM-suggested correction outranks anything without one."""
|
|
422
|
+
sw = SuspiciousWord(word="foo", count=50, similar_common=["food"])
|
|
423
|
+
item_with = {"wrong": "bar", "right": "barn"}
|
|
424
|
+
item_without = {"wrong": "foo", "right": ""}
|
|
425
|
+
assert rank_review_score(item_with, None) > rank_review_score(item_without, sw)
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
def test_rank_score_similar_common_boosts():
|
|
429
|
+
"""Words near a real dictionary word score higher than isolated ones."""
|
|
430
|
+
sw_near = SuspiciousWord(word="wordal", count=5, similar_common=["wordle", "word"])
|
|
431
|
+
sw_alone = SuspiciousWord(word="wordal", count=5, similar_common=[])
|
|
432
|
+
item = {"wrong": "wordal", "right": ""}
|
|
433
|
+
assert rank_review_score(item, sw_near) > rank_review_score(item, sw_alone)
|
|
434
|
+
|
|
435
|
+
|
|
436
|
+
def test_rank_score_short_acronym_demoted():
|
|
437
|
+
"""Short all-lowercase ASCII words with no similar common get pushed down."""
|
|
438
|
+
sw_acronym = SuspiciousWord(word="yaml", count=10, similar_common=[])
|
|
439
|
+
sw_typo = SuspiciousWord(word="wordal", count=10, similar_common=["wordle"])
|
|
440
|
+
item_a = {"wrong": "yaml", "right": ""}
|
|
441
|
+
item_t = {"wrong": "wordal", "right": ""}
|
|
442
|
+
assert rank_review_score(item_t, sw_typo) > rank_review_score(item_a, sw_acronym)
|
|
443
|
+
|
|
444
|
+
|
|
445
|
+
def test_rank_review_items_orders_correctly():
|
|
446
|
+
"""rank_review_items: LLM-suggested first, acronyms last."""
|
|
447
|
+
sw_by_word = {
|
|
448
|
+
"yaml": SuspiciousWord(word="yaml", count=10, similar_common=[]),
|
|
449
|
+
"wordal": SuspiciousWord(word="wordal", count=8, similar_common=["wordle"]),
|
|
450
|
+
"ctas": SuspiciousWord(word="ctas", count=14, similar_common=[]),
|
|
451
|
+
"tarnsl": SuspiciousWord(word="tarnsl", count=2, similar_common=["transl", "trans"]),
|
|
452
|
+
}
|
|
453
|
+
items = [
|
|
454
|
+
{"wrong": "yaml", "right": ""},
|
|
455
|
+
{"wrong": "wordal", "right": ""},
|
|
456
|
+
{"wrong": "ctas", "right": ""},
|
|
457
|
+
{"wrong": "tarnsl", "right": "transl"}, # has LLM suggestion
|
|
458
|
+
]
|
|
459
|
+
ranked = rank_review_items(items, sw_by_word)
|
|
460
|
+
order = [it["wrong"] for it in ranked]
|
|
461
|
+
# LLM-suggested first; then word with similar_common; acronyms last.
|
|
462
|
+
assert order[0] == "tarnsl"
|
|
463
|
+
assert order[1] == "wordal"
|
|
464
|
+
assert set(order[2:]) == {"yaml", "ctas"}
|
|
465
|
+
|
|
466
|
+
|
|
467
|
+
def test_rank_review_items_handles_missing_metadata():
|
|
468
|
+
"""Items without a SuspiciousWord entry still rank (lower)."""
|
|
469
|
+
items = [
|
|
470
|
+
{"wrong": "foo", "right": ""},
|
|
471
|
+
{"wrong": "bar", "right": "baz"},
|
|
472
|
+
]
|
|
473
|
+
ranked = rank_review_items(items, {})
|
|
474
|
+
assert ranked[0]["wrong"] == "bar"
|
|
@@ -0,0 +1,264 @@
|
|
|
1
|
+
"""Tests for CLI helpers in voiceio.cli."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
from unittest.mock import MagicMock
|
|
5
|
+
|
|
6
|
+
import pytest
|
|
7
|
+
|
|
8
|
+
from voiceio.autocorrect import SuspiciousWord
|
|
9
|
+
from voiceio.cli import _offer_cluster_apply, _save_api_key
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@pytest.fixture
|
|
13
|
+
def patch_config_path(monkeypatch, tmp_path):
|
|
14
|
+
"""Redirect voiceio.config.CONFIG_PATH to a temp file."""
|
|
15
|
+
config_path = tmp_path / "config.toml"
|
|
16
|
+
monkeypatch.setattr("voiceio.config.CONFIG_PATH", config_path)
|
|
17
|
+
return config_path
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def test_save_api_key_empty_config_openrouter(patch_config_path):
|
|
21
|
+
"""Saving to a fresh config writes a new [autocorrect] block."""
|
|
22
|
+
cfg = MagicMock()
|
|
23
|
+
_save_api_key(cfg, "sk-or-v1-abc123")
|
|
24
|
+
|
|
25
|
+
content = patch_config_path.read_text(encoding="utf-8")
|
|
26
|
+
assert "[autocorrect]" in content
|
|
27
|
+
assert 'api_key = "sk-or-v1-abc123"' in content
|
|
28
|
+
assert 'base_url = "https://openrouter.ai/api/v1"' in content
|
|
29
|
+
assert "moonshotai/kimi-k2-0905" in content
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def test_save_api_key_detects_anthropic(patch_config_path):
|
|
33
|
+
"""sk-ant- prefix routes to Anthropic provider."""
|
|
34
|
+
_save_api_key(MagicMock(), "sk-ant-api03-xyz")
|
|
35
|
+
|
|
36
|
+
content = patch_config_path.read_text(encoding="utf-8")
|
|
37
|
+
assert 'api_key = "sk-ant-api03-xyz"' in content
|
|
38
|
+
assert "api.anthropic.com" in content
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def test_save_api_key_detects_openai(patch_config_path):
|
|
42
|
+
"""sk-proj- prefix routes to OpenAI provider."""
|
|
43
|
+
_save_api_key(MagicMock(), "sk-proj-foo")
|
|
44
|
+
|
|
45
|
+
content = patch_config_path.read_text(encoding="utf-8")
|
|
46
|
+
assert 'api_key = "sk-proj-foo"' in content
|
|
47
|
+
assert "api.openai.com" in content
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def test_save_api_key_replaces_existing_in_autocorrect(patch_config_path):
|
|
51
|
+
"""Existing api_key under [autocorrect] is overwritten, not duplicated."""
|
|
52
|
+
patch_config_path.write_text(
|
|
53
|
+
'[autocorrect]\n'
|
|
54
|
+
'api_key = "old-key"\n'
|
|
55
|
+
'base_url = "https://example.com/v1"\n'
|
|
56
|
+
'model = "old-model"\n',
|
|
57
|
+
encoding="utf-8",
|
|
58
|
+
)
|
|
59
|
+
_save_api_key(MagicMock(), "sk-or-v1-new")
|
|
60
|
+
|
|
61
|
+
content = patch_config_path.read_text(encoding="utf-8")
|
|
62
|
+
assert content.count("api_key") == 1
|
|
63
|
+
assert 'api_key = "sk-or-v1-new"' in content
|
|
64
|
+
assert "old-key" not in content
|
|
65
|
+
assert "old-model" not in content
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def test_save_api_key_replaces_commented_field(patch_config_path):
|
|
69
|
+
"""A commented-out api_key line gets replaced."""
|
|
70
|
+
patch_config_path.write_text(
|
|
71
|
+
'[autocorrect]\n'
|
|
72
|
+
'# api_key = ""\n'
|
|
73
|
+
'# base_url = ""\n'
|
|
74
|
+
'# model = ""\n',
|
|
75
|
+
encoding="utf-8",
|
|
76
|
+
)
|
|
77
|
+
_save_api_key(MagicMock(), "sk-or-v1-new")
|
|
78
|
+
|
|
79
|
+
content = patch_config_path.read_text(encoding="utf-8")
|
|
80
|
+
assert 'api_key = "sk-or-v1-new"' in content
|
|
81
|
+
assert "# api_key" not in content
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def test_save_api_key_preserves_other_sections(patch_config_path):
|
|
85
|
+
"""Sections other than [autocorrect] are untouched."""
|
|
86
|
+
patch_config_path.write_text(
|
|
87
|
+
'[model]\n'
|
|
88
|
+
'language = "en"\n'
|
|
89
|
+
'\n'
|
|
90
|
+
'[autocorrect]\n'
|
|
91
|
+
'api_key = "old"\n'
|
|
92
|
+
'\n'
|
|
93
|
+
'[tts]\n'
|
|
94
|
+
'engine = "piper"\n',
|
|
95
|
+
encoding="utf-8",
|
|
96
|
+
)
|
|
97
|
+
_save_api_key(MagicMock(), "sk-or-v1-new")
|
|
98
|
+
|
|
99
|
+
content = patch_config_path.read_text(encoding="utf-8")
|
|
100
|
+
assert '[model]' in content
|
|
101
|
+
assert 'language = "en"' in content
|
|
102
|
+
assert '[tts]' in content
|
|
103
|
+
assert 'engine = "piper"' in content
|
|
104
|
+
assert 'api_key = "sk-or-v1-new"' in content
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def test_save_api_key_appends_section_when_missing(patch_config_path):
|
|
108
|
+
"""If [autocorrect] doesn't exist yet, it's appended."""
|
|
109
|
+
patch_config_path.write_text(
|
|
110
|
+
'[model]\n'
|
|
111
|
+
'language = "en"\n',
|
|
112
|
+
encoding="utf-8",
|
|
113
|
+
)
|
|
114
|
+
_save_api_key(MagicMock(), "sk-or-v1-fresh")
|
|
115
|
+
|
|
116
|
+
content = patch_config_path.read_text(encoding="utf-8")
|
|
117
|
+
assert '[model]' in content
|
|
118
|
+
assert '[autocorrect]' in content
|
|
119
|
+
assert 'api_key = "sk-or-v1-fresh"' in content
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def test_save_api_key_creates_parent_dir(monkeypatch, tmp_path):
|
|
123
|
+
"""Parent directory is created if missing."""
|
|
124
|
+
nested = tmp_path / "deep" / "nested" / "config.toml"
|
|
125
|
+
monkeypatch.setattr("voiceio.config.CONFIG_PATH", nested)
|
|
126
|
+
|
|
127
|
+
_save_api_key(MagicMock(), "sk-or-v1-abc")
|
|
128
|
+
|
|
129
|
+
assert nested.exists()
|
|
130
|
+
assert 'api_key = "sk-or-v1-abc"' in nested.read_text(encoding="utf-8")
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
# ── _offer_cluster_apply ──────────────────────────────────────────────────
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def _rl_prompt(s): # passthrough — strip ANSI is done by readline IRL
|
|
137
|
+
return s
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def test_cluster_apply_no_close_variants_returns_zero(monkeypatch):
|
|
141
|
+
"""No remaining items within Levenshtein 2 → no prompt, no extra fixes."""
|
|
142
|
+
cd = MagicMock()
|
|
143
|
+
to_review = [
|
|
144
|
+
{"wrong": "pnit", "right": ""},
|
|
145
|
+
{"wrong": "completely_different", "right": ""},
|
|
146
|
+
]
|
|
147
|
+
sw_by_word = {
|
|
148
|
+
"pnit": SuspiciousWord(word="pnit", count=6),
|
|
149
|
+
"completely_different": SuspiciousWord(word="completely_different", count=1),
|
|
150
|
+
}
|
|
151
|
+
# Should not call input() at all
|
|
152
|
+
monkeypatch.setattr("builtins.input", lambda *_a, **_k: pytest.fail("prompted"))
|
|
153
|
+
n = _offer_cluster_apply(
|
|
154
|
+
cd, "pnit", "peanut", to_review, current_i=1,
|
|
155
|
+
sw_by_word=sw_by_word, rl_prompt=_rl_prompt,
|
|
156
|
+
)
|
|
157
|
+
assert n == 0
|
|
158
|
+
assert cd.add.call_count == 0
|
|
159
|
+
assert len(to_review) == 2
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def test_cluster_apply_yes_applies_to_all_close_variants(monkeypatch):
|
|
163
|
+
"""Y reply applies the same correction to every Levenshtein-≤2 remaining item."""
|
|
164
|
+
cd = MagicMock()
|
|
165
|
+
to_review = [
|
|
166
|
+
{"wrong": "pnit", "right": ""}, # current (already corrected)
|
|
167
|
+
{"wrong": "pnat", "right": "PNET"}, # close — should be batch-fixed
|
|
168
|
+
{"wrong": "pnut", "right": "PNET"}, # close — should be batch-fixed
|
|
169
|
+
{"wrong": "yaml", "right": ""}, # not close — left alone
|
|
170
|
+
{"wrong": "pinat", "right": "PNET"}, # close (distance 2) — batch-fixed
|
|
171
|
+
]
|
|
172
|
+
sw_by_word = {
|
|
173
|
+
w["wrong"]: SuspiciousWord(word=w["wrong"], count=2) for w in to_review
|
|
174
|
+
}
|
|
175
|
+
monkeypatch.setattr("builtins.input", lambda *_a, **_k: "y")
|
|
176
|
+
|
|
177
|
+
n = _offer_cluster_apply(
|
|
178
|
+
cd, "pnit", "peanut", to_review, current_i=1,
|
|
179
|
+
sw_by_word=sw_by_word, rl_prompt=_rl_prompt,
|
|
180
|
+
)
|
|
181
|
+
assert n == 3
|
|
182
|
+
added = {call.args for call in cd.add.call_args_list}
|
|
183
|
+
assert added == {("pnat", "peanut"), ("pnut", "peanut"), ("pinat", "peanut")}
|
|
184
|
+
# yaml stays in to_review; the three variants are removed
|
|
185
|
+
assert [it["wrong"] for it in to_review] == ["pnit", "yaml"]
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def test_cluster_apply_no_reply_skips(monkeypatch):
|
|
189
|
+
"""Replying 'n' applies nothing and leaves to_review intact."""
|
|
190
|
+
cd = MagicMock()
|
|
191
|
+
to_review = [
|
|
192
|
+
{"wrong": "pnit", "right": ""},
|
|
193
|
+
{"wrong": "pnat", "right": ""},
|
|
194
|
+
{"wrong": "pnut", "right": ""},
|
|
195
|
+
]
|
|
196
|
+
sw_by_word = {
|
|
197
|
+
w["wrong"]: SuspiciousWord(word=w["wrong"], count=2) for w in to_review
|
|
198
|
+
}
|
|
199
|
+
monkeypatch.setattr("builtins.input", lambda *_a, **_k: "n")
|
|
200
|
+
|
|
201
|
+
n = _offer_cluster_apply(
|
|
202
|
+
cd, "pnit", "peanut", to_review, current_i=1,
|
|
203
|
+
sw_by_word=sw_by_word, rl_prompt=_rl_prompt,
|
|
204
|
+
)
|
|
205
|
+
assert n == 0
|
|
206
|
+
assert cd.add.call_count == 0
|
|
207
|
+
assert len(to_review) == 3
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def test_cluster_apply_default_yes_on_empty_input(monkeypatch):
|
|
211
|
+
"""Empty input (just Enter) is treated as Yes."""
|
|
212
|
+
cd = MagicMock()
|
|
213
|
+
to_review = [
|
|
214
|
+
{"wrong": "pnit", "right": ""},
|
|
215
|
+
{"wrong": "pnat", "right": ""},
|
|
216
|
+
]
|
|
217
|
+
sw_by_word = {w["wrong"]: SuspiciousWord(word=w["wrong"], count=2) for w in to_review}
|
|
218
|
+
monkeypatch.setattr("builtins.input", lambda *_a, **_k: "")
|
|
219
|
+
n = _offer_cluster_apply(
|
|
220
|
+
cd, "pnit", "peanut", to_review, current_i=1,
|
|
221
|
+
sw_by_word=sw_by_word, rl_prompt=_rl_prompt,
|
|
222
|
+
)
|
|
223
|
+
assert n == 1
|
|
224
|
+
cd.add.assert_called_once_with("pnat", "peanut")
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def test_cluster_apply_skips_already_reviewed_indices(monkeypatch):
|
|
228
|
+
"""Items at indices < current_i (already reviewed) are not touched."""
|
|
229
|
+
cd = MagicMock()
|
|
230
|
+
to_review = [
|
|
231
|
+
{"wrong": "pnut", "right": ""}, # already reviewed (index 0)
|
|
232
|
+
{"wrong": "pnit", "right": ""}, # current
|
|
233
|
+
{"wrong": "pnat", "right": ""}, # remaining — eligible
|
|
234
|
+
]
|
|
235
|
+
sw_by_word = {w["wrong"]: SuspiciousWord(word=w["wrong"], count=2) for w in to_review}
|
|
236
|
+
monkeypatch.setattr("builtins.input", lambda *_a, **_k: "y")
|
|
237
|
+
n = _offer_cluster_apply(
|
|
238
|
+
cd, "pnit", "peanut", to_review, current_i=2, # skip past current word too
|
|
239
|
+
sw_by_word=sw_by_word, rl_prompt=_rl_prompt,
|
|
240
|
+
)
|
|
241
|
+
# Only pnat (at index 2) is eligible — pnut is at index 0 (< current_i).
|
|
242
|
+
assert n == 1
|
|
243
|
+
cd.add.assert_called_once_with("pnat", "peanut")
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def test_cluster_apply_eof_aborts_safely(monkeypatch):
|
|
247
|
+
"""Ctrl-D / EOF on the prompt does not crash and applies nothing."""
|
|
248
|
+
cd = MagicMock()
|
|
249
|
+
to_review = [
|
|
250
|
+
{"wrong": "pnit", "right": ""},
|
|
251
|
+
{"wrong": "pnat", "right": ""},
|
|
252
|
+
]
|
|
253
|
+
sw_by_word = {w["wrong"]: SuspiciousWord(word=w["wrong"], count=2) for w in to_review}
|
|
254
|
+
|
|
255
|
+
def raise_eof(*_a, **_k):
|
|
256
|
+
raise EOFError
|
|
257
|
+
|
|
258
|
+
monkeypatch.setattr("builtins.input", raise_eof)
|
|
259
|
+
n = _offer_cluster_apply(
|
|
260
|
+
cd, "pnit", "peanut", to_review, current_i=1,
|
|
261
|
+
sw_by_word=sw_by_word, rl_prompt=_rl_prompt,
|
|
262
|
+
)
|
|
263
|
+
assert n == 0
|
|
264
|
+
assert cd.add.call_count == 0
|
|
@@ -95,6 +95,50 @@ def test_chat_malformed_response(mock_urlopen):
|
|
|
95
95
|
assert chat(cfg, "sys", "msg") is None
|
|
96
96
|
|
|
97
97
|
|
|
98
|
+
@patch("urllib.request.urlopen")
|
|
99
|
+
def test_chat_null_content_returns_none(mock_urlopen):
|
|
100
|
+
"""Thinking models (Kimi K2.6 etc.) can return content=None — must not crash."""
|
|
101
|
+
mock_urlopen.return_value = _mock_response({
|
|
102
|
+
"choices": [{"message": {"content": None}}],
|
|
103
|
+
})
|
|
104
|
+
cfg = _cfg()
|
|
105
|
+
assert chat(cfg, "sys", "msg") is None
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
@patch("urllib.request.urlopen")
|
|
109
|
+
def test_chat_falls_back_to_reasoning_field(mock_urlopen):
|
|
110
|
+
"""When content is null but reasoning is present, use reasoning text."""
|
|
111
|
+
mock_urlopen.return_value = _mock_response({
|
|
112
|
+
"choices": [{"message": {
|
|
113
|
+
"content": None,
|
|
114
|
+
"reasoning": "the answer",
|
|
115
|
+
}}],
|
|
116
|
+
})
|
|
117
|
+
cfg = _cfg()
|
|
118
|
+
assert chat(cfg, "sys", "msg") == "the answer"
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
@patch("urllib.request.urlopen")
|
|
122
|
+
def test_chat_falls_back_to_reasoning_content_field(mock_urlopen):
|
|
123
|
+
"""Some providers expose `reasoning_content` instead of `reasoning`."""
|
|
124
|
+
mock_urlopen.return_value = _mock_response({
|
|
125
|
+
"choices": [{"message": {
|
|
126
|
+
"content": None,
|
|
127
|
+
"reasoning_content": "thought-through answer",
|
|
128
|
+
}}],
|
|
129
|
+
})
|
|
130
|
+
cfg = _cfg()
|
|
131
|
+
assert chat(cfg, "sys", "msg") == "thought-through answer"
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
@patch("urllib.request.urlopen")
|
|
135
|
+
def test_chat_anthropic_null_content_array(mock_urlopen):
|
|
136
|
+
"""Anthropic native API: content=null shouldn't crash."""
|
|
137
|
+
mock_urlopen.return_value = _mock_response({"content": None})
|
|
138
|
+
cfg = _cfg(base_url="https://api.anthropic.com/v1")
|
|
139
|
+
assert chat(cfg, "sys", "msg") is None
|
|
140
|
+
|
|
141
|
+
|
|
98
142
|
# ── check_api_key ────────────────────────────────────────────────────────
|
|
99
143
|
|
|
100
144
|
|
|
@@ -157,7 +201,7 @@ def test_check_api_key_anthropic(mock_urlopen):
|
|
|
157
201
|
def test_detect_openrouter():
    """An sk-or-… key is routed to OpenRouter with a kimi default model."""
    base, model_name = detect_provider("sk-or-abc123")
    assert "openrouter" in base
    assert "kimi" in model_name
|
|
161
205
|
|
|
162
206
|
|
|
163
207
|
def test_detect_anthropic():
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
"""Tests for text post-processing (punctuation, capitalization)."""
|
|
2
2
|
from __future__ import annotations
|
|
3
3
|
|
|
4
|
-
from voiceio.postprocess import cleanup
|
|
4
|
+
from voiceio.postprocess import apply_pipeline, cleanup
|
|
5
5
|
from voiceio.streaming import _word_match_len
|
|
6
6
|
|
|
7
7
|
|
|
@@ -105,3 +105,50 @@ class TestWordMatchCompatibility:
|
|
|
105
105
|
raw = ["testing,", "testing,", "hello"]
|
|
106
106
|
cleaned = ["Testing,", "testing", "hello"]
|
|
107
107
|
assert _word_match_len(raw, cleaned) == 3
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
class TestVoiceInputPrefix:
    """The voice-input prefix marker is prepended only when configured and the text is non-empty."""

    def test_disabled_by_default(self):
        # Without a configured prefix the text passes through untouched.
        result, abort = apply_pipeline("hello world", final=True)
        assert result == "hello world"
        assert abort is False

    def test_applied_when_set_final(self):
        result, abort = apply_pipeline(
            "hello world",
            voice_input_prefix="[voice]",
            final=True,
        )
        assert result == "[voice] hello world"
        assert abort is False

    def test_applied_during_streaming(self):
        # Non-final (streaming) passes must also carry the marker so it
        # shows up from the first chunk rather than only at the very end.
        result, _ = apply_pipeline(
            "partial",
            voice_input_prefix="[voice]",
            final=False,
        )
        assert result == "[voice] partial"

    def test_not_applied_to_empty_text(self):
        # Empty text stays empty — no dangling marker is emitted.
        result, abort = apply_pipeline(
            "",
            voice_input_prefix="[voice]",
            final=True,
        )
        assert result == ""
        assert abort is False

    def test_custom_prefix(self):
        result, _ = apply_pipeline("ok", voice_input_prefix="[v]", final=True)
        assert result == "[v] ok"

    def test_with_cleanup_chain(self):
        # Cleanup capitalizes first; the prefix is then prepended verbatim.
        result, _ = apply_pipeline(
            "hello world",
            do_cleanup=True,
            voice_input_prefix="[voice]",
            final=True,
        )
        assert result == "[voice] Hello world"
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.3.13"
|