agent-cli 0.70.2__py3-none-any.whl → 0.70.4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- agent_cli/agents/assistant.py +1 -1
- agent_cli/agents/autocorrect.py +1 -1
- agent_cli/agents/chat.py +1 -1
- agent_cli/agents/memory/proxy.py +1 -1
- agent_cli/agents/rag_proxy.py +1 -1
- agent_cli/agents/speak.py +1 -1
- agent_cli/agents/transcribe.py +1 -1
- agent_cli/agents/transcribe_daemon.py +1 -1
- agent_cli/agents/voice_edit.py +1 -1
- agent_cli/core/transcription_logger.py +1 -1
- agent_cli/opts.py +19 -2
- agent_cli/server/cli.py +9 -17
- agent_cli/server/proxy/api.py +65 -18
- {agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/METADATA +190 -82
- {agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/RECORD +18 -18
- {agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/WHEEL +0 -0
- {agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/entry_points.txt +0 -0
- {agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/licenses/LICENSE +0 -0
agent_cli/agents/assistant.py
CHANGED
@@ -306,7 +306,7 @@ def assistant(
     # --- General Options ---
     save_file: Path | None = opts.SAVE_FILE,
     clipboard: bool = opts.CLIPBOARD,
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
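Every agent below carries the same one-line change: the `log_level` parameter is annotated with the shared `opts.LogLevel` Literal and defaults to the shared `opts.LOG_LEVEL` option, both defined in `agent_cli/opts.py` further down. A minimal standalone sketch of that reuse pattern, assuming a typer version that accepts `Literal` choice annotations (as this diff itself does); the `demo` commands are illustrative, not agent-cli code:

```python
# Sketch: one module-level typer.Option reused as the default for the same
# parameter across several commands, typed with a Literal of allowed values.
from typing import Literal

import typer

LogLevel = Literal["debug", "info", "warning", "error"]

# Declared once; every command that takes --log-level reuses this object.
LOG_LEVEL = typer.Option(
    "info",
    "--log-level",
    envvar="LOG_LEVEL",
    help="Set logging level.",
    case_sensitive=False,
)

app = typer.Typer()


@app.command()
def assistant(log_level: LogLevel = LOG_LEVEL) -> None:
    typer.echo(f"assistant log level: {log_level}")


@app.command()
def speak(log_level: LogLevel = LOG_LEVEL) -> None:
    typer.echo(f"speak log level: {log_level}")


if __name__ == "__main__":
    app()
```

Because the option object is shared, help text, env var, and choice validation stay consistent across every command that uses it.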
agent_cli/agents/autocorrect.py
CHANGED
@@ -233,7 +233,7 @@ def autocorrect(
     llm_gemini_model: str = opts.LLM_GEMINI_MODEL,
     gemini_api_key: str | None = opts.GEMINI_API_KEY,
     # --- General Options ---
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     quiet: bool = opts.QUIET,
     json_output: bool = opts.JSON_OUTPUT,
agent_cli/agents/chat.py
CHANGED
@@ -437,7 +437,7 @@ def chat(
     ),
     # --- General Options ---
     save_file: Path | None = opts.SAVE_FILE,
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/agents/memory/proxy.py
CHANGED
@@ -68,7 +68,7 @@ def proxy(
         help="Enable automatic git commit of memory changes.",
         rich_help_panel="Memory Configuration",
     ),
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     config_file: str | None = opts.CONFIG_FILE,
     print_args: bool = opts.PRINT_ARGS,
 ) -> None:
agent_cli/agents/rag_proxy.py
CHANGED
@@ -45,7 +45,7 @@ def rag_proxy(
         help="Port to bind to",
         rich_help_panel="Server Configuration",
     ),
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     config_file: str | None = opts.CONFIG_FILE,
     print_args: bool = opts.PRINT_ARGS,
     enable_rag_tools: bool = typer.Option(
agent_cli/agents/speak.py
CHANGED
@@ -120,7 +120,7 @@ def speak(
     stop: bool = opts.STOP,
     status: bool = opts.STATUS,
     toggle: bool = opts.TOGGLE,
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     quiet: bool = opts.QUIET,
     json_output: bool = opts.JSON_OUTPUT,
agent_cli/agents/transcribe.py
CHANGED
@@ -504,7 +504,7 @@ def transcribe(  # noqa: PLR0912
     toggle: bool = opts.TOGGLE,
     # --- General Options ---
     clipboard: bool = opts.CLIPBOARD,
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/agents/transcribe_daemon.py
CHANGED

@@ -361,7 +361,7 @@ def transcribe_daemon(  # noqa: PLR0912
     stop: bool = opts.STOP,
     status: bool = opts.STATUS,
     # --- General Options ---
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file_logging: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/agents/voice_edit.py
CHANGED
@@ -221,7 +221,7 @@ def voice_edit(
     # --- General Options ---
     save_file: Path | None = opts.SAVE_FILE,
     clipboard: bool = opts.CLIPBOARD,
-    log_level:
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/opts.py
CHANGED
@@ -2,12 +2,15 @@

 import copy
 from pathlib import Path
+from typing import Literal

 import typer
 from typer.models import OptionInfo

 from agent_cli.constants import DEFAULT_OPENAI_EMBEDDING_MODEL, DEFAULT_OPENAI_MODEL

+LogLevel = Literal["debug", "info", "warning", "error"]
+

 def with_default(option: OptionInfo, default: str) -> OptionInfo:
     """Create a copy of a typer Option with a different default value."""

@@ -20,18 +23,21 @@ def with_default(option: OptionInfo, default: str) -> OptionInfo:
 LLM_PROVIDER: str = typer.Option(
     "ollama",
     "--llm-provider",
+    envvar="LLM_PROVIDER",
     help="The LLM provider to use ('ollama', 'openai', 'gemini').",
     rich_help_panel="Provider Selection",
 )
 ASR_PROVIDER: str = typer.Option(
     "wyoming",
     "--asr-provider",
+    envvar="ASR_PROVIDER",
     help="The ASR provider to use ('wyoming', 'openai', 'gemini').",
     rich_help_panel="Provider Selection",
 )
 TTS_PROVIDER: str = typer.Option(
     "wyoming",
     "--tts-provider",
+    envvar="TTS_PROVIDER",
     help="The TTS provider to use ('wyoming', 'openai', 'kokoro', 'gemini').",
     rich_help_panel="Provider Selection",
 )

@@ -48,12 +54,14 @@ LLM: bool = typer.Option(
 LLM_OLLAMA_MODEL: str = typer.Option(
     "gemma3:4b",
     "--llm-ollama-model",
+    envvar="LLM_OLLAMA_MODEL",
     help="The Ollama model to use. Default is gemma3:4b.",
     rich_help_panel="LLM: Ollama",
 )
 LLM_OLLAMA_HOST: str = typer.Option(
     "http://localhost:11434",
     "--llm-ollama-host",
+    envvar="LLM_OLLAMA_HOST",
     help="The Ollama server host. Default is http://localhost:11434.",
     rich_help_panel="LLM: Ollama",
 )

@@ -61,6 +69,7 @@ LLM_OLLAMA_HOST: str = typer.Option(
 LLM_OPENAI_MODEL: str = typer.Option(
     DEFAULT_OPENAI_MODEL,
     "--llm-openai-model",
+    envvar="LLM_OPENAI_MODEL",
     help="The OpenAI model to use for LLM tasks.",
     rich_help_panel="LLM: OpenAI-compatible",
 )

@@ -82,6 +91,7 @@ OPENAI_BASE_URL: str | None = typer.Option(
 LLM_GEMINI_MODEL: str = typer.Option(
     "gemini-3-flash-preview",
     "--llm-gemini-model",
+    envvar="LLM_GEMINI_MODEL",
     help="The Gemini model to use for LLM tasks.",
     rich_help_panel="LLM: Gemini",
 )

@@ -124,12 +134,14 @@ LIST_DEVICES: bool = typer.Option(
 ASR_WYOMING_IP: str = typer.Option(
     "localhost",
     "--asr-wyoming-ip",
+    envvar="ASR_WYOMING_IP",
     help="Wyoming ASR server IP address.",
     rich_help_panel="Audio Input: Wyoming",
 )
 ASR_WYOMING_PORT: int = typer.Option(
     10300,
     "--asr-wyoming-port",
+    envvar="ASR_WYOMING_PORT",
     help="Wyoming ASR server port.",
     rich_help_panel="Audio Input: Wyoming",
 )

@@ -137,18 +149,21 @@ ASR_WYOMING_PORT: int = typer.Option(
 ASR_OPENAI_MODEL: str = typer.Option(
     "whisper-1",
     "--asr-openai-model",
+    envvar="ASR_OPENAI_MODEL",
     help="The OpenAI model to use for ASR (transcription).",
     rich_help_panel="Audio Input: OpenAI-compatible",
 )
 ASR_OPENAI_BASE_URL: str | None = typer.Option(
     None,
     "--asr-openai-base-url",
+    envvar="ASR_OPENAI_BASE_URL",
     help="Custom base URL for OpenAI-compatible ASR API (e.g., for custom Whisper server: http://localhost:9898).",
     rich_help_panel="Audio Input: OpenAI-compatible",
 )
 ASR_OPENAI_PROMPT: str | None = typer.Option(
     None,
     "--asr-openai-prompt",
+    envvar="ASR_OPENAI_PROMPT",
     help="Custom prompt to guide transcription (optional).",
     rich_help_panel="Audio Input: OpenAI-compatible",
 )

@@ -156,6 +171,7 @@ ASR_OPENAI_PROMPT: str | None = typer.Option(
 ASR_GEMINI_MODEL: str = typer.Option(
     "gemini-3-flash-preview",
     "--asr-gemini-model",
+    envvar="ASR_GEMINI_MODEL",
     help="The Gemini model to use for ASR (transcription).",
     rich_help_panel="Audio Input: Gemini",
 )

@@ -348,9 +364,10 @@ CLIPBOARD: bool = typer.Option(
     help="Copy result to clipboard.",
     rich_help_panel="General Options",
 )
-LOG_LEVEL:
-    "
+LOG_LEVEL: LogLevel = typer.Option(
+    "info",
     "--log-level",
+    envvar="LOG_LEVEL",
     help="Set logging level.",
     case_sensitive=False,
     rich_help_panel="General Options",
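Beyond the new `LogLevel` type, the bulk of the `opts.py` change is the `envvar=` argument on each option. With `envvar` set, typer resolves a value as: explicit flag, else the named environment variable, else the declared default, and it advertises the variable in `--help` (visible in the METADATA changes further down). A small sketch of that behavior with a hypothetical single-command app:

```python
# Sketch: how typer's envvar= lets an environment variable override an
# option's default. Hypothetical standalone command, not agent-cli code.
import typer

app = typer.Typer()


@app.command()
def greet(
    host: str = typer.Option(
        "http://localhost:11434",
        "--llm-ollama-host",
        envvar="LLM_OLLAMA_HOST",  # consulted only when the flag is absent
        help="The Ollama server host.",
    ),
) -> None:
    typer.echo(host)


if __name__ == "__main__":
    app()

# Resolution order demonstrated:
#   $ python demo.py
#   http://localhost:11434                      (declared default)
#   $ LLM_OLLAMA_HOST=http://gpu-box:11434 python demo.py
#   http://gpu-box:11434                        (env var overrides default)
#   $ LLM_OLLAMA_HOST=http://gpu-box:11434 python demo.py --llm-ollama-host http://other:11434
#   http://other:11434                          (explicit flag wins)
```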
agent_cli/server/cli.py
CHANGED
@@ -10,6 +10,7 @@ from typing import Annotated

 import typer

+from agent_cli import opts
 from agent_cli.cli import app as main_app
 from agent_cli.core.deps import requires_extras
 from agent_cli.core.process import set_process_title

@@ -251,14 +252,7 @@ def whisper_cmd(  # noqa: PLR0912, PLR0915
             help="Download model(s) and exit without starting server",
         ),
     ] = False,
-    log_level:
-        str,
-        typer.Option(
-            "--log-level",
-            "-l",
-            help="Logging level: debug, info, warning, error",
-        ),
-    ] = "info",
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     backend: Annotated[
         str,
         typer.Option(

@@ -378,6 +372,7 @@ def whisper_cmd(  # noqa: PLR0912, PLR0915
     console.print()
     console.print("[dim]Configuration:[/dim]")
     console.print(f"  Backend: [cyan]{actual_backend}[/cyan]")
+    console.print(f"  Log level: [cyan]{log_level}[/cyan]")
     console.print()
     console.print("[dim]Endpoints:[/dim]")
     console.print(f"  HTTP API: [cyan]http://{host}:{port}[/cyan]")

@@ -436,6 +431,7 @@ def transcribe_proxy_cmd(
         bool,
         typer.Option("--reload", help="Enable auto-reload for development"),
     ] = False,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
 ) -> None:
     """Run transcription proxy server.

@@ -457,10 +453,12 @@

     """
     _check_server_deps()
+    setup_rich_logging(log_level)

     console.print(
         f"[bold green]Starting Agent CLI transcription proxy on {host}:{port}[/bold green]",
     )
+    console.print(f"[dim]Log level: {log_level}[/dim]")
     if reload:
         console.print("[yellow]Auto-reload enabled for development[/yellow]")

@@ -471,7 +469,7 @@
         host=host,
         port=port,
         reload=reload,
-        log_level=
+        log_level=log_level.lower(),
     )


@@ -558,14 +556,7 @@ def tts_cmd(  # noqa: PLR0915
             help="Download model(s) and exit without starting server",
         ),
     ] = False,
-    log_level:
-        str,
-        typer.Option(
-            "--log-level",
-            "-l",
-            help="Logging level: debug, info, warning, error",
-        ),
-    ] = "info",
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     backend: Annotated[
         str,
         typer.Option(

@@ -679,6 +670,7 @@ def tts_cmd(  # noqa: PLR0915
     console.print()
     console.print("[dim]Configuration:[/dim]")
     console.print(f"  Backend: [cyan]{resolved_backend}[/cyan]")
+    console.print(f"  Log level: [cyan]{log_level}[/cyan]")
     console.print()
     console.print("[dim]Endpoints:[/dim]")
     console.print(f"  HTTP API: [cyan]http://{host}:{port}[/cyan]")
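One detail worth noting in `transcribe_proxy_cmd`: the value handed to the server is `log_level.lower()`. uvicorn's `log_level` parameter expects lowercase names ("critical", "error", "warning", "info", "debug", "trace"), so an uppercase value arriving from a flag or env var would fail its level lookup. A sketch under that assumption; "demo_app:app" is a placeholder import string, not an agent-cli module:

```python
# Sketch: normalizing a user-supplied level before handing it to uvicorn,
# which maps lowercase level names to logging levels.
import uvicorn


def serve(host: str, port: int, log_level: str) -> None:
    uvicorn.run(
        "demo_app:app",  # placeholder ASGI import string
        host=host,
        port=port,
        # uvicorn.run(log_level="INFO") would fail its level lookup;
        # lowercasing makes "INFO", "Info", and "info" all acceptable.
        log_level=log_level.lower(),
    )
```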
agent_cli/server/proxy/api.py
CHANGED
@@ -3,8 +3,9 @@
 from __future__ import annotations

 import logging
+import os
 from pathlib import Path
-from typing import Annotated, Any
+from typing import TYPE_CHECKING, Annotated, Any

 from fastapi import Depends, FastAPI, File, Form, HTTPException, Request, UploadFile
 from pydantic import BaseModel

@@ -26,6 +27,9 @@ from agent_cli.server.common import log_requests_middleware
 from agent_cli.services import asr
 from agent_cli.services.llm import process_and_update_clipboard

+if TYPE_CHECKING:
+    from typer.models import OptionInfo
+
 # Configure logging
 logging.basicConfig(level=logging.INFO)
 LOGGER = logging.getLogger(__name__)

@@ -37,6 +41,40 @@ app = FastAPI(
 )


+@app.on_event("startup")
+async def log_effective_config() -> None:
+    """Log effective configuration on startup to help debug env var issues."""
+    (
+        provider_cfg,
+        wyoming_cfg,
+        openai_asr_cfg,
+        gemini_asr_cfg,
+        ollama_cfg,
+        openai_llm_cfg,
+        gemini_llm_cfg,
+        _,
+    ) = _load_transcription_configs()
+
+    LOGGER.info("ASR provider: %s", provider_cfg.asr_provider)
+    if provider_cfg.asr_provider == "wyoming":
+        LOGGER.info("  Wyoming: %s:%d", wyoming_cfg.asr_wyoming_ip, wyoming_cfg.asr_wyoming_port)
+    elif provider_cfg.asr_provider == "openai":
+        LOGGER.info("  Model: %s", openai_asr_cfg.asr_openai_model)
+        LOGGER.info("  Base URL: %s", openai_asr_cfg.openai_base_url or "https://api.openai.com/v1")
+    elif provider_cfg.asr_provider == "gemini":
+        LOGGER.info("  Model: %s", gemini_asr_cfg.asr_gemini_model)
+
+    LOGGER.info("LLM provider: %s", provider_cfg.llm_provider)
+    if provider_cfg.llm_provider == "ollama":
+        LOGGER.info("  Model: %s", ollama_cfg.llm_ollama_model)
+        LOGGER.info("  Host: %s", ollama_cfg.llm_ollama_host)
+    elif provider_cfg.llm_provider == "openai":
+        LOGGER.info("  Model: %s", openai_llm_cfg.llm_openai_model)
+        LOGGER.info("  Base URL: %s", openai_llm_cfg.openai_base_url or "https://api.openai.com/v1")
+    elif provider_cfg.llm_provider == "gemini":
+        LOGGER.info("  Model: %s", gemini_llm_cfg.llm_gemini_model)
+
+
 @app.middleware("http")
 async def log_requests(request: Request, call_next) -> Any:  # type: ignore[no-untyped-def]  # noqa: ANN001
     """Log basic request information."""

@@ -153,6 +191,13 @@ def _validate_audio_file(audio: UploadFile) -> None:
     )


+def _cfg(key: str, defaults: dict[str, Any], opt: OptionInfo) -> Any:
+    """Get config with priority: env var > config file > option default."""
+    if opt.envvar and (env_val := os.environ.get(opt.envvar)):
+        return int(env_val) if isinstance(opt.default, int) else env_val
+    return defaults.get(key, opt.default)
+
+
 def _load_transcription_configs() -> tuple[
     config.ProviderSelection,
     config.WyomingASR,

@@ -163,41 +208,43 @@ def _load_transcription_configs() -> tuple[
     config.GeminiLLM,
     dict[str, Any],
 ]:
-    """Load
+    """Load config objects. Priority: env var > config file > default."""
     loaded_config = config.load_config()
     wildcard_config = loaded_config.get("defaults", {})
     command_config = loaded_config.get("transcribe", {})
     defaults = {**wildcard_config, **command_config}

     provider_cfg = config.ProviderSelection(
-        asr_provider=
-        llm_provider=
-        tts_provider=opts.TTS_PROVIDER
+        asr_provider=_cfg("asr_provider", defaults, opts.ASR_PROVIDER),
+        llm_provider=_cfg("llm_provider", defaults, opts.LLM_PROVIDER),
+        tts_provider=_cfg("tts_provider", defaults, opts.TTS_PROVIDER),
     )
     wyoming_asr_cfg = config.WyomingASR(
-        asr_wyoming_ip=
-        asr_wyoming_port=
+        asr_wyoming_ip=_cfg("asr_wyoming_ip", defaults, opts.ASR_WYOMING_IP),
+        asr_wyoming_port=_cfg("asr_wyoming_port", defaults, opts.ASR_WYOMING_PORT),
     )
     openai_asr_cfg = config.OpenAIASR(
-        asr_openai_model=
-        openai_api_key=
+        asr_openai_model=_cfg("asr_openai_model", defaults, opts.ASR_OPENAI_MODEL),
+        openai_api_key=_cfg("openai_api_key", defaults, opts.OPENAI_API_KEY),
+        openai_base_url=_cfg("asr_openai_base_url", defaults, opts.ASR_OPENAI_BASE_URL),
+        asr_openai_prompt=_cfg("asr_openai_prompt", defaults, opts.ASR_OPENAI_PROMPT),
    )
     gemini_asr_cfg = config.GeminiASR(
-        asr_gemini_model=
-        gemini_api_key=
+        asr_gemini_model=_cfg("asr_gemini_model", defaults, opts.ASR_GEMINI_MODEL),
+        gemini_api_key=_cfg("gemini_api_key", defaults, opts.GEMINI_API_KEY),
     )
     ollama_cfg = config.Ollama(
-        llm_ollama_model=
-        llm_ollama_host=
+        llm_ollama_model=_cfg("llm_ollama_model", defaults, opts.LLM_OLLAMA_MODEL),
+        llm_ollama_host=_cfg("llm_ollama_host", defaults, opts.LLM_OLLAMA_HOST),
     )
     openai_llm_cfg = config.OpenAILLM(
-        llm_openai_model=
-        openai_api_key=
-        openai_base_url=
+        llm_openai_model=_cfg("llm_openai_model", defaults, opts.LLM_OPENAI_MODEL),
+        openai_api_key=_cfg("openai_api_key", defaults, opts.OPENAI_API_KEY),
+        openai_base_url=_cfg("openai_base_url", defaults, opts.OPENAI_BASE_URL),
     )
     gemini_llm_cfg = config.GeminiLLM(
-        llm_gemini_model=
-        gemini_api_key=
+        llm_gemini_model=_cfg("llm_gemini_model", defaults, opts.LLM_GEMINI_MODEL),
+        gemini_api_key=_cfg("gemini_api_key", defaults, opts.GEMINI_API_KEY),
     )

     return (
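The `_cfg` helper above is the core of this fix: it gives the proxy the same env var > config file > option default precedence that the CLI commands get from typer, including an `int` cast so numeric variables such as ports come back with the right type. A standalone check of that logic; `FakeOption` is a stand-in for typer's `OptionInfo`, not a real agent-cli type:

```python
# Standalone check of the env var > config file > default precedence
# that _cfg implements.
from __future__ import annotations

import os
from dataclasses import dataclass
from typing import Any


@dataclass
class FakeOption:  # stands in for typer.models.OptionInfo
    default: Any
    envvar: str | None = None


def cfg(key: str, defaults: dict[str, Any], opt: FakeOption) -> Any:
    """Get config with priority: env var > config file > option default."""
    if opt.envvar and (env_val := os.environ.get(opt.envvar)):
        # Ports and other int-typed options arrive as strings from the env.
        return int(env_val) if isinstance(opt.default, int) else env_val
    return defaults.get(key, opt.default)


port = FakeOption(default=10300, envvar="ASR_WYOMING_PORT")
assert cfg("asr_wyoming_port", {}, port) == 10300                         # default
assert cfg("asr_wyoming_port", {"asr_wyoming_port": 1234}, port) == 1234  # config file
os.environ["ASR_WYOMING_PORT"] = "9999"
assert cfg("asr_wyoming_port", {"asr_wyoming_port": 1234}, port) == 9999  # env wins
```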
{agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agent-cli
-Version: 0.70.2
+Version: 0.70.4
 Summary: A suite of AI-powered command-line tools for text correction, audio transcription, and voice assistance.
 Project-URL: Homepage, https://github.com/basnijholt/agent-cli
 Author-email: Bas Nijholt <bas@nijho.lt>
@@ -643,17 +643,21 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │

@@ -664,21 +668,25 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │
 │ [env var: GEMINI_API_KEY] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --log-level
-│
-│
-│ --
-│ --
-│
-│ --
-│
-│
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write logs to. │
+│ --quiet -q Suppress console output from rich. │
+│ --json Output result as JSON for │
+│ automation. Implies --quiet and │
+│ --no-clipboard. │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -750,8 +758,10 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --asr-provider TEXT The ASR provider to use ('wyoming', 'openai', 'gemini'). │
+│ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input ──────────────────────────────────────────────────────────────────────────╮

@@ -762,31 +772,40 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Wyoming ─────────────────────────────────────────────────────────────────╮
 │ --asr-wyoming-ip TEXT Wyoming ASR server IP address. │
+│ [env var: ASR_WYOMING_IP] │
 │ [default: localhost] │
 │ --asr-wyoming-port INTEGER Wyoming ASR server port. │
+│ [env var: ASR_WYOMING_PORT] │
 │ [default: 10300] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 │ --asr-openai-base-url TEXT Custom base URL for OpenAI-compatible ASR API │
 │ (e.g., for custom Whisper server: │
 │ http://localhost:9898). │
+│ [env var: ASR_OPENAI_BASE_URL] │
 │ --asr-openai-prompt TEXT Custom prompt to guide transcription (optional). │
+│ [env var: ASR_OPENAI_PROMPT] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │

@@ -797,6 +816,7 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │

@@ -809,21 +829,32 @@ the `[defaults]` section of your configuration file.
 │ will be stopped. If the process is not running, it will be started. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --clipboard --no-clipboard
-│
-│
-│
-│
-│
-│ --
-│
-│ --
-│
-│
-│
-│ --
-│
-│
+│ --clipboard --no-clipboard Copy result to │
+│ clipboard. │
+│ [default: clipboard] │
+│ --log-level [debug|info|warning| Set logging level. │
+│ error] [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to │
+│ write logs to. │
+│ --quiet -q Suppress console │
+│ output from rich. │
+│ --json Output result as JSON │
+│ for automation. │
+│ Implies --quiet and │
+│ --no-clipboard. │
+│ --config TEXT Path to a TOML │
+│ configuration file. │
+│ --print-args Print the command │
+│ line arguments, │
+│ including variables │
+│ taken from the │
+│ configuration file. │
+│ --transcription-log PATH Path to log │
+│ transcription results │
+│ with timestamps, │
+│ hostname, model, and │
+│ raw output. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -924,8 +955,10 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --asr-provider TEXT The ASR provider to use ('wyoming', 'openai', 'gemini'). │
+│ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input ──────────────────────────────────────────────────────────────────────────╮

@@ -936,31 +969,40 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Wyoming ─────────────────────────────────────────────────────────────────╮
 │ --asr-wyoming-ip TEXT Wyoming ASR server IP address. │
+│ [env var: ASR_WYOMING_IP] │
 │ [default: localhost] │
 │ --asr-wyoming-port INTEGER Wyoming ASR server port. │
+│ [env var: ASR_WYOMING_PORT] │
 │ [default: 10300] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 │ --asr-openai-base-url TEXT Custom base URL for OpenAI-compatible ASR API │
 │ (e.g., for custom Whisper server: │
 │ http://localhost:9898). │
+│ [env var: ASR_OPENAI_BASE_URL] │
 │ --asr-openai-prompt TEXT Custom prompt to guide transcription (optional). │
+│ [env var: ASR_OPENAI_PROMPT] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │

@@ -971,6 +1013,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │

@@ -985,13 +1028,15 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ --status Check if a background process is running. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --log-level
-│
-│
-│ --
-│ --
-│ --
-│
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write logs to. │
+│ --quiet -q Suppress console output from rich. │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1045,6 +1090,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --tts-provider TEXT The TTS provider to use ('wyoming', 'openai', 'kokoro', │
 │ 'gemini'). │
+│ [env var: TTS_PROVIDER] │
 │ [default: wyoming] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Output ─────────────────────────────────────────────────────────────────────────╮

@@ -1097,16 +1143,19 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ --list-devices List available audio input and output devices and exit. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --save-file PATH
-│ --log-level
-│
-│
-│ --
-│ --
-│
-│ --
-│
-│
+│ --save-file PATH Save TTS response audio to WAV file. │
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write logs to. │
+│ --quiet -q Suppress console output from rich. │
+│ --json Output result as JSON for │
+│ automation. Implies --quiet and │
+│ --no-clipboard. │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Process Management ───────────────────────────────────────────────────────────────────╮
 │ --stop Stop any running background process. │

@@ -1170,11 +1219,14 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --asr-provider TEXT The ASR provider to use ('wyoming', 'openai', 'gemini'). │
+│ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 │ --tts-provider TEXT The TTS provider to use ('wyoming', 'openai', 'kokoro', │
 │ 'gemini'). │
+│ [env var: TTS_PROVIDER] │
 │ [default: wyoming] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input ──────────────────────────────────────────────────────────────────────────╮

@@ -1185,27 +1237,34 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Wyoming ─────────────────────────────────────────────────────────────────╮
 │ --asr-wyoming-ip TEXT Wyoming ASR server IP address. │
+│ [env var: ASR_WYOMING_IP] │
 │ [default: localhost] │
 │ --asr-wyoming-port INTEGER Wyoming ASR server port. │
+│ [env var: ASR_WYOMING_PORT] │
 │ [default: 10300] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │

@@ -1216,6 +1275,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │

@@ -1272,18 +1332,28 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ will be stopped. If the process is not running, it will be started. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --save-file PATH
-│
-│
-│
-│
-│ --log-
-│
-│
-│
-│
-│ --
-│
+│ --save-file PATH Save TTS response audio │
+│ to WAV file. │
+│ --clipboard --no-clipboard Copy result to │
+│ clipboard. │
+│ [default: clipboard] │
+│ --log-level [debug|info|warning|erro Set logging level. │
+│ r] [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write │
+│ logs to. │
+│ --quiet -q Suppress console output │
+│ from rich. │
+│ --json Output result as JSON │
+│ for automation. Implies │
+│ --quiet and │
+│ --no-clipboard. │
+│ --config TEXT Path to a TOML │
+│ configuration file. │
+│ --print-args Print the command line │
+│ arguments, including │
+│ variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1335,11 +1405,14 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --asr-provider TEXT The ASR provider to use ('wyoming', 'openai', 'gemini'). │
+│ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 │ --tts-provider TEXT The TTS provider to use ('wyoming', 'openai', 'kokoro', │
 │ 'gemini'). │
+│ [env var: TTS_PROVIDER] │
 │ [default: wyoming] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Wake Word ────────────────────────────────────────────────────────────────────────────╮

@@ -1359,27 +1432,34 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Wyoming ─────────────────────────────────────────────────────────────────╮
 │ --asr-wyoming-ip TEXT Wyoming ASR server IP address. │
+│ [env var: ASR_WYOMING_IP] │
 │ [default: localhost] │
 │ --asr-wyoming-port INTEGER Wyoming ASR server port. │
+│ [env var: ASR_WYOMING_PORT] │
 │ [default: 10300] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │

@@ -1390,6 +1470,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │

@@ -1446,16 +1527,24 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ will be stopped. If the process is not running, it will be started. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --save-file PATH
-│
-│
-│
-│
-│ --log-
-│
-│
-│ --
-│
+│ --save-file PATH Save TTS response audio │
+│ to WAV file. │
+│ --clipboard --no-clipboard Copy result to │
+│ clipboard. │
+│ [default: clipboard] │
+│ --log-level [debug|info|warning|erro Set logging level. │
+│ r] [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write │
+│ logs to. │
+│ --quiet -q Suppress console output │
+│ from rich. │
+│ --config TEXT Path to a TOML │
+│ configuration file. │
+│ --print-args Print the command line │
+│ arguments, including │
+│ variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1514,11 +1603,14 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --asr-provider TEXT The ASR provider to use ('wyoming', 'openai', 'gemini'). │
+│ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 │ --tts-provider TEXT The TTS provider to use ('wyoming', 'openai', 'kokoro', │
 │ 'gemini'). │
+│ [env var: TTS_PROVIDER] │
 │ [default: wyoming] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input ──────────────────────────────────────────────────────────────────────────╮

@@ -1529,31 +1621,40 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Wyoming ─────────────────────────────────────────────────────────────────╮
 │ --asr-wyoming-ip TEXT Wyoming ASR server IP address. │
+│ [env var: ASR_WYOMING_IP] │
 │ [default: localhost] │
 │ --asr-wyoming-port INTEGER Wyoming ASR server port. │
+│ [env var: ASR_WYOMING_PORT] │
 │ [default: 10300] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 │ --asr-openai-base-url TEXT Custom base URL for OpenAI-compatible ASR API │
 │ (e.g., for custom Whisper server: │
 │ http://localhost:9898). │
+│ [env var: ASR_OPENAI_BASE_URL] │
 │ --asr-openai-prompt TEXT Custom prompt to guide transcription (optional). │
+│ [env var: ASR_OPENAI_PROMPT] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │

@@ -1564,6 +1665,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │

@@ -1627,14 +1729,16 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ [default: 50] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --save-file PATH
-│ --log-level
-│
-│
-│ --
-│ --
-│ --
-│
+│ --save-file PATH Save TTS response audio to WAV file. │
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write logs to. │
+│ --quiet -q Suppress console output from rich. │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1720,11 +1824,13 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ [default: 8000] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --log-level
-│
-│
-│ --
-│
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1860,11 +1966,13 @@ The `memory proxy` command is the core feature—a middleware server that gives
 │ [default: 8100] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --log-level
-│
-│
-│ --
-│
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
{agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/RECORD
CHANGED

@@ -9,7 +9,7 @@ agent_cli/config_cmd.py,sha256=CiHk1WxtvT21QeMuklTTMCmAdNwjeYENO_w_Qbiys54,9579
 agent_cli/constants.py,sha256=-Q17N6qKIGqPDsu3FxpIKP33G0Cs0RUJlHwYNHxVxek,843
 agent_cli/docs_gen.py,sha256=j6mBHwoPcQzMdgIWi_bB2A6yOyhvmW_cntRfwUg_8p8,13374
 agent_cli/example-config.toml,sha256=xd9BXeOqdYx4xFJt58VBs2I49ESy6dF4-mWF_g8sM9o,7552
-agent_cli/opts.py,sha256=
+agent_cli/opts.py,sha256=NmMxW1Vp52jiSz-wi6PQjurpiDq6p1HqSnoqDBOhNxo,12616
 agent_cli/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agent_cli/_requirements/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agent_cli/_requirements/audio.txt,sha256=KAOtFaFClkj2EAesWI61LZ4N-T1WQvHtvf1sNtDxVgQ,1522

@@ -25,17 +25,17 @@ agent_cli/_requirements/speed.txt,sha256=KwBTrZFXWtgwJ5zrcNtm45zfqvNK0trcR1SbV-w
 agent_cli/_requirements/vad.txt,sha256=HN7OB8cu5cWuVPhoKWye73I9lAWPE-ijeXeVSshCsxs,4017
 agent_cli/agents/__init__.py,sha256=c1rnncDW5pBvP6BiLzFVpLWDNZzFRaUA7-a97avFVAs,321
 agent_cli/agents/_voice_agent_common.py,sha256=PUAztW84Xf9U7d0C_K5cL7I8OANIE1H6M8dFD_cRqps,4360
-agent_cli/agents/assistant.py,sha256=
-agent_cli/agents/autocorrect.py,sha256=
-agent_cli/agents/chat.py,sha256=
-agent_cli/agents/rag_proxy.py,sha256=
-agent_cli/agents/speak.py,sha256=
-agent_cli/agents/transcribe.py,sha256=
-agent_cli/agents/transcribe_daemon.py,sha256=
-agent_cli/agents/voice_edit.py,sha256=
+agent_cli/agents/assistant.py,sha256=uElnNG-ZcRR7QHf8Qcjez0ahF2nEyMHbcEfNE_8dcbY,14056
+agent_cli/agents/autocorrect.py,sha256=yx5Yh8xTtzO-5lyFJBlCgx0yUGO6eYxLSK3RLlrcXlU,9000
+agent_cli/agents/chat.py,sha256=s4g4IvmmLnOOkW5TNX4Mro-AyniiSWw3rKAH7P5DvmA,17267
+agent_cli/agents/rag_proxy.py,sha256=r_Azrw2wNwwmMCOUftHdhFDLbFqZpIUm_rM7Q2eM8Jw,4417
+agent_cli/agents/speak.py,sha256=RgYOGOs7IbNwzLsiLdhXQHK6FGUCPkWLpUBOH1RjQ3s,7111
+agent_cli/agents/transcribe.py,sha256=OYY50085uxP3AdCa36n_bBb1FQ_R1m2WdDLgiNN74js,24664
+agent_cli/agents/transcribe_daemon.py,sha256=pHDs1tSDceUUJBkdE-1sO8VHdeYaALafScL_-qu5bhY,17678
+agent_cli/agents/voice_edit.py,sha256=fL4SdxEfwCMazxtgIbjE9K8Fbm4tpJ4eSsgIkZJ2LB8,11147
 agent_cli/agents/memory/__init__.py,sha256=RkJYhq0SI_62hgUHicvtkJ1k7k9JEvKLqr0302gxylw,805
 agent_cli/agents/memory/add.py,sha256=lk6q2SmuwNNFAoDnfOQxPpNHbbHjekGCyKaWUgC9x-8,6210
-agent_cli/agents/memory/proxy.py,sha256
+agent_cli/agents/memory/proxy.py,sha256=-IXSB2QeVlKOIwa5ha0d799JlfhnergufV1wC2V2eDg,6309
 agent_cli/core/__init__.py,sha256=c_knH7u9QgjsfMIil9NP4bVizHawLUMYoQWU4H9vMlQ,46
 agent_cli/core/audio.py,sha256=43FpYe2Wu_BYK9xJ_55V4xHjHJeFwQ5aM-CQzlTryt8,15168
 agent_cli/core/audio_format.py,sha256=zk3qlYMAlKYPz1enrjihQQspl_C218v1Rbcm7Uktlew,8773

@@ -45,7 +45,7 @@ agent_cli/core/openai_proxy.py,sha256=VOqh40vyVrOa_u3CvXgolf0Bv-rALIXbKMQfjTBtH3
 agent_cli/core/process.py,sha256=Ril7HqMJc-F1E-66pHrOi27gEZD3ZR_ZYhGnN61SVSs,5937
 agent_cli/core/reranker.py,sha256=_RPjLKR5ej6L0Lb05tQFoVSyXOt1cXXn3ydEkCIXj2A,3851
 agent_cli/core/sse.py,sha256=SddiWjHh7DENb1wmvf3wDvX-OhbaC61EceFwQxmDUEo,2232
-agent_cli/core/transcription_logger.py,sha256=
+agent_cli/core/transcription_logger.py,sha256=w8HbHDYFN_qZMfi6Wwpvw3THa-GxkVTzTPhZfE84o-Q,2082
 agent_cli/core/utils.py,sha256=p3OJrNcU6iwqR0C7Q5Ab3_rwJBmP0EbIYT82a9scPSI,16896
 agent_cli/core/vad.py,sha256=mM8VtC4TS3OpohSrtOw38M7T8f6T625FkIKim7Q_EoM,6591
 agent_cli/core/watch.py,sha256=PakxMyqJycN6bkE6fxeo0qe9zj5wjuRly3p7rN9UgjM,2000

@@ -156,13 +156,13 @@ agent_cli/scripts/nvidia-asr-server/server.py,sha256=kPNQIVF3exblvqMtIVk38Y6sZy2
 agent_cli/scripts/nvidia-asr-server/shell.nix,sha256=IT20j5YNj_wc7MdXi7ndogGodDNSGwyq8G0bNoZEpmg,1003
 agent_cli/scripts/nvidia-asr-server/uv.lock,sha256=5WWaqWOuV_moMPC-LIZK-A-Y5oaHr1tUn_vbR-IupzY,728608
 agent_cli/server/__init__.py,sha256=NZuJHlLHck9KWrepNZHrJONptYCQI9P-uTqknSFI5Ds,71
-agent_cli/server/cli.py,sha256=
+agent_cli/server/cli.py,sha256=Bre2Bcofne3MS9jFAEjf9dWgpJYsgHvWlbEWDp4a1wA,22894
 agent_cli/server/common.py,sha256=hBBp6i-2-yhDY260ffwmFBg_ndcoT5SNcfa6uFyP7Vc,6391
 agent_cli/server/model_manager.py,sha256=93l_eeZeqnPALyDIK24or61tvded9TbM8tnde0okVjY,9225
 agent_cli/server/model_registry.py,sha256=KrRV1XxbFYuXu5rJlHFh6PTl_2BKiWnWsaNrf-0c6wQ,6988
 agent_cli/server/streaming.py,sha256=nX_kMNQBxdzvPKUcFQWth7dDBYALRGy_j9mDunKXaJE,2191
 agent_cli/server/proxy/__init__.py,sha256=fOq5bs5B9XKfr7XbdZ825RIxheAJA0VKmKantykjTE4,83
-agent_cli/server/proxy/api.py,sha256=
+agent_cli/server/proxy/api.py,sha256=22TbZlhR9kAVLZDUkw7Ezm0rfd6WFIXp-O00-5kd7SU,15299
 agent_cli/server/tts/__init__.py,sha256=o1TlYmsGeqo-LDZg86KEhLJYWzN7jtMNGS5S8HFUSkw,93
 agent_cli/server/tts/api.py,sha256=GLOJEC4yMlWjz6AjrWBdZLs368DddP27PMvgqVlVCFc,10598
 agent_cli/server/tts/model_manager.py,sha256=TlBc0Q1iitDfNi8uYXyb1CaJr8Kt62SDyFIsCyVm7DY,6116

@@ -188,8 +188,8 @@ agent_cli/services/asr.py,sha256=aRaCLVCygsJ15qyQEPECOZsdSrnlLPbyY4RwAqY0qIw,172
 agent_cli/services/llm.py,sha256=i01utl1eYWlM13gvW2eR6ErL_ndH-g0d-BSleZra_7k,7229
 agent_cli/services/tts.py,sha256=NX5Qnq7ddLI3mwm3nzhbR3zB1Os4Ip4sSVSjDZDTBcI,14855
 agent_cli/services/wake_word.py,sha256=JFJ1SA22H4yko9DXiQ1t7fcoxeALLAe3iBrLs0z8rX4,4664
-agent_cli-0.70.
-agent_cli-0.70.
-agent_cli-0.70.
-agent_cli-0.70.
-agent_cli-0.70.
+agent_cli-0.70.4.dist-info/METADATA,sha256=-HsUno3wBQ7XhlByLGAnuPolhntScSuAo7GmmLmb_LY,166392
+agent_cli-0.70.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+agent_cli-0.70.4.dist-info/entry_points.txt,sha256=FUv-fB2atLsPUk_RT4zqnZl1coz4_XHFwRALOKOF38s,97
+agent_cli-0.70.4.dist-info/licenses/LICENSE,sha256=majJU6S9kC8R8bW39NVBHyv32Dq50FL6TDxECG2WVts,1068
+agent_cli-0.70.4.dist-info/RECORD,,

{agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/WHEEL
File without changes

{agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/entry_points.txt
File without changes

{agent_cli-0.70.2.dist-info → agent_cli-0.70.4.dist-info}/licenses/LICENSE
File without changes