agent-cli 0.70.3__py3-none-any.whl → 0.70.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agent_cli/agents/assistant.py CHANGED
@@ -306,7 +306,7 @@ def assistant(
     # --- General Options ---
     save_file: Path | None = opts.SAVE_FILE,
     clipboard: bool = opts.CLIPBOARD,
-    log_level: str = opts.LOG_LEVEL,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/agents/autocorrect.py CHANGED
@@ -233,7 +233,7 @@ def autocorrect(
     llm_gemini_model: str = opts.LLM_GEMINI_MODEL,
     gemini_api_key: str | None = opts.GEMINI_API_KEY,
     # --- General Options ---
-    log_level: str = opts.LOG_LEVEL,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     quiet: bool = opts.QUIET,
     json_output: bool = opts.JSON_OUTPUT,
agent_cli/agents/chat.py CHANGED
@@ -437,7 +437,7 @@ def chat(
     ),
     # --- General Options ---
     save_file: Path | None = opts.SAVE_FILE,
-    log_level: str = opts.LOG_LEVEL,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/agents/memory/proxy.py CHANGED
@@ -68,7 +68,7 @@ def proxy(
         help="Enable automatic git commit of memory changes.",
         rich_help_panel="Memory Configuration",
     ),
-    log_level: str = opts.with_default(opts.LOG_LEVEL, "INFO"),
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     config_file: str | None = opts.CONFIG_FILE,
     print_args: bool = opts.PRINT_ARGS,
 ) -> None:
agent_cli/agents/rag_proxy.py CHANGED
@@ -45,7 +45,7 @@ def rag_proxy(
         help="Port to bind to",
         rich_help_panel="Server Configuration",
     ),
-    log_level: str = opts.with_default(opts.LOG_LEVEL, "INFO"),
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     config_file: str | None = opts.CONFIG_FILE,
     print_args: bool = opts.PRINT_ARGS,
     enable_rag_tools: bool = typer.Option(
agent_cli/agents/speak.py CHANGED
@@ -120,7 +120,7 @@ def speak(
     stop: bool = opts.STOP,
     status: bool = opts.STATUS,
     toggle: bool = opts.TOGGLE,
-    log_level: str = opts.LOG_LEVEL,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     quiet: bool = opts.QUIET,
     json_output: bool = opts.JSON_OUTPUT,
agent_cli/agents/transcribe.py CHANGED
@@ -504,7 +504,7 @@ def transcribe(  # noqa: PLR0912
     toggle: bool = opts.TOGGLE,
     # --- General Options ---
     clipboard: bool = opts.CLIPBOARD,
-    log_level: str = opts.LOG_LEVEL,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/agents/transcribe_daemon.py CHANGED
@@ -361,7 +361,7 @@ def transcribe_daemon(  # noqa: PLR0912
     stop: bool = opts.STOP,
     status: bool = opts.STATUS,
     # --- General Options ---
-    log_level: str = opts.LOG_LEVEL,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file_logging: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/agents/voice_edit.py CHANGED
@@ -221,7 +221,7 @@ def voice_edit(
     # --- General Options ---
     save_file: Path | None = opts.SAVE_FILE,
     clipboard: bool = opts.CLIPBOARD,
-    log_level: str = opts.LOG_LEVEL,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     log_file: str | None = opts.LOG_FILE,
     list_devices: bool = opts.LIST_DEVICES,
     quiet: bool = opts.QUIET,
agent_cli/core/transcription_logger.py CHANGED
@@ -20,7 +20,7 @@ class TranscriptionLogger:

         """
         if log_file is None:
-            log_file = Path.cwd() / "transcription_log.json"
+            log_file = Path.home() / ".config" / "agent-cli" / "transcriptions.jsonl"
         elif isinstance(log_file, str):
             log_file = Path(log_file)

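The default transcription log therefore moves from a transcription_log.json in the current working directory to a JSONL file under the user's config directory. A minimal sketch of working with that layout, assuming the record fields named in the --transcription-log help text further down (timestamps, hostname, model, raw output); the package's actual schema may differ:

```python
# Hedged sketch (not agent-cli's code): appending to and reading a JSONL
# transcription log at ~/.config/agent-cli/transcriptions.jsonl.
import json
import socket
from datetime import datetime, timezone
from pathlib import Path

LOG_PATH = Path.home() / ".config" / "agent-cli" / "transcriptions.jsonl"


def append_entry(model: str, raw_output: str, log_path: Path = LOG_PATH) -> None:
    """Append one JSON object per line (JSONL), creating parent dirs as needed."""
    log_path.parent.mkdir(parents=True, exist_ok=True)
    entry = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "hostname": socket.gethostname(),
        "model": model,
        "raw_output": raw_output,
    }
    with log_path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(entry) + "\n")


def read_entries(log_path: Path = LOG_PATH) -> list[dict]:
    """Read the log back; each non-empty line is one JSON object."""
    if not log_path.exists():
        return []
    return [json.loads(line) for line in log_path.read_text().splitlines() if line]
```

A JSONL file supports this append-one-line-at-a-time pattern without rewriting the whole document, which a single JSON array would require.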
agent_cli/opts.py CHANGED
@@ -2,12 +2,15 @@

 import copy
 from pathlib import Path
+from typing import Literal

 import typer
 from typer.models import OptionInfo

 from agent_cli.constants import DEFAULT_OPENAI_EMBEDDING_MODEL, DEFAULT_OPENAI_MODEL

+LogLevel = Literal["debug", "info", "warning", "error"]
+

 def with_default(option: OptionInfo, default: str) -> OptionInfo:
     """Create a copy of a typer Option with a different default value."""
@@ -20,6 +23,7 @@ def with_default(option: OptionInfo, default: str) -> OptionInfo:
 LLM_PROVIDER: str = typer.Option(
     "ollama",
     "--llm-provider",
+    envvar="LLM_PROVIDER",
     help="The LLM provider to use ('ollama', 'openai', 'gemini').",
     rich_help_panel="Provider Selection",
 )
@@ -33,6 +37,7 @@ ASR_PROVIDER: str = typer.Option(
 TTS_PROVIDER: str = typer.Option(
     "wyoming",
     "--tts-provider",
+    envvar="TTS_PROVIDER",
     help="The TTS provider to use ('wyoming', 'openai', 'kokoro', 'gemini').",
     rich_help_panel="Provider Selection",
 )
@@ -49,12 +54,14 @@ LLM: bool = typer.Option(
 LLM_OLLAMA_MODEL: str = typer.Option(
     "gemma3:4b",
     "--llm-ollama-model",
+    envvar="LLM_OLLAMA_MODEL",
     help="The Ollama model to use. Default is gemma3:4b.",
     rich_help_panel="LLM: Ollama",
 )
 LLM_OLLAMA_HOST: str = typer.Option(
     "http://localhost:11434",
     "--llm-ollama-host",
+    envvar="LLM_OLLAMA_HOST",
     help="The Ollama server host. Default is http://localhost:11434.",
     rich_help_panel="LLM: Ollama",
 )
@@ -62,6 +69,7 @@ LLM_OLLAMA_HOST: str = typer.Option(
 LLM_OPENAI_MODEL: str = typer.Option(
     DEFAULT_OPENAI_MODEL,
     "--llm-openai-model",
+    envvar="LLM_OPENAI_MODEL",
     help="The OpenAI model to use for LLM tasks.",
     rich_help_panel="LLM: OpenAI-compatible",
 )
@@ -83,6 +91,7 @@ OPENAI_BASE_URL: str | None = typer.Option(
 LLM_GEMINI_MODEL: str = typer.Option(
     "gemini-3-flash-preview",
     "--llm-gemini-model",
+    envvar="LLM_GEMINI_MODEL",
     help="The Gemini model to use for LLM tasks.",
     rich_help_panel="LLM: Gemini",
 )
@@ -140,18 +149,21 @@ ASR_WYOMING_PORT: int = typer.Option(
 ASR_OPENAI_MODEL: str = typer.Option(
     "whisper-1",
     "--asr-openai-model",
+    envvar="ASR_OPENAI_MODEL",
     help="The OpenAI model to use for ASR (transcription).",
     rich_help_panel="Audio Input: OpenAI-compatible",
 )
 ASR_OPENAI_BASE_URL: str | None = typer.Option(
     None,
     "--asr-openai-base-url",
+    envvar="ASR_OPENAI_BASE_URL",
     help="Custom base URL for OpenAI-compatible ASR API (e.g., for custom Whisper server: http://localhost:9898).",
     rich_help_panel="Audio Input: OpenAI-compatible",
 )
 ASR_OPENAI_PROMPT: str | None = typer.Option(
     None,
     "--asr-openai-prompt",
+    envvar="ASR_OPENAI_PROMPT",
     help="Custom prompt to guide transcription (optional).",
     rich_help_panel="Audio Input: OpenAI-compatible",
 )
@@ -159,6 +171,7 @@ ASR_OPENAI_PROMPT: str | None = typer.Option(
 ASR_GEMINI_MODEL: str = typer.Option(
     "gemini-3-flash-preview",
     "--asr-gemini-model",
+    envvar="ASR_GEMINI_MODEL",
     help="The Gemini model to use for ASR (transcription).",
     rich_help_panel="Audio Input: Gemini",
 )
@@ -351,9 +364,10 @@ CLIPBOARD: bool = typer.Option(
     help="Copy result to clipboard.",
     rich_help_panel="General Options",
 )
-LOG_LEVEL: str = typer.Option(
-    "WARNING",
+LOG_LEVEL: LogLevel = typer.Option(
+    "info",
     "--log-level",
+    envvar="LOG_LEVEL",
     help="Set logging level.",
     case_sensitive=False,
     rich_help_panel="General Options",
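Taken together, the opts.py hunks make two mechanical changes: provider and model options gain an envvar= fallback, and LOG_LEVEL becomes a Literal-typed choice with lowercase names and a default of info instead of WARNING. A standalone sketch of the pattern, assuming a typer version that accepts typing.Literal for choice options (which the diff itself relies on):

```python
# Hedged sketch mirroring the LOG_LEVEL option above: a Literal-typed
# option renders as [debug|info|warning|error] in --help, and envvar=
# lets the same option be set from the environment.
from typing import Literal

import typer

LogLevel = Literal["debug", "info", "warning", "error"]

app = typer.Typer()


@app.command()
def main(
    log_level: LogLevel = typer.Option(
        "info",
        "--log-level",
        envvar="LOG_LEVEL",
        help="Set logging level.",
        case_sensitive=False,
    ),
) -> None:
    typer.echo(f"Effective log level: {log_level}")


if __name__ == "__main__":
    app()
```

With this shape, `LOG_LEVEL=debug python cli.py` and `python cli.py --log-level debug` resolve to the same value, and any name outside the four choices is rejected at parse time.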
agent_cli/server/cli.py CHANGED
@@ -10,6 +10,7 @@ from typing import Annotated

 import typer

+from agent_cli import opts
 from agent_cli.cli import app as main_app
 from agent_cli.core.deps import requires_extras
 from agent_cli.core.process import set_process_title
@@ -251,14 +252,7 @@ def whisper_cmd(  # noqa: PLR0912, PLR0915
             help="Download model(s) and exit without starting server",
         ),
     ] = False,
-    log_level: Annotated[
-        str,
-        typer.Option(
-            "--log-level",
-            "-l",
-            help="Logging level: debug, info, warning, error",
-        ),
-    ] = "info",
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     backend: Annotated[
         str,
         typer.Option(
@@ -378,6 +372,7 @@ def whisper_cmd(  # noqa: PLR0912, PLR0915
     console.print()
     console.print("[dim]Configuration:[/dim]")
     console.print(f"  Backend: [cyan]{actual_backend}[/cyan]")
+    console.print(f"  Log level: [cyan]{log_level}[/cyan]")
     console.print()
     console.print("[dim]Endpoints:[/dim]")
     console.print(f"  HTTP API: [cyan]http://{host}:{port}[/cyan]")
@@ -436,6 +431,7 @@ def transcribe_proxy_cmd(
         bool,
         typer.Option("--reload", help="Enable auto-reload for development"),
     ] = False,
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
 ) -> None:
     """Run transcription proxy server.

@@ -457,10 +453,12 @@

     """
     _check_server_deps()
+    setup_rich_logging(log_level)

     console.print(
         f"[bold green]Starting Agent CLI transcription proxy on {host}:{port}[/bold green]",
     )
+    console.print(f"[dim]Log level: {log_level}[/dim]")
     if reload:
         console.print("[yellow]Auto-reload enabled for development[/yellow]")

@@ -471,7 +469,7 @@
         host=host,
         port=port,
         reload=reload,
-        log_level="info",
+        log_level=log_level.lower(),
     )


@@ -558,14 +556,7 @@ def tts_cmd(  # noqa: PLR0915
             help="Download model(s) and exit without starting server",
         ),
     ] = False,
-    log_level: Annotated[
-        str,
-        typer.Option(
-            "--log-level",
-            "-l",
-            help="Logging level: debug, info, warning, error",
-        ),
-    ] = "info",
+    log_level: opts.LogLevel = opts.LOG_LEVEL,
     backend: Annotated[
         str,
         typer.Option(
@@ -679,6 +670,7 @@ def tts_cmd(  # noqa: PLR0915
     console.print()
     console.print("[dim]Configuration:[/dim]")
     console.print(f"  Backend: [cyan]{resolved_backend}[/cyan]")
+    console.print(f"  Log level: [cyan]{log_level}[/cyan]")
     console.print()
     console.print("[dim]Endpoints:[/dim]")
     console.print(f"  HTTP API: [cyan]http://{host}:{port}[/cyan]")
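`setup_rich_logging` is called above but not defined in this diff. A plausible minimal shape for such a helper, assuming it maps the chosen level name onto the stdlib root logger via rich's handler (the helper agent-cli actually ships may differ):

```python
# Hedged sketch of a setup_rich_logging-style helper; the real agent-cli
# implementation is not shown in this diff and may differ.
import logging

from rich.logging import RichHandler


def setup_rich_logging(log_level: str) -> None:
    """Configure the root logger with a RichHandler at the given level."""
    logging.basicConfig(
        level=getattr(logging, log_level.upper(), logging.INFO),
        format="%(message)s",
        datefmt="[%X]",
        handlers=[RichHandler(rich_tracebacks=True)],
    )
```

The `.lower()` in the uvicorn call goes the other way: uvicorn expects lowercase level names, so the shared option value is normalized before being passed to `uvicorn.run`.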
agent_cli/server/proxy/api.py CHANGED
@@ -41,6 +41,40 @@ app = FastAPI(
 )


+@app.on_event("startup")
+async def log_effective_config() -> None:
+    """Log effective configuration on startup to help debug env var issues."""
+    (
+        provider_cfg,
+        wyoming_cfg,
+        openai_asr_cfg,
+        gemini_asr_cfg,
+        ollama_cfg,
+        openai_llm_cfg,
+        gemini_llm_cfg,
+        _,
+    ) = _load_transcription_configs()
+
+    LOGGER.info("ASR provider: %s", provider_cfg.asr_provider)
+    if provider_cfg.asr_provider == "wyoming":
+        LOGGER.info("  Wyoming: %s:%d", wyoming_cfg.asr_wyoming_ip, wyoming_cfg.asr_wyoming_port)
+    elif provider_cfg.asr_provider == "openai":
+        LOGGER.info("  Model: %s", openai_asr_cfg.asr_openai_model)
+        LOGGER.info("  Base URL: %s", openai_asr_cfg.openai_base_url or "https://api.openai.com/v1")
+    elif provider_cfg.asr_provider == "gemini":
+        LOGGER.info("  Model: %s", gemini_asr_cfg.asr_gemini_model)
+
+    LOGGER.info("LLM provider: %s", provider_cfg.llm_provider)
+    if provider_cfg.llm_provider == "ollama":
+        LOGGER.info("  Model: %s", ollama_cfg.llm_ollama_model)
+        LOGGER.info("  Host: %s", ollama_cfg.llm_ollama_host)
+    elif provider_cfg.llm_provider == "openai":
+        LOGGER.info("  Model: %s", openai_llm_cfg.llm_openai_model)
+        LOGGER.info("  Base URL: %s", openai_llm_cfg.openai_base_url or "https://api.openai.com/v1")
+    elif provider_cfg.llm_provider == "gemini":
+        LOGGER.info("  Model: %s", gemini_llm_cfg.llm_gemini_model)
+
+
 @app.middleware("http")
 async def log_requests(request: Request, call_next) -> Any:  # type: ignore[no-untyped-def]  # noqa: ANN001
     """Log basic request information."""
@@ -192,6 +226,8 @@ def _load_transcription_configs() -> tuple[
     openai_asr_cfg = config.OpenAIASR(
         asr_openai_model=_cfg("asr_openai_model", defaults, opts.ASR_OPENAI_MODEL),
         openai_api_key=_cfg("openai_api_key", defaults, opts.OPENAI_API_KEY),
+        openai_base_url=_cfg("asr_openai_base_url", defaults, opts.ASR_OPENAI_BASE_URL),
+        asr_openai_prompt=_cfg("asr_openai_prompt", defaults, opts.ASR_OPENAI_PROMPT),
     )
     gemini_asr_cfg = config.GeminiASR(
         asr_gemini_model=_cfg("asr_gemini_model", defaults, opts.ASR_GEMINI_MODEL),
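A side note on the startup hook above: `@app.on_event("startup")` is the long-standing FastAPI mechanism, and newer FastAPI releases recommend a lifespan context manager for the same job. A hedged sketch of the equivalent shape, not what the package ships:

```python
# Hedged sketch: the lifespan-based equivalent of an on_event("startup")
# hook in recent FastAPI versions. agent-cli itself uses on_event above.
from contextlib import asynccontextmanager

from fastapi import FastAPI


@asynccontextmanager
async def lifespan(app: FastAPI):
    # startup work goes here (e.g., logging the effective configuration)
    yield
    # shutdown work goes here


app = FastAPI(lifespan=lifespan)
```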
agent_cli-0.70.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agent-cli
-Version: 0.70.3
+Version: 0.70.4
 Summary: A suite of AI-powered command-line tools for text correction, audio transcription, and voice assistance.
 Project-URL: Homepage, https://github.com/basnijholt/agent-cli
 Author-email: Bas Nijholt <bas@nijho.lt>
@@ -643,17 +643,21 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │
@@ -664,21 +668,25 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │
 │ [env var: GEMINI_API_KEY] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --log-level TEXT Set logging level. │
-│ [default: WARNING] │
-│ --log-file TEXT Path to a file to write logs to. │
-│ --quiet -q Suppress console output from rich. │
-│ --json Output result as JSON for automation. Implies --quiet and │
-│ --no-clipboard. │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, including variables │
-│ taken from the configuration file. │
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write logs to. │
+│ --quiet -q Suppress console output from rich. │
+│ --json Output result as JSON for │
+│ automation. Implies --quiet and │
+│ --no-clipboard. │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -753,6 +761,7 @@ the `[defaults]` section of your configuration file.
 │ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input ──────────────────────────────────────────────────────────────────────────╮
@@ -771,25 +780,32 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 │ --asr-openai-base-url TEXT Custom base URL for OpenAI-compatible ASR API │
 │ (e.g., for custom Whisper server: │
 │ http://localhost:9898). │
+│ [env var: ASR_OPENAI_BASE_URL] │
 │ --asr-openai-prompt TEXT Custom prompt to guide transcription (optional). │
+│ [env var: ASR_OPENAI_PROMPT] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │
@@ -800,6 +816,7 @@ the `[defaults]` section of your configuration file.
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │
@@ -812,21 +829,32 @@ the `[defaults]` section of your configuration file.
 │ will be stopped. If the process is not running, it will be started. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --clipboard --no-clipboard Copy result to clipboard. │
-│ [default: clipboard] │
-│ --log-level TEXT Set logging level. │
-│ [default: WARNING] │
-│ --log-file TEXT Path to a file to write logs to. │
-│ --quiet -q Suppress console output from rich. │
-│ --json Output result as JSON for automation. │
-│ Implies --quiet and --no-clipboard. │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, │
-│ including variables taken from the │
-│ configuration file. │
-│ --transcription-log PATH Path to log transcription results │
-│ with timestamps, hostname, model, and │
-│ raw output. │
+│ --clipboard --no-clipboard Copy result to │
+│ clipboard. │
+│ [default: clipboard] │
+│ --log-level [debug|info|warning| Set logging level. │
+│ error] [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to │
+│ write logs to. │
+│ --quiet -q Suppress console │
+│ output from rich. │
+│ --json Output result as JSON │
+│ for automation. │
+│ Implies --quiet and │
+│ --no-clipboard. │
+│ --config TEXT Path to a TOML │
+│ configuration file. │
+│ --print-args Print the command │
+│ line arguments, │
+│ including variables │
+│ taken from the │
+│ configuration file. │
+│ --transcription-log PATH Path to log │
+│ transcription results │
+│ with timestamps, │
+│ hostname, model, and │
+│ raw output. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -930,6 +958,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input ──────────────────────────────────────────────────────────────────────────╮
@@ -948,25 +977,32 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 │ --asr-openai-base-url TEXT Custom base URL for OpenAI-compatible ASR API │
 │ (e.g., for custom Whisper server: │
 │ http://localhost:9898). │
+│ [env var: ASR_OPENAI_BASE_URL] │
 │ --asr-openai-prompt TEXT Custom prompt to guide transcription (optional). │
+│ [env var: ASR_OPENAI_PROMPT] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │
@@ -977,6 +1013,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │
@@ -991,13 +1028,15 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ --status Check if a background process is running. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --log-level TEXT Set logging level. │
-│ [default: WARNING] │
-│ --log-file TEXT Path to a file to write logs to. │
-│ --quiet -q Suppress console output from rich. │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, including variables │
-│ taken from the configuration file. │
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write logs to. │
+│ --quiet -q Suppress console output from rich. │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -1051,6 +1090,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╭─ Provider Selection ───────────────────────────────────────────────────────────────────╮
 │ --tts-provider TEXT The TTS provider to use ('wyoming', 'openai', 'kokoro', │
 │ 'gemini'). │
+│ [env var: TTS_PROVIDER] │
 │ [default: wyoming] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Output ─────────────────────────────────────────────────────────────────────────╮
@@ -1103,16 +1143,19 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ --list-devices List available audio input and output devices and exit. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --save-file PATH Save TTS response audio to WAV file. │
-│ --log-level TEXT Set logging level. │
-│ [default: WARNING] │
-│ --log-file TEXT Path to a file to write logs to. │
-│ --quiet -q Suppress console output from rich. │
-│ --json Output result as JSON for automation. Implies --quiet and │
-│ --no-clipboard. │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, including variables │
-│ taken from the configuration file. │
+│ --save-file PATH Save TTS response audio to WAV file. │
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write logs to. │
+│ --quiet -q Suppress console output from rich. │
+│ --json Output result as JSON for │
+│ automation. Implies --quiet and │
+│ --no-clipboard. │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Process Management ───────────────────────────────────────────────────────────────────╮
 │ --stop Stop any running background process. │
@@ -1179,9 +1222,11 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 │ --tts-provider TEXT The TTS provider to use ('wyoming', 'openai', 'kokoro', │
 │ 'gemini'). │
+│ [env var: TTS_PROVIDER] │
 │ [default: wyoming] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input ──────────────────────────────────────────────────────────────────────────╮
@@ -1200,21 +1245,26 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │
@@ -1225,6 +1275,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │
@@ -1281,18 +1332,28 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ will be stopped. If the process is not running, it will be started. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --save-file PATH Save TTS response audio to WAV file. │
-│ --clipboard --no-clipboard Copy result to clipboard. │
-│ [default: clipboard] │
-│ --log-level TEXT Set logging level. │
-│ [default: WARNING] │
-│ --log-file TEXT Path to a file to write logs to. │
-│ --quiet -q Suppress console output from rich. │
-│ --json Output result as JSON for automation. │
-│ Implies --quiet and --no-clipboard. │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, including │
-│ variables taken from the configuration file. │
+│ --save-file PATH Save TTS response audio │
+│ to WAV file. │
+│ --clipboard --no-clipboard Copy result to │
+│ clipboard. │
+│ [default: clipboard] │
+│ --log-level [debug|info|warning|erro Set logging level. │
+│ r] [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write │
+│ logs to. │
+│ --quiet -q Suppress console output │
+│ from rich. │
+│ --json Output result as JSON │
+│ for automation. Implies │
+│ --quiet and │
+│ --no-clipboard. │
+│ --config TEXT Path to a TOML │
+│ configuration file. │
+│ --print-args Print the command line │
+│ arguments, including │
+│ variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -1347,9 +1408,11 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 │ --tts-provider TEXT The TTS provider to use ('wyoming', 'openai', 'kokoro', │
 │ 'gemini'). │
+│ [env var: TTS_PROVIDER] │
 │ [default: wyoming] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Wake Word ────────────────────────────────────────────────────────────────────────────╮
@@ -1377,21 +1440,26 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │
@@ -1402,6 +1470,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │
@@ -1458,16 +1527,24 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ will be stopped. If the process is not running, it will be started. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --save-file PATH Save TTS response audio to WAV file. │
-│ --clipboard --no-clipboard Copy result to clipboard. │
-│ [default: clipboard] │
-│ --log-level TEXT Set logging level. │
-│ [default: WARNING] │
-│ --log-file TEXT Path to a file to write logs to. │
-│ --quiet -q Suppress console output from rich. │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, including │
-│ variables taken from the configuration file. │
+│ --save-file PATH Save TTS response audio │
+│ to WAV file. │
+│ --clipboard --no-clipboard Copy result to │
+│ clipboard. │
+│ [default: clipboard] │
+│ --log-level [debug|info|warning|erro Set logging level. │
+│ r] [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write │
+│ logs to. │
+│ --quiet -q Suppress console output │
+│ from rich. │
+│ --config TEXT Path to a TOML │
+│ configuration file. │
+│ --print-args Print the command line │
+│ arguments, including │
+│ variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -1529,9 +1606,11 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ [env var: ASR_PROVIDER] │
 │ [default: wyoming] │
 │ --llm-provider TEXT The LLM provider to use ('ollama', 'openai', 'gemini'). │
+│ [env var: LLM_PROVIDER] │
 │ [default: ollama] │
 │ --tts-provider TEXT The TTS provider to use ('wyoming', 'openai', 'kokoro', │
 │ 'gemini'). │
+│ [env var: TTS_PROVIDER] │
 │ [default: wyoming] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input ──────────────────────────────────────────────────────────────────────────╮
@@ -1550,25 +1629,32 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: OpenAI-compatible ───────────────────────────────────────────────────────╮
 │ --asr-openai-model TEXT The OpenAI model to use for ASR (transcription). │
+│ [env var: ASR_OPENAI_MODEL] │
 │ [default: whisper-1] │
 │ --asr-openai-base-url TEXT Custom base URL for OpenAI-compatible ASR API │
 │ (e.g., for custom Whisper server: │
 │ http://localhost:9898). │
+│ [env var: ASR_OPENAI_BASE_URL] │
 │ --asr-openai-prompt TEXT Custom prompt to guide transcription (optional). │
+│ [env var: ASR_OPENAI_PROMPT] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ Audio Input: Gemini ──────────────────────────────────────────────────────────────────╮
 │ --asr-gemini-model TEXT The Gemini model to use for ASR (transcription). │
+│ [env var: ASR_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Ollama ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-ollama-model TEXT The Ollama model to use. Default is gemma3:4b. │
+│ [env var: LLM_OLLAMA_MODEL] │
 │ [default: gemma3:4b] │
 │ --llm-ollama-host TEXT The Ollama server host. Default is │
 │ http://localhost:11434. │
+│ [env var: LLM_OLLAMA_HOST] │
 │ [default: http://localhost:11434] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: OpenAI-compatible ───────────────────────────────────────────────────────────────╮
 │ --llm-openai-model TEXT The OpenAI model to use for LLM tasks. │
+│ [env var: LLM_OPENAI_MODEL] │
 │ [default: gpt-5-mini] │
 │ --openai-api-key TEXT Your OpenAI API key. Can also be set with the │
 │ OPENAI_API_KEY environment variable. │
@@ -1579,6 +1665,7 @@ uv tool install "agent-cli[vad]" -p 3.13
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ LLM: Gemini ──────────────────────────────────────────────────────────────────────────╮
 │ --llm-gemini-model TEXT The Gemini model to use for LLM tasks. │
+│ [env var: LLM_GEMINI_MODEL] │
 │ [default: gemini-3-flash-preview] │
 │ --gemini-api-key TEXT Your Gemini API key. Can also be set with the │
 │ GEMINI_API_KEY environment variable. │
@@ -1642,14 +1729,16 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ [default: 50] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --save-file PATH Save TTS response audio to WAV file. │
-│ --log-level TEXT Set logging level. │
-│ [default: WARNING] │
-│ --log-file TEXT Path to a file to write logs to. │
-│ --quiet -q Suppress console output from rich. │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, including variables │
-│ taken from the configuration file. │
+│ --save-file PATH Save TTS response audio to WAV file. │
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --log-file TEXT Path to a file to write logs to. │
+│ --quiet -q Suppress console output from rich. │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -1735,11 +1824,13 @@ uv tool install "agent-cli[vad]" -p 3.13
 │ [default: 8000] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --log-level TEXT Set logging level. │
-│ [default: INFO] │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, including variables taken │
-│ from the configuration file. │
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -1875,11 +1966,13 @@ The `memory proxy` command is the core feature—a middleware server that gives
 │ [default: 8100] │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 ╭─ General Options ──────────────────────────────────────────────────────────────────────╮
-│ --log-level TEXT Set logging level. │
-│ [default: INFO] │
-│ --config TEXT Path to a TOML configuration file. │
-│ --print-args Print the command line arguments, including variables taken │
-│ from the configuration file. │
+│ --log-level [debug|info|warning|error] Set logging level. │
+│ [env var: LOG_LEVEL] │
+│ [default: info] │
+│ --config TEXT Path to a TOML configuration file. │
+│ --print-args Print the command line arguments, │
+│ including variables taken from the │
+│ configuration file. │
 ╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
agent_cli-0.70.4.dist-info/RECORD CHANGED
@@ -9,7 +9,7 @@ agent_cli/config_cmd.py,sha256=CiHk1WxtvT21QeMuklTTMCmAdNwjeYENO_w_Qbiys54,9579
 agent_cli/constants.py,sha256=-Q17N6qKIGqPDsu3FxpIKP33G0Cs0RUJlHwYNHxVxek,843
 agent_cli/docs_gen.py,sha256=j6mBHwoPcQzMdgIWi_bB2A6yOyhvmW_cntRfwUg_8p8,13374
 agent_cli/example-config.toml,sha256=xd9BXeOqdYx4xFJt58VBs2I49ESy6dF4-mWF_g8sM9o,7552
-agent_cli/opts.py,sha256=Sa9sclTRhCr9ad1YcbV-qz7BTtlUmp0uwfAD1fZ3xng,12201
+agent_cli/opts.py,sha256=NmMxW1Vp52jiSz-wi6PQjurpiDq6p1HqSnoqDBOhNxo,12616
 agent_cli/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agent_cli/_requirements/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agent_cli/_requirements/audio.txt,sha256=KAOtFaFClkj2EAesWI61LZ4N-T1WQvHtvf1sNtDxVgQ,1522
@@ -25,17 +25,17 @@ agent_cli/_requirements/speed.txt,sha256=KwBTrZFXWtgwJ5zrcNtm45zfqvNK0trcR1SbV-w
 agent_cli/_requirements/vad.txt,sha256=HN7OB8cu5cWuVPhoKWye73I9lAWPE-ijeXeVSshCsxs,4017
 agent_cli/agents/__init__.py,sha256=c1rnncDW5pBvP6BiLzFVpLWDNZzFRaUA7-a97avFVAs,321
 agent_cli/agents/_voice_agent_common.py,sha256=PUAztW84Xf9U7d0C_K5cL7I8OANIE1H6M8dFD_cRqps,4360
-agent_cli/agents/assistant.py,sha256=dXExb-UrWdMiEcI1c6bXvMhii1iGg49Zw4On6-inQUE,14046
-agent_cli/agents/autocorrect.py,sha256=Voxv43g-IbOhCGKAyWHa4WoQ3-dJEe076XLEmEwg9eg,8990
-agent_cli/agents/chat.py,sha256=7qwDBXFQvhqtlRA4IFXJS24hZ9PN6lb0g9boxwHUS1c,17257
-agent_cli/agents/rag_proxy.py,sha256=47maSXkebpRzt4acvsbIN_sPfIBOHcpBED_PHOlgiSs,4434
-agent_cli/agents/speak.py,sha256=xG2ppthpdmpzsYkNeUynwDmWe8JTpJYhdKs3L1jR6eY,7101
-agent_cli/agents/transcribe.py,sha256=3N9cA9ba-3acH4-UPEw-LYIv-HQhzb4bE0efxZzSwqQ,24654
-agent_cli/agents/transcribe_daemon.py,sha256=FPqcAfGNK_PyxfgQw1b-xph4JrFeCvKy8e9b1HIhRUU,17668
-agent_cli/agents/voice_edit.py,sha256=bOZzeRici5GFq9qPUwHTQiW1TFhHcm_AcLG-EdKxwgk,11137
+agent_cli/agents/assistant.py,sha256=uElnNG-ZcRR7QHf8Qcjez0ahF2nEyMHbcEfNE_8dcbY,14056
+agent_cli/agents/autocorrect.py,sha256=yx5Yh8xTtzO-5lyFJBlCgx0yUGO6eYxLSK3RLlrcXlU,9000
+agent_cli/agents/chat.py,sha256=s4g4IvmmLnOOkW5TNX4Mro-AyniiSWw3rKAH7P5DvmA,17267
+agent_cli/agents/rag_proxy.py,sha256=r_Azrw2wNwwmMCOUftHdhFDLbFqZpIUm_rM7Q2eM8Jw,4417
+agent_cli/agents/speak.py,sha256=RgYOGOs7IbNwzLsiLdhXQHK6FGUCPkWLpUBOH1RjQ3s,7111
+agent_cli/agents/transcribe.py,sha256=OYY50085uxP3AdCa36n_bBb1FQ_R1m2WdDLgiNN74js,24664
+agent_cli/agents/transcribe_daemon.py,sha256=pHDs1tSDceUUJBkdE-1sO8VHdeYaALafScL_-qu5bhY,17678
+agent_cli/agents/voice_edit.py,sha256=fL4SdxEfwCMazxtgIbjE9K8Fbm4tpJ4eSsgIkZJ2LB8,11147
 agent_cli/agents/memory/__init__.py,sha256=RkJYhq0SI_62hgUHicvtkJ1k7k9JEvKLqr0302gxylw,805
 agent_cli/agents/memory/add.py,sha256=lk6q2SmuwNNFAoDnfOQxPpNHbbHjekGCyKaWUgC9x-8,6210
-agent_cli/agents/memory/proxy.py,sha256=metocNGxTFbpLQ-E4dRhjj8YMRNgPf6WYjegTMOHk_E,6326
+agent_cli/agents/memory/proxy.py,sha256=-IXSB2QeVlKOIwa5ha0d799JlfhnergufV1wC2V2eDg,6309
 agent_cli/core/__init__.py,sha256=c_knH7u9QgjsfMIil9NP4bVizHawLUMYoQWU4H9vMlQ,46
 agent_cli/core/audio.py,sha256=43FpYe2Wu_BYK9xJ_55V4xHjHJeFwQ5aM-CQzlTryt8,15168
 agent_cli/core/audio_format.py,sha256=zk3qlYMAlKYPz1enrjihQQspl_C218v1Rbcm7Uktlew,8773
@@ -45,7 +45,7 @@ agent_cli/core/openai_proxy.py,sha256=VOqh40vyVrOa_u3CvXgolf0Bv-rALIXbKMQfjTBtH3
 agent_cli/core/process.py,sha256=Ril7HqMJc-F1E-66pHrOi27gEZD3ZR_ZYhGnN61SVSs,5937
 agent_cli/core/reranker.py,sha256=_RPjLKR5ej6L0Lb05tQFoVSyXOt1cXXn3ydEkCIXj2A,3851
 agent_cli/core/sse.py,sha256=SddiWjHh7DENb1wmvf3wDvX-OhbaC61EceFwQxmDUEo,2232
-agent_cli/core/transcription_logger.py,sha256=PVVfQK0leoB9JwUu5jYAhyRDBVq9exiPC0_KNXV8ggY,2057
+agent_cli/core/transcription_logger.py,sha256=w8HbHDYFN_qZMfi6Wwpvw3THa-GxkVTzTPhZfE84o-Q,2082
 agent_cli/core/utils.py,sha256=p3OJrNcU6iwqR0C7Q5Ab3_rwJBmP0EbIYT82a9scPSI,16896
 agent_cli/core/vad.py,sha256=mM8VtC4TS3OpohSrtOw38M7T8f6T625FkIKim7Q_EoM,6591
 agent_cli/core/watch.py,sha256=PakxMyqJycN6bkE6fxeo0qe9zj5wjuRly3p7rN9UgjM,2000
@@ -156,13 +156,13 @@ agent_cli/scripts/nvidia-asr-server/server.py,sha256=kPNQIVF3exblvqMtIVk38Y6sZy2
 agent_cli/scripts/nvidia-asr-server/shell.nix,sha256=IT20j5YNj_wc7MdXi7ndogGodDNSGwyq8G0bNoZEpmg,1003
 agent_cli/scripts/nvidia-asr-server/uv.lock,sha256=5WWaqWOuV_moMPC-LIZK-A-Y5oaHr1tUn_vbR-IupzY,728608
 agent_cli/server/__init__.py,sha256=NZuJHlLHck9KWrepNZHrJONptYCQI9P-uTqknSFI5Ds,71
-agent_cli/server/cli.py,sha256=sCSXk6iSjlxRhEeBI1Vt_3RhBVcp92pid_T0cusdFPU,22897
+agent_cli/server/cli.py,sha256=Bre2Bcofne3MS9jFAEjf9dWgpJYsgHvWlbEWDp4a1wA,22894
 agent_cli/server/common.py,sha256=hBBp6i-2-yhDY260ffwmFBg_ndcoT5SNcfa6uFyP7Vc,6391
 agent_cli/server/model_manager.py,sha256=93l_eeZeqnPALyDIK24or61tvded9TbM8tnde0okVjY,9225
 agent_cli/server/model_registry.py,sha256=KrRV1XxbFYuXu5rJlHFh6PTl_2BKiWnWsaNrf-0c6wQ,6988
 agent_cli/server/streaming.py,sha256=nX_kMNQBxdzvPKUcFQWth7dDBYALRGy_j9mDunKXaJE,2191
 agent_cli/server/proxy/__init__.py,sha256=fOq5bs5B9XKfr7XbdZ825RIxheAJA0VKmKantykjTE4,83
-agent_cli/server/proxy/api.py,sha256=ZsfG7fEF15JE5cVFp2K8Ivadbdxw2q9LHGsTdi4EU2w,13650
+agent_cli/server/proxy/api.py,sha256=22TbZlhR9kAVLZDUkw7Ezm0rfd6WFIXp-O00-5kd7SU,15299
 agent_cli/server/tts/__init__.py,sha256=o1TlYmsGeqo-LDZg86KEhLJYWzN7jtMNGS5S8HFUSkw,93
 agent_cli/server/tts/api.py,sha256=GLOJEC4yMlWjz6AjrWBdZLs368DddP27PMvgqVlVCFc,10598
 agent_cli/server/tts/model_manager.py,sha256=TlBc0Q1iitDfNi8uYXyb1CaJr8Kt62SDyFIsCyVm7DY,6116
@@ -188,8 +188,8 @@ agent_cli/services/asr.py,sha256=aRaCLVCygsJ15qyQEPECOZsdSrnlLPbyY4RwAqY0qIw,172
 agent_cli/services/llm.py,sha256=i01utl1eYWlM13gvW2eR6ErL_ndH-g0d-BSleZra_7k,7229
 agent_cli/services/tts.py,sha256=NX5Qnq7ddLI3mwm3nzhbR3zB1Os4Ip4sSVSjDZDTBcI,14855
 agent_cli/services/wake_word.py,sha256=JFJ1SA22H4yko9DXiQ1t7fcoxeALLAe3iBrLs0z8rX4,4664
-agent_cli-0.70.3.dist-info/METADATA,sha256=ZRTUfqIxUcFdkbEMuk7-R_-z6cIZQF3y53D8q8u75u0,157557
-agent_cli-0.70.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-agent_cli-0.70.3.dist-info/entry_points.txt,sha256=FUv-fB2atLsPUk_RT4zqnZl1coz4_XHFwRALOKOF38s,97
-agent_cli-0.70.3.dist-info/licenses/LICENSE,sha256=majJU6S9kC8R8bW39NVBHyv32Dq50FL6TDxECG2WVts,1068
-agent_cli-0.70.3.dist-info/RECORD,,
+agent_cli-0.70.4.dist-info/METADATA,sha256=-HsUno3wBQ7XhlByLGAnuPolhntScSuAo7GmmLmb_LY,166392
+agent_cli-0.70.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+agent_cli-0.70.4.dist-info/entry_points.txt,sha256=FUv-fB2atLsPUk_RT4zqnZl1coz4_XHFwRALOKOF38s,97
+agent_cli-0.70.4.dist-info/licenses/LICENSE,sha256=majJU6S9kC8R8bW39NVBHyv32Dq50FL6TDxECG2WVts,1068
+agent_cli-0.70.4.dist-info/RECORD,,