agent_cli-0.70.2-py3-none-any.whl → agent_cli-0.72.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (35)
  1. agent_cli/_extras.json +4 -3
  2. agent_cli/_requirements/memory.txt +14 -1
  3. agent_cli/_requirements/rag.txt +14 -1
  4. agent_cli/_requirements/vad.txt +1 -85
  5. agent_cli/_requirements/wyoming.txt +71 -0
  6. agent_cli/agents/assistant.py +24 -28
  7. agent_cli/agents/autocorrect.py +30 -4
  8. agent_cli/agents/chat.py +45 -15
  9. agent_cli/agents/memory/__init__.py +19 -1
  10. agent_cli/agents/memory/add.py +3 -3
  11. agent_cli/agents/memory/proxy.py +20 -11
  12. agent_cli/agents/rag_proxy.py +42 -10
  13. agent_cli/agents/speak.py +23 -3
  14. agent_cli/agents/transcribe.py +21 -3
  15. agent_cli/agents/transcribe_daemon.py +34 -22
  16. agent_cli/agents/voice_edit.py +18 -10
  17. agent_cli/cli.py +25 -2
  18. agent_cli/config_cmd.py +30 -11
  19. agent_cli/core/deps.py +6 -3
  20. agent_cli/core/transcription_logger.py +1 -1
  21. agent_cli/core/vad.py +6 -24
  22. agent_cli/dev/cli.py +295 -65
  23. agent_cli/docs_gen.py +18 -8
  24. agent_cli/install/extras.py +44 -13
  25. agent_cli/install/hotkeys.py +22 -11
  26. agent_cli/install/services.py +54 -14
  27. agent_cli/opts.py +43 -22
  28. agent_cli/server/cli.py +128 -62
  29. agent_cli/server/proxy/api.py +77 -19
  30. agent_cli/services/__init__.py +46 -5
  31. {agent_cli-0.70.2.dist-info → agent_cli-0.72.1.dist-info}/METADATA +627 -246
  32. {agent_cli-0.70.2.dist-info → agent_cli-0.72.1.dist-info}/RECORD +35 -34
  33. {agent_cli-0.70.2.dist-info → agent_cli-0.72.1.dist-info}/WHEEL +0 -0
  34. {agent_cli-0.70.2.dist-info → agent_cli-0.72.1.dist-info}/entry_points.txt +0 -0
  35. {agent_cli-0.70.2.dist-info → agent_cli-0.72.1.dist-info}/licenses/LICENSE +0 -0
agent_cli/_extras.json CHANGED
@@ -1,8 +1,9 @@
  {
- "audio": ["Audio recording/playback with Wyoming protocol", ["numpy", "sounddevice", "wyoming"]],
+ "wyoming": ["Wyoming protocol for ASR/TTS servers", ["wyoming"]],
+ "audio": ["Local audio recording/playback", ["numpy", "sounddevice", "wyoming"]],
  "llm": ["LLM framework (pydantic-ai)", ["pydantic_ai"]],
- "memory": ["Long-term memory proxy", ["chromadb", "yaml"]],
- "rag": ["RAG proxy (ChromaDB, embeddings)", ["chromadb"]],
+ "memory": ["Long-term memory proxy", ["chromadb", "openai", "yaml"]],
+ "rag": ["RAG proxy (ChromaDB, embeddings)", ["chromadb", "openai"]],
  "server": ["FastAPI server components", ["fastapi"]],
  "speed": ["Audio speed adjustment (audiostretchy)", ["audiostretchy"]],
  "piper": ["Local Piper TTS", ["piper"]],
agent_cli/_requirements/memory.txt CHANGED
@@ -7,6 +7,7 @@ annotated-types==0.7.0
  anyio==4.12.1
  # via
  # httpx
+ # openai
  # starlette
  # watchfiles
  attrs==25.4.0
@@ -45,7 +46,9 @@ colorama==0.4.6 ; os_name == 'nt' or sys_platform == 'win32'
  coloredlogs==15.0.1
  # via onnxruntime
  distro==1.9.0
- # via posthog
+ # via
+ # openai
+ # posthog
  dnspython==2.8.0
  # via email-validator
  dotenv==0.9.9
@@ -96,6 +99,7 @@ httpx==0.28.1
  # chromadb
  # fastapi
  # fastapi-cloud-cli
+ # openai
  huggingface-hub==0.36.0
  # via
  # agent-cli
@@ -115,6 +119,8 @@ importlib-resources==6.5.2
  # via chromadb
  jinja2==3.1.6
  # via fastapi
+ jiter==0.12.0
+ # via openai
  jsonschema==4.26.0
  # via chromadb
  jsonschema-specifications==2025.9.1
@@ -142,6 +148,8 @@ onnxruntime==1.20.1
  # via
  # agent-cli
  # chromadb
+ openai==2.15.0
+ # via agent-cli
  opentelemetry-api==1.39.1
  # via
  # chromadb
@@ -195,6 +203,7 @@ pydantic==2.12.5
  # chromadb
  # fastapi
  # fastapi-cloud-cli
+ # openai
  # pydantic-extra-types
  # pydantic-settings
  pydantic-core==2.41.5
@@ -281,6 +290,8 @@ six==1.17.0
  # kubernetes
  # posthog
  # python-dateutil
+ sniffio==1.3.1
+ # via openai
  starlette==0.50.0
  # via fastapi
  sympy==1.14.0
@@ -295,6 +306,7 @@ tqdm==4.67.1
  # via
  # chromadb
  # huggingface-hub
+ # openai
  # transformers
  transformers==4.57.5
  # via agent-cli
@@ -313,6 +325,7 @@ typing-extensions==4.15.0
  # fastapi
  # grpcio
  # huggingface-hub
+ # openai
  # opentelemetry-api
  # opentelemetry-exporter-otlp-proto-grpc
  # opentelemetry-sdk
agent_cli/_requirements/rag.txt CHANGED
@@ -7,6 +7,7 @@ annotated-types==0.7.0
  anyio==4.12.1
  # via
  # httpx
+ # openai
  # starlette
  # watchfiles
  attrs==25.4.0
@@ -61,7 +62,9 @@ cryptography==46.0.3
  defusedxml==0.7.1
  # via markitdown
  distro==1.9.0
- # via posthog
+ # via
+ # openai
+ # posthog
  dnspython==2.8.0
  # via email-validator
  dotenv==0.9.9
@@ -112,6 +115,7 @@ httpx==0.28.1
  # chromadb
  # fastapi
  # fastapi-cloud-cli
+ # openai
  huggingface-hub==0.36.0
  # via
  # agent-cli
@@ -131,6 +135,8 @@ importlib-resources==6.5.2
  # via chromadb
  jinja2==3.1.6
  # via fastapi
+ jiter==0.12.0
+ # via openai
  jsonschema==4.26.0
  # via chromadb
  jsonschema-specifications==2025.9.1
@@ -173,6 +179,8 @@ onnxruntime==1.20.1
  # chromadb
  # magika
  # markitdown
+ openai==2.15.0
+ # via agent-cli
  opentelemetry-api==1.39.1
  # via
  # chromadb
@@ -232,6 +240,7 @@ pydantic==2.12.5
  # chromadb
  # fastapi
  # fastapi-cloud-cli
+ # openai
  # pydantic-extra-types
  # pydantic-settings
  pydantic-core==2.41.5
@@ -322,6 +331,8 @@ six==1.17.0
  # markdownify
  # posthog
  # python-dateutil
+ sniffio==1.3.1
+ # via openai
  soupsieve==2.8.1
  # via beautifulsoup4
  starlette==0.50.0
@@ -338,6 +349,7 @@ tqdm==4.67.1
  # via
  # chromadb
  # huggingface-hub
+ # openai
  # transformers
  transformers==4.57.5
  # via agent-cli
@@ -357,6 +369,7 @@ typing-extensions==4.15.0
  # fastapi
  # grpcio
  # huggingface-hub
+ # openai
  # opentelemetry-api
  # opentelemetry-exporter-otlp-proto-grpc
  # opentelemetry-sdk
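Both requirement sets gain `openai==2.15.0` as a direct dependency of agent-cli (plus its transitive deps `jiter` and `sniffio`), matching the `openai` entry added to the `memory` and `rag` extras in `_extras.json`. The diff doesn't show the call sites; a plausible pattern, given the `--openai-base-url` flag in the memory proxy quick start later in this diff, is the standard OpenAI-compatible client:

```python
# Illustrative only: openai-python pointed at an OpenAI-compatible endpoint
# (here a local Ollama server). The model name and base URL are assumptions,
# not values taken from this diff.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:11434/v1", api_key="unused")
resp = client.embeddings.create(model="nomic-embed-text", input="User prefers dark mode")
print(len(resp.data[0].embedding))  # embedding dimensionality
```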
agent_cli/_requirements/vad.txt CHANGED
@@ -14,89 +14,22 @@ click==8.3.1
  # typer-slim
  colorama==0.4.6 ; sys_platform == 'win32'
  # via click
- coloredlogs==15.0.1
- # via onnxruntime
  dotenv==0.9.9
  # via agent-cli
- filelock==3.20.3
- # via torch
- flatbuffers==25.12.19
- # via onnxruntime
- fsspec==2026.1.0
- # via torch
  h11==0.16.0
  # via httpcore
  httpcore==1.0.9
  # via httpx
  httpx==0.28.1
  # via agent-cli
- humanfriendly==10.0
- # via coloredlogs
  idna==3.11
  # via
  # anyio
  # httpx
- jinja2==3.1.6
- # via torch
  markdown-it-py==4.0.0
  # via rich
- markupsafe==3.0.3
- # via jinja2
  mdurl==0.1.2
  # via markdown-it-py
- mpmath==1.3.0
- # via sympy
- networkx==3.6.1
- # via torch
- numpy==2.3.5
- # via onnxruntime
- nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via
- # nvidia-cudnn-cu12
- # nvidia-cusolver-cu12
- # torch
- nvidia-cuda-cupti-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-cuda-nvrtc-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-cuda-runtime-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-cudnn-cu12==9.10.2.21 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-cufft-cu12==11.3.3.83 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-cufile-cu12==1.13.1.3 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-curand-cu12==10.3.9.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-cusolver-cu12==11.7.3.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-cusparse-cu12==12.5.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via
- # nvidia-cusolver-cu12
- # torch
- nvidia-cusparselt-cu12==0.7.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-nccl-cu12==2.27.5 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-nvjitlink-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via
- # nvidia-cufft-cu12
- # nvidia-cusolver-cu12
- # nvidia-cusparse-cu12
- # torch
- nvidia-nvshmem-cu12==3.3.20 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
- onnxruntime==1.20.1
- # via silero-vad
- packaging==25.0
- # via
- # onnxruntime
- # silero-vad
- protobuf==6.33.4
- # via onnxruntime
  psutil==7.2.1 ; sys_platform == 'win32'
  # via agent-cli
  pydantic==2.12.5
@@ -107,8 +40,6 @@ pygments==2.19.2
  # via rich
  pyperclip==1.11.0
  # via agent-cli
- pyreadline3==3.5.4 ; sys_platform == 'win32'
- # via humanfriendly
  python-dotenv==1.2.1
  # via dotenv
  rich==14.2.0
@@ -118,26 +49,12 @@ rich==14.2.0
  # typer-slim
  setproctitle==1.3.7
  # via agent-cli
- setuptools==80.9.0 ; python_full_version >= '3.12'
- # via torch
  shellingham==1.5.4
  # via
  # typer
  # typer-slim
- silero-vad==6.2.0
+ silero-vad-lite==0.2.1
  # via agent-cli
- sympy==1.14.0
- # via
- # onnxruntime
- # torch
- torch==2.9.1
- # via
- # silero-vad
- # torchaudio
- torchaudio==2.9.1
- # via silero-vad
- triton==3.5.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
- # via torch
  typer==0.21.1
  # via agent-cli
  typer-slim==0.21.1
@@ -147,7 +64,6 @@ typing-extensions==4.15.0
  # anyio
  # pydantic
  # pydantic-core
- # torch
  # typer
  # typer-slim
  # typing-inspection
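The net effect on the `vad` extra: torch, torchaudio, triton, onnxruntime, and the whole `nvidia-*-cu12` wheel stack drop out, leaving `silero-vad-lite==0.2.1` as the only VAD-specific dependency. A quick way to see the difference yourself (uv is the tool the project already uses for these lockfiles; exact resolutions vary by platform):

```bash
# Resolve each version's vad extra and compare the pinned sets
echo "agent-cli[vad]==0.70.2" | uv pip compile -   # includes torch + nvidia-*-cu12 wheels
echo "agent-cli[vad]==0.72.1" | uv pip compile -   # includes silero-vad-lite instead
```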
agent_cli/_requirements/wyoming.txt ADDED
@@ -0,0 +1,71 @@
+ # This file was autogenerated by uv via the following command:
+ # uv export --extra wyoming --no-dev --no-emit-project --no-hashes
+ annotated-types==0.7.0
+ # via pydantic
+ anyio==4.12.1
+ # via httpx
+ certifi==2026.1.4
+ # via
+ # httpcore
+ # httpx
+ click==8.3.1
+ # via
+ # typer
+ # typer-slim
+ colorama==0.4.6 ; sys_platform == 'win32'
+ # via click
+ dotenv==0.9.9
+ # via agent-cli
+ h11==0.16.0
+ # via httpcore
+ httpcore==1.0.9
+ # via httpx
+ httpx==0.28.1
+ # via agent-cli
+ idna==3.11
+ # via
+ # anyio
+ # httpx
+ markdown-it-py==4.0.0
+ # via rich
+ mdurl==0.1.2
+ # via markdown-it-py
+ psutil==7.2.1 ; sys_platform == 'win32'
+ # via agent-cli
+ pydantic==2.12.5
+ # via agent-cli
+ pydantic-core==2.41.5
+ # via pydantic
+ pygments==2.19.2
+ # via rich
+ pyperclip==1.11.0
+ # via agent-cli
+ python-dotenv==1.2.1
+ # via dotenv
+ rich==14.2.0
+ # via
+ # agent-cli
+ # typer
+ # typer-slim
+ setproctitle==1.3.7
+ # via agent-cli
+ shellingham==1.5.4
+ # via
+ # typer
+ # typer-slim
+ typer==0.21.1
+ # via agent-cli
+ typer-slim==0.21.1
+ # via agent-cli
+ typing-extensions==4.15.0
+ # via
+ # anyio
+ # pydantic
+ # pydantic-core
+ # typer
+ # typer-slim
+ # typing-inspection
+ typing-inspection==0.4.2
+ # via pydantic
+ wyoming==1.8.0
+ # via agent-cli
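The new lockfile records its own generation command in its header. To reproduce it after a dependency change (the output path is inferred from this diff's file list, not stated in the file itself):

```bash
uv export --extra wyoming --no-dev --no-emit-project --no-hashes \
  > agent_cli/_requirements/wyoming.txt
```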
agent_cli/agents/assistant.py CHANGED
@@ -1,29 +1,4 @@
- r"""Wake word-based voice assistant that records when wake word is detected.
-
- This agent uses Wyoming wake word detection to implement a hands-free voice assistant that:
- 1. Continuously listens for a wake word
- 2. When the wake word is detected, starts recording user speech
- 3. When the wake word is detected again, stops recording and processes the speech
- 4. Sends the recorded speech to ASR for transcription
- 5. Optionally processes the transcript with an LLM and speaks the response
-
- WORKFLOW:
- 1. Agent starts listening for the specified wake word
- 2. First wake word detection -> start recording user speech
- 3. Second wake word detection -> stop recording and process the speech
- 4. Transcribe the recorded speech using Wyoming ASR
- 5. Optionally process with LLM and respond with TTS
-
- USAGE:
- - Start the agent: assistant --wake-word "ok_nabu" --input-device-index 1
- - The agent runs continuously until stopped with Ctrl+C or --stop
- - Uses background process management for daemon-like operation
-
- REQUIREMENTS:
- - Wyoming wake word server (e.g., wyoming-openwakeword)
- - Wyoming ASR server (e.g., wyoming-whisper)
- - Optional: Wyoming TTS server for responses
- """
+ """Wake word-based voice assistant using Wyoming protocol services."""

  from __future__ import annotations

@@ -306,14 +281,35 @@ def assistant(
  # --- General Options ---
  save_file: Path | None = opts.SAVE_FILE,
  clipboard: bool = opts.CLIPBOARD,
- log_level: str = opts.LOG_LEVEL,
+ log_level: opts.LogLevel = opts.LOG_LEVEL,
  log_file: str | None = opts.LOG_FILE,
  list_devices: bool = opts.LIST_DEVICES,
  quiet: bool = opts.QUIET,
  config_file: str | None = opts.CONFIG_FILE,
  print_args: bool = opts.PRINT_ARGS,
  ) -> None:
- """Wake word-based voice assistant using local or remote services."""
+ """Hands-free voice assistant using wake word detection.
+
+ Continuously listens for a wake word, then records your speech until you say
+ the wake word again. The recording is transcribed and sent to an LLM for a
+ conversational response, optionally spoken back via TTS.
+
+ **Conversation flow:**
+ 1. Say wake word → starts recording
+ 2. Speak your question/command
+ 3. Say wake word again → stops recording and processes
+
+ The assistant runs in a loop, ready for the next command after each response.
+ Stop with Ctrl+C or `--stop`.
+
+ **Requirements:**
+ - Wyoming wake word server (e.g., wyoming-openwakeword on port 10400)
+ - Wyoming ASR server (e.g., wyoming-whisper on port 10300)
+ - Optional: TTS server for spoken responses (enable with `--tts`)
+
+ **Example:**
+ `assistant --wake-word ok_nabu --tts --input-device-name USB`
+ """
  if print_args:
  print_command_line_args(locals())

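The new docstring assumes Wyoming servers on their conventional ports (10400 for wake word, 10300 for ASR). For readers unfamiliar with the protocol, here is a minimal sketch of a Wyoming ASR round trip using the `wyoming` package's public event types; this is illustrative client code, not agent-cli's internals, and the WAV path is a placeholder:

```python
# Minimal sketch: transcribe a WAV file against a Wyoming ASR server
# (e.g. wyoming-whisper on its conventional port 10300).
import asyncio
import wave

from wyoming.asr import Transcribe, Transcript
from wyoming.audio import AudioChunk, AudioStart, AudioStop
from wyoming.client import AsyncTcpClient


async def transcribe_wav(path: str, host: str = "localhost", port: int = 10300) -> str:
    async with AsyncTcpClient(host, port) as client:
        await client.write_event(Transcribe().event())  # announce intent
        with wave.open(path, "rb") as wav:
            rate, width, channels = wav.getframerate(), wav.getsampwidth(), wav.getnchannels()
            await client.write_event(AudioStart(rate=rate, width=width, channels=channels).event())
            while chunk := wav.readframes(1024):  # stream raw PCM frames
                await client.write_event(
                    AudioChunk(rate=rate, width=width, channels=channels, audio=chunk).event(),
                )
        await client.write_event(AudioStop().event())  # end of utterance
        while True:  # wait for the transcript event
            event = await client.read_event()
            if event is None:
                msg = "server closed connection before sending a transcript"
                raise ConnectionError(msg)
            if Transcript.is_type(event.type):
                return Transcript.from_event(event).text


print(asyncio.run(transcribe_wav("speech.wav")))
```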
agent_cli/agents/autocorrect.py CHANGED
@@ -1,4 +1,4 @@
- """Read text from clipboard, correct it using a local or remote LLM, and write the result back to the clipboard."""
+ """Fix grammar, spelling, and punctuation in text using an LLM."""

  from __future__ import annotations

@@ -216,7 +216,7 @@ def autocorrect(
  *,
  text: str | None = typer.Argument(
  None,
- help="The text to correct. If not provided, reads from clipboard.",
+ help="Text to correct. If omitted, reads from system clipboard.",
  rich_help_panel="General Options",
  ),
  # --- Provider Selection ---
@@ -233,14 +233,40 @@ def autocorrect(
  llm_gemini_model: str = opts.LLM_GEMINI_MODEL,
  gemini_api_key: str | None = opts.GEMINI_API_KEY,
  # --- General Options ---
- log_level: str = opts.LOG_LEVEL,
+ log_level: opts.LogLevel = opts.LOG_LEVEL,
  log_file: str | None = opts.LOG_FILE,
  quiet: bool = opts.QUIET,
  json_output: bool = opts.JSON_OUTPUT,
  config_file: str | None = opts.CONFIG_FILE,
  print_args: bool = opts.PRINT_ARGS,
  ) -> None:
- """Correct text from clipboard using a local or remote LLM."""
+ """Fix grammar, spelling, and punctuation using an LLM.
+
+ Reads text from clipboard (or argument), sends to LLM for correction,
+ and copies the result back to clipboard. Only makes technical corrections
+ without changing meaning or tone.
+
+ **Workflow:**
+ 1. Read text from clipboard (or `TEXT` argument)
+ 2. Send to LLM for grammar/spelling/punctuation fixes
+ 3. Copy corrected text to clipboard (unless `--json`)
+ 4. Display result
+
+ **Examples:**
+ ```bash
+ # Correct text from clipboard (default)
+ agent-cli autocorrect
+
+ # Correct specific text
+ agent-cli autocorrect "this is incorect"
+
+ # Use OpenAI instead of local Ollama
+ agent-cli autocorrect --llm-provider openai
+
+ # Get JSON output for scripting (disables clipboard)
+ agent-cli autocorrect --json
+ ```
+ """
  if print_args:
  print_command_line_args(locals())

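The `--json` flag makes autocorrect scriptable; since this diff doesn't show the output schema, the pipeline below only pretty-prints whatever the command emits:

```bash
agent-cli autocorrect --json "teh quick brown fox" | jq .
```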
agent_cli/agents/chat.py CHANGED
@@ -1,13 +1,15 @@
- """An chat agent that you can talk to.
-
- This agent will:
- - Listen for your voice command.
- - Transcribe the command.
- - Send the transcription to an LLM.
- - Speak the LLM's response.
- - Remember the conversation history.
- - Attach timestamps to the saved conversation.
- - Format timestamps as "ago" when sending to the LLM.
+ """Voice-based conversational chat agent with memory and tools.
+
+ Runs an interactive voice loop: listens for speech, transcribes it,
+ sends to the LLM (with conversation context), and optionally speaks the response.
+
+ **Available tools** (automatically used by the LLM when relevant):
+ - `add_memory`/`search_memory`/`update_memory` - persistent long-term memory
+ - `duckduckgo_search` - web search for current information
+ - `read_file`/`execute_code` - file access and shell commands
+
+ **Process management**: Use `--toggle` to start/stop via hotkey, `--stop` to terminate,
+ or `--status` to check if running. Useful for binding to a keyboard shortcut.
  """

  from __future__ import annotations
@@ -425,26 +427,54 @@ def chat(
  history_dir: Path = typer.Option(  # noqa: B008
  "~/.config/agent-cli/history",
  "--history-dir",
- help="Directory to store conversation history.",
+ help="Directory for conversation history and long-term memory. "
+ "Both `conversation.json` and `long_term_memory.json` are stored here.",
  rich_help_panel="History Options",
  ),
  last_n_messages: int = typer.Option(
  50,
  "--last-n-messages",
- help="Number of messages to include in the conversation history."
- " Set to 0 to disable history.",
+ help="Number of past messages to include as context for the LLM. "
+ "Set to 0 to start fresh each session (memory tools still persist).",
  rich_help_panel="History Options",
  ),
  # --- General Options ---
  save_file: Path | None = opts.SAVE_FILE,
- log_level: str = opts.LOG_LEVEL,
+ log_level: opts.LogLevel = opts.LOG_LEVEL,
  log_file: str | None = opts.LOG_FILE,
  list_devices: bool = opts.LIST_DEVICES,
  quiet: bool = opts.QUIET,
  config_file: str | None = opts.CONFIG_FILE,
  print_args: bool = opts.PRINT_ARGS,
  ) -> None:
- """An chat agent that you can talk to."""
+ """Voice-based conversational chat agent with memory and tools.
+
+ Runs an interactive loop: listen → transcribe → LLM → speak response.
+ Conversation history is persisted and included as context for continuity.
+
+ **Built-in tools** (LLM uses automatically when relevant):
+
+ - `add_memory`/`search_memory`/`update_memory` - persistent long-term memory
+ - `duckduckgo_search` - web search for current information
+ - `read_file`/`execute_code` - file access and shell commands
+
+ **Process management**: Use `--toggle` to start/stop via hotkey (bind to
+ a keyboard shortcut), `--stop` to terminate, or `--status` to check state.
+
+ **Examples**:
+
+ Use OpenAI-compatible providers for speech and LLM, with TTS enabled:
+
+ agent-cli chat --asr-provider openai --llm-provider openai --tts
+
+ Start in background mode (toggle on/off with hotkey):
+
+ agent-cli chat --toggle
+
+ Use local Ollama LLM with Wyoming ASR:
+
+ agent-cli chat --llm-provider ollama
+ """
  if print_args:
  print_command_line_args(locals())

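The `--toggle`/`--status` flags exist so the chat loop can hang off a keyboard shortcut, per the docstring above. A sketch of shell usage (the binding mechanism itself is left to your desktop environment):

```bash
agent-cli chat --toggle    # first invocation starts the voice loop; the next stops it
agent-cli chat --status    # check whether the background process is running
```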
agent_cli/agents/memory/__init__.py CHANGED
@@ -9,7 +9,25 @@ from agent_cli.core.process import set_process_title

  memory_app = typer.Typer(
  name="memory",
- help="Memory system operations (add, proxy, etc.).",
+ help="""Long-term memory system for AI chat applications.
+
+ Provides persistent memory across conversations by storing facts and context
+ in Markdown files, with automatic vector indexing for semantic retrieval.
+
+ **Subcommands:**
+
+ - `proxy`: Start an OpenAI-compatible proxy that injects relevant memories
+ into chat requests and extracts new facts from responses
+ - `add`: Manually add facts/memories without going through LLM extraction
+
+ **Quick Start:**
+
+ # Start the memory proxy (point your chat client at localhost:8100)
+ agent-cli memory proxy --openai-base-url http://localhost:11434/v1
+
+ # Manually seed some memories
+ agent-cli memory add "User prefers dark mode" "User is a Python developer"
+ """,
  add_completion=True,
  rich_markup_mode="markdown",
  no_args_is_help=True,
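Because the proxy presents an OpenAI-compatible API on localhost:8100, any standard client should be able to talk to it. A sketch (the `/v1/chat/completions` route and the model name are assumptions; this diff doesn't show the proxy's actual routes):

```bash
curl http://localhost:8100/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "llama3", "messages": [{"role": "user", "content": "Which editor theme do I prefer?"}]}'
```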
agent_cli/agents/memory/add.py CHANGED
@@ -127,17 +127,17 @@ def add(
  "default",
  "--conversation-id",
  "-c",
- help="Conversation ID to add memories to.",
+ help="Conversation namespace for these memories. Memories are retrieved per-conversation unless shared globally.",
  ),
  memory_path: Path = typer.Option(  # noqa: B008
  "./memory_db",
  "--memory-path",
- help="Path to the memory store.",
+ help="Directory for memory storage (same as `memory proxy --memory-path`).",
  ),
  git_versioning: bool = typer.Option(
  True,  # noqa: FBT003
  "--git-versioning/--no-git-versioning",
- help="Commit changes to git.",
+ help="Auto-commit changes to git for version history.",
  ),
  quiet: bool = opts.QUIET,
  config_file: str | None = opts.CONFIG_FILE,