agent-cli 0.70.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_cli/__init__.py +5 -0
- agent_cli/__main__.py +6 -0
- agent_cli/_extras.json +14 -0
- agent_cli/_requirements/.gitkeep +0 -0
- agent_cli/_requirements/audio.txt +79 -0
- agent_cli/_requirements/faster-whisper.txt +215 -0
- agent_cli/_requirements/kokoro.txt +425 -0
- agent_cli/_requirements/llm.txt +183 -0
- agent_cli/_requirements/memory.txt +355 -0
- agent_cli/_requirements/mlx-whisper.txt +222 -0
- agent_cli/_requirements/piper.txt +176 -0
- agent_cli/_requirements/rag.txt +402 -0
- agent_cli/_requirements/server.txt +154 -0
- agent_cli/_requirements/speed.txt +77 -0
- agent_cli/_requirements/vad.txt +155 -0
- agent_cli/_requirements/wyoming.txt +71 -0
- agent_cli/_tools.py +368 -0
- agent_cli/agents/__init__.py +23 -0
- agent_cli/agents/_voice_agent_common.py +136 -0
- agent_cli/agents/assistant.py +383 -0
- agent_cli/agents/autocorrect.py +284 -0
- agent_cli/agents/chat.py +496 -0
- agent_cli/agents/memory/__init__.py +31 -0
- agent_cli/agents/memory/add.py +190 -0
- agent_cli/agents/memory/proxy.py +160 -0
- agent_cli/agents/rag_proxy.py +128 -0
- agent_cli/agents/speak.py +209 -0
- agent_cli/agents/transcribe.py +671 -0
- agent_cli/agents/transcribe_daemon.py +499 -0
- agent_cli/agents/voice_edit.py +291 -0
- agent_cli/api.py +22 -0
- agent_cli/cli.py +106 -0
- agent_cli/config.py +503 -0
- agent_cli/config_cmd.py +307 -0
- agent_cli/constants.py +27 -0
- agent_cli/core/__init__.py +1 -0
- agent_cli/core/audio.py +461 -0
- agent_cli/core/audio_format.py +299 -0
- agent_cli/core/chroma.py +88 -0
- agent_cli/core/deps.py +191 -0
- agent_cli/core/openai_proxy.py +139 -0
- agent_cli/core/process.py +195 -0
- agent_cli/core/reranker.py +120 -0
- agent_cli/core/sse.py +87 -0
- agent_cli/core/transcription_logger.py +70 -0
- agent_cli/core/utils.py +526 -0
- agent_cli/core/vad.py +175 -0
- agent_cli/core/watch.py +65 -0
- agent_cli/dev/__init__.py +14 -0
- agent_cli/dev/cli.py +1588 -0
- agent_cli/dev/coding_agents/__init__.py +19 -0
- agent_cli/dev/coding_agents/aider.py +24 -0
- agent_cli/dev/coding_agents/base.py +167 -0
- agent_cli/dev/coding_agents/claude.py +39 -0
- agent_cli/dev/coding_agents/codex.py +24 -0
- agent_cli/dev/coding_agents/continue_dev.py +15 -0
- agent_cli/dev/coding_agents/copilot.py +24 -0
- agent_cli/dev/coding_agents/cursor_agent.py +48 -0
- agent_cli/dev/coding_agents/gemini.py +28 -0
- agent_cli/dev/coding_agents/opencode.py +15 -0
- agent_cli/dev/coding_agents/registry.py +49 -0
- agent_cli/dev/editors/__init__.py +19 -0
- agent_cli/dev/editors/base.py +89 -0
- agent_cli/dev/editors/cursor.py +15 -0
- agent_cli/dev/editors/emacs.py +46 -0
- agent_cli/dev/editors/jetbrains.py +56 -0
- agent_cli/dev/editors/nano.py +31 -0
- agent_cli/dev/editors/neovim.py +33 -0
- agent_cli/dev/editors/registry.py +59 -0
- agent_cli/dev/editors/sublime.py +20 -0
- agent_cli/dev/editors/vim.py +42 -0
- agent_cli/dev/editors/vscode.py +15 -0
- agent_cli/dev/editors/zed.py +20 -0
- agent_cli/dev/project.py +568 -0
- agent_cli/dev/registry.py +52 -0
- agent_cli/dev/skill/SKILL.md +141 -0
- agent_cli/dev/skill/examples.md +571 -0
- agent_cli/dev/terminals/__init__.py +19 -0
- agent_cli/dev/terminals/apple_terminal.py +82 -0
- agent_cli/dev/terminals/base.py +56 -0
- agent_cli/dev/terminals/gnome.py +51 -0
- agent_cli/dev/terminals/iterm2.py +84 -0
- agent_cli/dev/terminals/kitty.py +77 -0
- agent_cli/dev/terminals/registry.py +48 -0
- agent_cli/dev/terminals/tmux.py +58 -0
- agent_cli/dev/terminals/warp.py +132 -0
- agent_cli/dev/terminals/zellij.py +78 -0
- agent_cli/dev/worktree.py +856 -0
- agent_cli/docs_gen.py +417 -0
- agent_cli/example-config.toml +185 -0
- agent_cli/install/__init__.py +5 -0
- agent_cli/install/common.py +89 -0
- agent_cli/install/extras.py +174 -0
- agent_cli/install/hotkeys.py +48 -0
- agent_cli/install/services.py +87 -0
- agent_cli/memory/__init__.py +7 -0
- agent_cli/memory/_files.py +250 -0
- agent_cli/memory/_filters.py +63 -0
- agent_cli/memory/_git.py +157 -0
- agent_cli/memory/_indexer.py +142 -0
- agent_cli/memory/_ingest.py +408 -0
- agent_cli/memory/_persistence.py +182 -0
- agent_cli/memory/_prompt.py +91 -0
- agent_cli/memory/_retrieval.py +294 -0
- agent_cli/memory/_store.py +169 -0
- agent_cli/memory/_streaming.py +44 -0
- agent_cli/memory/_tasks.py +48 -0
- agent_cli/memory/api.py +113 -0
- agent_cli/memory/client.py +272 -0
- agent_cli/memory/engine.py +361 -0
- agent_cli/memory/entities.py +43 -0
- agent_cli/memory/models.py +112 -0
- agent_cli/opts.py +433 -0
- agent_cli/py.typed +0 -0
- agent_cli/rag/__init__.py +3 -0
- agent_cli/rag/_indexer.py +67 -0
- agent_cli/rag/_indexing.py +226 -0
- agent_cli/rag/_prompt.py +30 -0
- agent_cli/rag/_retriever.py +156 -0
- agent_cli/rag/_store.py +48 -0
- agent_cli/rag/_utils.py +218 -0
- agent_cli/rag/api.py +175 -0
- agent_cli/rag/client.py +299 -0
- agent_cli/rag/engine.py +302 -0
- agent_cli/rag/models.py +55 -0
- agent_cli/scripts/.runtime/.gitkeep +0 -0
- agent_cli/scripts/__init__.py +1 -0
- agent_cli/scripts/check_plugin_skill_sync.py +50 -0
- agent_cli/scripts/linux-hotkeys/README.md +63 -0
- agent_cli/scripts/linux-hotkeys/toggle-autocorrect.sh +45 -0
- agent_cli/scripts/linux-hotkeys/toggle-transcription.sh +58 -0
- agent_cli/scripts/linux-hotkeys/toggle-voice-edit.sh +58 -0
- agent_cli/scripts/macos-hotkeys/README.md +45 -0
- agent_cli/scripts/macos-hotkeys/skhd-config-example +5 -0
- agent_cli/scripts/macos-hotkeys/toggle-autocorrect.sh +12 -0
- agent_cli/scripts/macos-hotkeys/toggle-transcription.sh +37 -0
- agent_cli/scripts/macos-hotkeys/toggle-voice-edit.sh +37 -0
- agent_cli/scripts/nvidia-asr-server/README.md +99 -0
- agent_cli/scripts/nvidia-asr-server/pyproject.toml +27 -0
- agent_cli/scripts/nvidia-asr-server/server.py +255 -0
- agent_cli/scripts/nvidia-asr-server/shell.nix +32 -0
- agent_cli/scripts/nvidia-asr-server/uv.lock +4654 -0
- agent_cli/scripts/run-openwakeword.sh +11 -0
- agent_cli/scripts/run-piper-windows.ps1 +30 -0
- agent_cli/scripts/run-piper.sh +24 -0
- agent_cli/scripts/run-whisper-linux.sh +40 -0
- agent_cli/scripts/run-whisper-macos.sh +6 -0
- agent_cli/scripts/run-whisper-windows.ps1 +51 -0
- agent_cli/scripts/run-whisper.sh +9 -0
- agent_cli/scripts/run_faster_whisper_server.py +136 -0
- agent_cli/scripts/setup-linux-hotkeys.sh +72 -0
- agent_cli/scripts/setup-linux.sh +108 -0
- agent_cli/scripts/setup-macos-hotkeys.sh +61 -0
- agent_cli/scripts/setup-macos.sh +76 -0
- agent_cli/scripts/setup-windows.ps1 +63 -0
- agent_cli/scripts/start-all-services-windows.ps1 +53 -0
- agent_cli/scripts/start-all-services.sh +178 -0
- agent_cli/scripts/sync_extras.py +138 -0
- agent_cli/server/__init__.py +3 -0
- agent_cli/server/cli.py +721 -0
- agent_cli/server/common.py +222 -0
- agent_cli/server/model_manager.py +288 -0
- agent_cli/server/model_registry.py +225 -0
- agent_cli/server/proxy/__init__.py +3 -0
- agent_cli/server/proxy/api.py +444 -0
- agent_cli/server/streaming.py +67 -0
- agent_cli/server/tts/__init__.py +3 -0
- agent_cli/server/tts/api.py +335 -0
- agent_cli/server/tts/backends/__init__.py +82 -0
- agent_cli/server/tts/backends/base.py +139 -0
- agent_cli/server/tts/backends/kokoro.py +403 -0
- agent_cli/server/tts/backends/piper.py +253 -0
- agent_cli/server/tts/model_manager.py +201 -0
- agent_cli/server/tts/model_registry.py +28 -0
- agent_cli/server/tts/wyoming_handler.py +249 -0
- agent_cli/server/whisper/__init__.py +3 -0
- agent_cli/server/whisper/api.py +413 -0
- agent_cli/server/whisper/backends/__init__.py +89 -0
- agent_cli/server/whisper/backends/base.py +97 -0
- agent_cli/server/whisper/backends/faster_whisper.py +225 -0
- agent_cli/server/whisper/backends/mlx.py +270 -0
- agent_cli/server/whisper/languages.py +116 -0
- agent_cli/server/whisper/model_manager.py +157 -0
- agent_cli/server/whisper/model_registry.py +28 -0
- agent_cli/server/whisper/wyoming_handler.py +203 -0
- agent_cli/services/__init__.py +343 -0
- agent_cli/services/_wyoming_utils.py +64 -0
- agent_cli/services/asr.py +506 -0
- agent_cli/services/llm.py +228 -0
- agent_cli/services/tts.py +450 -0
- agent_cli/services/wake_word.py +142 -0
- agent_cli-0.70.5.dist-info/METADATA +2118 -0
- agent_cli-0.70.5.dist-info/RECORD +196 -0
- agent_cli-0.70.5.dist-info/WHEEL +4 -0
- agent_cli-0.70.5.dist-info/entry_points.txt +4 -0
- agent_cli-0.70.5.dist-info/licenses/LICENSE +21 -0
agent_cli/docs_gen.py
ADDED
|
@@ -0,0 +1,417 @@
|
|
|
1
|
+
"""Documentation generators for markdown-code-runner integration.
|
|
2
|
+
|
|
3
|
+
This module provides functions to introspect Typer commands and generate
|
|
4
|
+
Markdown documentation. Use with markdown-code-runner to auto-generate
|
|
5
|
+
options tables in documentation.
|
|
6
|
+
|
|
7
|
+
Example usage in Markdown files:
|
|
8
|
+
<!-- CODE:START -->
|
|
9
|
+
<!-- from agent_cli.docs_gen import all_options_for_docs -->
|
|
10
|
+
<!-- print(all_options_for_docs("transcribe")) -->
|
|
11
|
+
<!-- CODE:END -->
|
|
12
|
+
<!-- OUTPUT:START -->
|
|
13
|
+
...auto-generated table...
|
|
14
|
+
<!-- OUTPUT:END -->
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
from typing import Any, get_origin
|
|
20
|
+
|
|
21
|
+
import click
|
|
22
|
+
from typer.main import get_command
|
|
23
|
+
|
|
24
|
+
from agent_cli import opts
|
|
25
|
+
from agent_cli.cli import app
|
|
26
|
+
from agent_cli.install.extras import EXTRAS
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _get_type_str(annotation: Any) -> str:
|
|
30
|
+
"""Convert a type annotation to a readable string."""
|
|
31
|
+
if annotation is None:
|
|
32
|
+
return "str"
|
|
33
|
+
|
|
34
|
+
# Handle Optional types (Union[X, None])
|
|
35
|
+
origin = get_origin(annotation)
|
|
36
|
+
if origin is type(None):
|
|
37
|
+
return "None"
|
|
38
|
+
|
|
39
|
+
# Get the base type name
|
|
40
|
+
if hasattr(annotation, "__name__"):
|
|
41
|
+
return annotation.__name__.upper()
|
|
42
|
+
if hasattr(annotation, "__origin__"):
|
|
43
|
+
# Handle generic types like Optional[str]
|
|
44
|
+
args = getattr(annotation, "__args__", ())
|
|
45
|
+
non_none_args = [a for a in args if a is not type(None)]
|
|
46
|
+
if non_none_args:
|
|
47
|
+
return _get_type_str(non_none_args[0])
|
|
48
|
+
return str(annotation).replace("typing.", "").upper()
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _format_default(default: Any) -> str:
|
|
52
|
+
"""Format a default value for display."""
|
|
53
|
+
if default is None:
|
|
54
|
+
return "-"
|
|
55
|
+
if isinstance(default, bool):
|
|
56
|
+
return str(default).lower()
|
|
57
|
+
if isinstance(default, str) and default == "":
|
|
58
|
+
return '""'
|
|
59
|
+
return str(default)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _get_click_command(command_path: str) -> click.Command | None:
    """Resolve a dotted path like 'transcribe' or 'memory.proxy' to a Click command."""
    current: click.Command | click.Group = get_command(app)
    for segment in command_path.split("."):
        # Only groups can be descended into; hitting a leaf command with
        # path segments remaining means the path is invalid.
        if not isinstance(current, click.Group):
            return None
        current = current.commands.get(segment)  # type: ignore[assignment]
        if current is None:
            return None
    return current
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _extract_options_from_click(cmd: click.Command) -> list[dict[str, Any]]:
    """Extract option metadata from a Click command.

    Returns a list of dicts, one per ``click.Option``, with keys:
    name, type, default, help, panel, envvar, required, is_flag.
    """
    options = []
    for param in cmd.params:
        if not isinstance(param, click.Option):
            continue

        # Prefer long option names (--foo); fall back to whatever is declared.
        opt_names = [n for n in param.opts if n.startswith("--")]
        if not opt_names:
            opt_names = param.opts
        primary_name = max(opt_names, key=len) if opt_names else param.name

        # Boolean flags with a negation render as e.g. "--llm/--no-llm".
        if param.is_flag and param.secondary_opts:
            primary_name = f"{opt_names[0]}/{param.secondary_opts[0]}"

        # Panel comes from Typer's rich_help_panel, defaulting to "Options".
        panel = getattr(param, "rich_help_panel", None) or "Options"

        # BUG FIX: Click's `envvar` may be a single string OR a sequence of
        # strings. The old `param.envvar[0]` returned the first *character*
        # when envvar was a plain string; normalize both shapes explicitly.
        envvar = param.envvar
        if isinstance(envvar, (list, tuple)):
            envvar = envvar[0] if envvar else None
        elif not envvar:
            envvar = None

        options.append(
            {
                "name": primary_name,
                "type": param.type.name.upper() if hasattr(param.type, "name") else "TEXT",
                "default": _format_default(param.default),
                "help": param.help or "",
                "panel": panel,
                "envvar": envvar,
                "required": param.required,
                "is_flag": param.is_flag,
            },
        )
    return options
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def _get_command_options(command_path: str) -> list[dict[str, Any]]:
    """Extract all options from a Typer command, or [] if the path is unknown."""
    cmd = _get_click_command(command_path)
    return [] if cmd is None else _extract_options_from_click(cmd)
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def _options_table(
    command_path: str,
    panel: str | None = None,
    *,
    include_type: bool = True,
    include_default: bool = True,
) -> str:
    """Generate a Markdown table of options for a command.

    Optionally filters to a single help panel and can omit the Type and
    Default columns.
    """
    options = _get_command_options(command_path)
    if panel:
        options = [opt for opt in options if opt["panel"] == panel]
    if not options:
        return f"*No options found for panel '{panel}'*" if panel else "*No options found*"

    # Column layout depends on which optional columns are requested.
    columns = ["Option"]
    if include_type:
        columns.append("Type")
    if include_default:
        columns.append("Default")
    columns.append("Description")

    def _row(cells: list[str]) -> str:
        return "| " + " | ".join(cells) + " |"

    rows = [
        _row(columns),
        # Separator widths track the header text so the raw Markdown aligns.
        "|" + "|".join("-" * (len(col) + 2) for col in columns) + "|",
    ]
    for opt in options:
        cells = [f"`{opt['name']}`"]
        if include_type:
            cells.append(opt["type"])
        if include_default:
            cells.append("-" if opt["default"] == "-" else f"`{opt['default']}`")
        cells.append(opt["help"])
        rows.append(_row(cells))
    return "\n".join(rows)
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def _options_by_panel(
    command_path: str,
    *,
    include_type: bool = True,
    include_default: bool = True,
    heading_level: int = 3,
) -> str:
    """Generate options tables grouped by panel, one heading per panel."""
    options = _get_command_options(command_path)
    if not options:
        return "*No options found*"

    # dict.fromkeys deduplicates while preserving first-appearance order.
    panels = list(dict.fromkeys(opt["panel"] for opt in options))
    heading_prefix = "#" * heading_level

    chunks: list[str] = []
    for panel in panels:
        chunks.append(f"{heading_prefix} {panel}\n")
        chunks.append(
            _options_table(
                command_path,
                panel=panel,
                include_type=include_type,
                include_default=include_default,
            ),
        )
        chunks.append("")  # blank line between panels
    return "\n".join(chunks)
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def _list_commands() -> list[str]:
    """List all available commands including subcommands, sorted by path."""
    found: list[str] = []

    def _collect(node: click.Command | click.Group, path: str = "") -> None:
        # Groups contribute no entry themselves; only leaf commands are listed.
        if isinstance(node, click.Group):
            for name, child in node.commands.items():
                _collect(child, f"{path}.{name}" if path else name)
        elif path:
            found.append(path)

    _collect(get_command(app))
    return sorted(found)
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
def env_vars_table() -> str:
    """Generate a table of all environment variables.

    Returns:
        Markdown table of environment variables and descriptions

    """
    rows = [
        "| Variable | Description |",
        "|----------|-------------|",
    ]
    documented: set[str] = set()
    for attr in dir(opts):
        if attr.startswith("_"):
            continue
        candidate = getattr(opts, attr)
        # NOTE(review): envvar is rendered as-is; assumed to be a single
        # string on these option objects — confirm none declare a list.
        envvar = getattr(candidate, "envvar", None)
        if not envvar or envvar in documented:
            continue
        documented.add(envvar)
        description = getattr(candidate, "help", "") or ""
        rows.append(f"| `{envvar}` | {description} |")
    return "\n".join(rows)
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def provider_matrix() -> str:
    """Generate provider comparison matrix.

    Returns:
        Markdown table comparing local vs cloud providers

    """
    rows = (
        "| Capability | Local (Default) | Cloud Options |",
        "|------------|-----------------|---------------|",
        "| **LLM** | Ollama (`ollama`) | OpenAI (`openai`), Gemini (`gemini`) |",
        "| **ASR** (Speech-to-Text) | Wyoming/Faster Whisper (`wyoming`) | OpenAI-compatible Whisper (`openai`), Gemini (`gemini`) |",
        "| **TTS** (Text-to-Speech) | Wyoming/Piper (`wyoming`), Kokoro (`kokoro`) | OpenAI-compatible TTS (`openai`) |",
        "| **Wake Word** | Wyoming/openWakeWord | - |",
    )
    return "\n".join(rows)
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def commands_table(
|
|
260
|
+
category: str | None = None,
|
|
261
|
+
link_prefix: str = "",
|
|
262
|
+
) -> str:
|
|
263
|
+
"""Generate a table of available commands.
|
|
264
|
+
|
|
265
|
+
Args:
|
|
266
|
+
category: Filter by category (voice, text, ai, install, config) or None for all
|
|
267
|
+
link_prefix: Prefix for links (e.g., "docs/commands/" for README.md)
|
|
268
|
+
|
|
269
|
+
Returns:
|
|
270
|
+
Markdown table of commands
|
|
271
|
+
|
|
272
|
+
"""
|
|
273
|
+
# Define command metadata
|
|
274
|
+
command_info = {
|
|
275
|
+
"transcribe": ("Speech-to-text", "Record voice → text in clipboard", "voice"),
|
|
276
|
+
"transcribe-daemon": ("Continuous transcription", "Background VAD service", "voice"),
|
|
277
|
+
"speak": ("Text-to-speech", "Read text aloud", "voice"),
|
|
278
|
+
"voice-edit": ("Voice-powered editor", "Edit clipboard with voice", "voice"),
|
|
279
|
+
"assistant": ("Wake word assistant", "Hands-free voice interaction", "voice"),
|
|
280
|
+
"chat": ("Conversational AI", "Voice chat with tools", "voice"),
|
|
281
|
+
"autocorrect": ("Grammar & spelling", "Fix text from clipboard", "text"),
|
|
282
|
+
"rag-proxy": ("RAG server", "Chat with documents", "ai"),
|
|
283
|
+
"memory.proxy": ("Long-term memory", "Persistent conversation memory", "ai"),
|
|
284
|
+
"memory.add": ("Add memories", "Directly add facts to memory", "ai"),
|
|
285
|
+
"server": ("Transcription server", "HTTP API for transcription", "ai"),
|
|
286
|
+
"install-services": ("Install services", "Set up AI services", "install"),
|
|
287
|
+
"install-hotkeys": ("Install hotkeys", "Set up system hotkeys", "install"),
|
|
288
|
+
"start-services": ("Start services", "Launch all services", "install"),
|
|
289
|
+
"config": ("Configuration", "Manage config files", "config"),
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
if category:
|
|
293
|
+
commands = {k: v for k, v in command_info.items() if v[2] == category}
|
|
294
|
+
else:
|
|
295
|
+
commands = command_info
|
|
296
|
+
|
|
297
|
+
if not commands:
|
|
298
|
+
return "*No commands found*"
|
|
299
|
+
|
|
300
|
+
lines = [
|
|
301
|
+
"| Command | Purpose | Use Case |",
|
|
302
|
+
"|---------|---------|----------|",
|
|
303
|
+
]
|
|
304
|
+
|
|
305
|
+
for cmd, (purpose, use_case, _) in commands.items():
|
|
306
|
+
# Convert command path to link
|
|
307
|
+
doc_path = cmd.replace(".", "/")
|
|
308
|
+
link = f"{link_prefix}{doc_path}.md"
|
|
309
|
+
lines.append(f"| [`{cmd}`]({link}) | {purpose} | {use_case} |")
|
|
310
|
+
|
|
311
|
+
return "\n".join(lines)
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
def config_example(command_path: str | None = None) -> str:
|
|
315
|
+
"""Generate example TOML configuration for a command.
|
|
316
|
+
|
|
317
|
+
Args:
|
|
318
|
+
command_path: Command path or None for defaults section
|
|
319
|
+
|
|
320
|
+
Returns:
|
|
321
|
+
TOML configuration snippet
|
|
322
|
+
|
|
323
|
+
"""
|
|
324
|
+
if command_path is None:
|
|
325
|
+
# Generate defaults section
|
|
326
|
+
return """[defaults]
|
|
327
|
+
# Provider defaults (can be overridden per command)
|
|
328
|
+
# llm_provider = "ollama"
|
|
329
|
+
# asr_provider = "wyoming"
|
|
330
|
+
# tts_provider = "wyoming"
|
|
331
|
+
|
|
332
|
+
# API keys (or use environment variables)
|
|
333
|
+
# openai_api_key = "sk-..."
|
|
334
|
+
# gemini_api_key = "..."
|
|
335
|
+
|
|
336
|
+
# Audio devices
|
|
337
|
+
# input_device_index = 1
|
|
338
|
+
# output_device_index = 0"""
|
|
339
|
+
|
|
340
|
+
options = _get_command_options(command_path)
|
|
341
|
+
if not options:
|
|
342
|
+
return f"# No configurable options for {command_path}"
|
|
343
|
+
|
|
344
|
+
section = command_path.replace(".", "-")
|
|
345
|
+
lines = [f"[{section}]"]
|
|
346
|
+
|
|
347
|
+
for opt in options:
|
|
348
|
+
# Skip process management and meta options
|
|
349
|
+
if opt["panel"] in ("Process Management Options",):
|
|
350
|
+
continue
|
|
351
|
+
|
|
352
|
+
# Convert flag name to config key
|
|
353
|
+
key = opt["name"].lstrip("-").replace("-", "_").split("/")[0]
|
|
354
|
+
default = opt["default"]
|
|
355
|
+
help_text = opt["help"]
|
|
356
|
+
|
|
357
|
+
# Format the value appropriately
|
|
358
|
+
if default == "-":
|
|
359
|
+
value = '""' if opt["type"] == "TEXT" else "null"
|
|
360
|
+
lines.append(f"# {key} = {value} # {help_text}")
|
|
361
|
+
elif opt["type"] == "TEXT":
|
|
362
|
+
lines.append(f'# {key} = "{default}" # {help_text}')
|
|
363
|
+
elif opt["type"] in ("INTEGER", "FLOAT") or opt["is_flag"]:
|
|
364
|
+
lines.append(f"# {key} = {default} # {help_text}")
|
|
365
|
+
else:
|
|
366
|
+
lines.append(f"# {key} = {default} # {help_text}")
|
|
367
|
+
|
|
368
|
+
return "\n".join(lines)
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
def all_options_for_docs(command_path: str) -> str:
    """Generate complete options documentation for a command page.

    This is the main function to use in docs/commands/*.md files.
    It generates all options grouped by panel with proper formatting.

    Args:
        command_path: Command path like "transcribe" or "memory.proxy"

    Returns:
        Complete Markdown options section

    """
    # Types are often obvious and would clutter the table, so omit them;
    # defaults stay in, under h3 headings per panel.
    return _options_by_panel(
        command_path,
        include_type=False,
        include_default=True,
        heading_level=3,
    )
|
|
390
|
+
|
|
391
|
+
|
|
392
|
+
def extras_table() -> str:
    """Generate a table of available extras for install-extras command."""
    body = [f"| `{name}` | {description} |" for name, description in EXTRAS.items()]
    return "\n".join(["| Extra | Description |", "|-------|-------------|", *body])
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
if __name__ == "__main__":
    # Demo entry point: exercises the main generators so this module's
    # output can be inspected directly, without markdown-code-runner.
    # Demo: print options for transcribe command
    print("=== Available Commands ===")
    for cmd in _list_commands():
        print(f"  {cmd}")
    print()

    print("=== transcribe options by panel ===")
    print(_options_by_panel("transcribe"))

    print("\n=== Environment Variables ===")
    print(env_vars_table())

    print("\n=== Provider Matrix ===")
    print(provider_matrix())
|
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
# Example configuration for agent-cli
|
|
2
|
+
# Copy this file to ~/.config/agent-cli/config.toml or ./agent-cli-config.toml and edit
|
|
3
|
+
#
|
|
4
|
+
# This file demonstrates how to configure all available options.
|
|
5
|
+
# Keys use dashes to match the command-line arguments.
|
|
6
|
+
# Any option here can be overridden by a command-line argument.
|
|
7
|
+
|
|
8
|
+
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
9
|
+
# --- Default Settings ---
|
|
10
|
+
# These settings apply to all commands unless overridden in a command-specific
|
|
11
|
+
# section below.
|
|
12
|
+
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
13
|
+
[defaults]
|
|
14
|
+
|
|
15
|
+
# --- Provider Selection ---
|
|
16
|
+
# Select the default provider for each service ("ollama"/"openai"/"gemini" for LLM, "wyoming"/"openai"/"gemini" for ASR).
|
|
17
|
+
llm-provider = "ollama" # "local" still works as a deprecated alias
|
|
18
|
+
tts-provider = "wyoming"
|
|
19
|
+
|
|
20
|
+
# --- API Keys ---
|
|
21
|
+
# Your OpenAI API key. Can also be set via the OPENAI_API_KEY environment variable.
|
|
22
|
+
openai-api-key = "sk-..."
|
|
23
|
+
|
|
24
|
+
# --- Audio Device Settings ---
|
|
25
|
+
# You can specify partial names for devices, and the first match will be used.
|
|
26
|
+
# Use `agent-cli speak --list-devices` to see available devices.
|
|
27
|
+
# input-device-name = "logitech,airpods,macbook"
|
|
28
|
+
# output-device-name = "airpods,macbook"
|
|
29
|
+
# You can also specify device by index, though name is more stable.
|
|
30
|
+
# input-device-index = 1
|
|
31
|
+
# output-device-index = 1
|
|
32
|
+
|
|
33
|
+
# --- LLM Settings ---
|
|
34
|
+
# Ollama (local)
|
|
35
|
+
llm-ollama-model = "gemma3:4b"
|
|
36
|
+
llm-ollama-host = "http://localhost:11434"
|
|
37
|
+
# OpenAI
|
|
38
|
+
llm-openai-model = "gpt-5-mini"
|
|
39
|
+
# For llama-server (llama-cpp) or other OpenAI-compatible APIs:
|
|
40
|
+
# openai-base-url = "http://localhost:8080/v1"
|
|
41
|
+
|
|
42
|
+
# --- ASR (Speech-to-Text) Settings ---
|
|
43
|
+
# Wyoming (local)
|
|
44
|
+
asr-wyoming-ip = "localhost"
|
|
45
|
+
asr-wyoming-port = 10300
|
|
46
|
+
# OpenAI
|
|
47
|
+
asr-openai-model = "whisper-1"
|
|
48
|
+
# Custom ASR endpoint (e.g., NVIDIA Canary, local Whisper server)
|
|
49
|
+
# Uncomment and configure to use a custom OpenAI-compatible Whisper API:
|
|
50
|
+
# asr-provider = "openai"
|
|
51
|
+
# asr-openai-base-url = "http://localhost:9898"
|
|
52
|
+
# asr-openai-model = "nvidia/canary-qwen-2.5b" # Optional: override model
|
|
53
|
+
# asr-openai-prompt = "Transcribe the following:" # Optional: add prompt
|
|
54
|
+
|
|
55
|
+
# --- TTS (Text-to-Speech) Settings ---
|
|
56
|
+
# Wyoming (local)
|
|
57
|
+
tts-wyoming-ip = "localhost"
|
|
58
|
+
tts-wyoming-port = 10200
|
|
59
|
+
tts-wyoming-voice = "en_US-lessac-medium"
|
|
60
|
+
# tts-wyoming-language = "en_US" # Optional: specify language for the voice
|
|
61
|
+
# tts-wyoming-speaker = "speaker_name" # Optional: specify speaker for the voice
|
|
62
|
+
# OpenAI
|
|
63
|
+
tts-openai-model = "tts-1"
|
|
64
|
+
tts-openai-voice = "alloy"
|
|
65
|
+
# Custom OpenAI-compatible TTS endpoint (e.g., your own proxy)
|
|
66
|
+
# tts-openai-base-url = "http://localhost:8000/v1"
|
|
67
|
+
# Kokoro (high-quality local TTS)
|
|
68
|
+
# tts-kokoro-model = "kokoro"
|
|
69
|
+
# tts-kokoro-voice = "af_sky"
|
|
70
|
+
# tts-kokoro-host = "http://localhost:8880/v1"
|
|
71
|
+
# Gemini TTS
|
|
72
|
+
# tts-gemini-model = "gemini-2.5-flash-preview-tts"
|
|
73
|
+
# tts-gemini-voice = "Kore"
|
|
74
|
+
|
|
75
|
+
# --- Embedding Model (for RAG and Memory) ---
|
|
76
|
+
# Used by rag-proxy and memory-proxy for vectorization.
|
|
77
|
+
# embedding-model = "text-embedding-3-small" # OpenAI default
|
|
78
|
+
# embedding-model = "embeddinggemma:300m" # Ollama local model
|
|
79
|
+
|
|
80
|
+
# --- General Behavior ---
|
|
81
|
+
log-level = "WARNING" # Logging level (e.g., DEBUG, INFO, WARNING, ERROR)
|
|
82
|
+
# log-file = "/path/to/agent-cli.log" # Path to a file to write logs to
|
|
83
|
+
quiet = false # Suppress most console output
|
|
84
|
+
clipboard = true # Copy results to clipboard by default
|
|
85
|
+
# save-file = "/path/to/output.wav" # Save TTS audio to a file instead of playing
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
89
|
+
# --- Command-Specific Overrides ---
|
|
90
|
+
# Settings in these sections will override the [defaults] for that specific
|
|
91
|
+
# command.
|
|
92
|
+
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
93
|
+
|
|
94
|
+
[assistant]
|
|
95
|
+
# Wake-word specific settings
|
|
96
|
+
wake-server-ip = "localhost"
|
|
97
|
+
wake-server-port = 10400
|
|
98
|
+
wake-word = "ok_nabu" # e.g., "ok_nabu", "hey_jarvis"
|
|
99
|
+
# The assistant agent also uses ASR, LLM, and TTS settings from [defaults]
|
|
100
|
+
enable_tts = true
|
|
101
|
+
|
|
102
|
+
[autocorrect]
|
|
103
|
+
# Use a more powerful model specifically for the autocorrect command.
|
|
104
|
+
llm-provider = "ollama"
|
|
105
|
+
llm-ollama-model = "devstral:24b"
|
|
106
|
+
|
|
107
|
+
[chat]
|
|
108
|
+
# By default, chat uses local providers.
|
|
109
|
+
# For better tool use, you might want to switch to OpenAI:
|
|
110
|
+
# llm-provider = "openai"
|
|
111
|
+
# tts-provider = "openai"
|
|
112
|
+
# llm-openai-model = "gpt-4-turbo"
|
|
113
|
+
enable_tts = true
|
|
114
|
+
tts-speed = 1.2
|
|
115
|
+
# Conversation history settings
|
|
116
|
+
history-dir = "~/.config/agent-cli/history"
|
|
117
|
+
last-n-messages = 50 # Number of messages to load from history
|
|
118
|
+
|
|
119
|
+
[speak]
|
|
120
|
+
# Use a specific voice for the speak command.
|
|
121
|
+
tts-provider = "wyoming"
|
|
122
|
+
tts-wyoming-voice = "en_US-ryan-high"
|
|
123
|
+
tts-speed = 1.0
|
|
124
|
+
|
|
125
|
+
[transcribe]
|
|
126
|
+
# By default, transcription uses local providers.
|
|
127
|
+
# For higher accuracy, you can switch to OpenAI:
|
|
128
|
+
# asr-provider = "openai"
|
|
129
|
+
# llm-provider = "openai"
|
|
130
|
+
# Enable LLM cleanup for the transcript.
|
|
131
|
+
llm = true
|
|
132
|
+
# Allow the user to provide additional instructions for the LLM.
|
|
133
|
+
# Use this to improve transcription accuracy for domain-specific terms.
|
|
134
|
+
extra-instructions = """
|
|
135
|
+
Assume the user is often discussing Python programming.
|
|
136
|
+
Use backticks for variable names, function names, and other code elements.
|
|
137
|
+
Follow PEP8: use `snake_case` for variables, functions, and package names; `CamelCase` for classes.
|
|
138
|
+
"""
|
|
139
|
+
# Log all transcriptions with timestamps for later reference.
|
|
140
|
+
# transcription-log = "~/.config/agent-cli/transcription.log"
|
|
141
|
+
|
|
142
|
+
[voice-edit]
|
|
143
|
+
# Use a powerful local model for the voice assistant.
|
|
144
|
+
llm-provider = "ollama"
|
|
145
|
+
llm-ollama-model = "llama3"
|
|
146
|
+
enable_tts = true
|
|
147
|
+
|
|
148
|
+
[rag-proxy]
|
|
149
|
+
# RAG (Retrieval-Augmented Generation) proxy server settings.
|
|
150
|
+
# docs-folder = "./rag_docs" # Folder to watch for documents
|
|
151
|
+
# chroma-path = "./rag_db" # Path to ChromaDB persistence directory
|
|
152
|
+
# limit = 3 # Number of document chunks to retrieve per query
|
|
153
|
+
# rag-tools = true # Allow agent to fetch full documents when snippets are insufficient
|
|
154
|
+
# host = "0.0.0.0"
|
|
155
|
+
# port = 8000
|
|
156
|
+
|
|
157
|
+
[memory.proxy]
|
|
158
|
+
# Long-term memory proxy server settings.
|
|
159
|
+
# memory-path = "./memory_db" # Path to the memory store
|
|
160
|
+
# default-top-k = 5 # Number of memory entries to retrieve per query
|
|
161
|
+
# max-entries = 500 # Maximum stored memory entries per conversation
|
|
162
|
+
# mmr-lambda = 0.7 # MMR lambda (0-1): higher favors relevance, lower favors diversity
|
|
163
|
+
# recency-weight = 0.2 # Recency score weight (0.0-1.0)
|
|
164
|
+
# score-threshold = 0.35 # Minimum semantic relevance threshold
|
|
165
|
+
# summarization = true # Enable automatic fact extraction and summaries
|
|
166
|
+
# git-versioning = true # Enable automatic git commit of memory changes
|
|
167
|
+
# host = "0.0.0.0"
|
|
168
|
+
# port = 8100
|
|
169
|
+
|
|
170
|
+
[transcribe-daemon]
|
|
171
|
+
# Continuous transcription settings
|
|
172
|
+
# role = "user"
|
|
173
|
+
# silence-threshold = 1.0
|
|
174
|
+
# min-segment = 0.25
|
|
175
|
+
# vad-threshold = 0.3
|
|
176
|
+
# save-audio = true
|
|
177
|
+
# audio-dir = "~/.config/agent-cli/audio"
|
|
178
|
+
# transcription-log = "~/.config/agent-cli/transcriptions.jsonl"
|
|
179
|
+
# clipboard = false
|
|
180
|
+
|
|
181
|
+
[server]
|
|
182
|
+
# Transcription server settings
|
|
183
|
+
# host = "0.0.0.0"
|
|
184
|
+
# port = 61337
|
|
185
|
+
# reload = false
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
"""Common utilities for installation commands."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import platform
|
|
6
|
+
import subprocess
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import TYPE_CHECKING
|
|
9
|
+
|
|
10
|
+
import typer
|
|
11
|
+
|
|
12
|
+
from agent_cli.core.utils import print_error_message, print_with_style
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from subprocess import CompletedProcess
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _script_directory() -> Path:
|
|
19
|
+
"""Get the directory containing all scripts."""
|
|
20
|
+
# First check if we're running from source (development)
|
|
21
|
+
source_scripts = Path(__file__).parent.parent.parent / "scripts"
|
|
22
|
+
if source_scripts.exists():
|
|
23
|
+
return source_scripts
|
|
24
|
+
# Check for scripts bundled with the package
|
|
25
|
+
package_scripts = Path(__file__).parent.parent / "scripts"
|
|
26
|
+
if package_scripts.exists():
|
|
27
|
+
return package_scripts
|
|
28
|
+
msg = "Should never happen: no scripts directory found, please report an issue."
|
|
29
|
+
raise RuntimeError(msg)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def get_script_path(script_name: str) -> Path:
    """Resolve *script_name* to its full path inside the scripts directory."""
    return _script_directory() / script_name
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _run_script(script_path: Path) -> CompletedProcess[bytes]:
|
|
39
|
+
"""Run a shell script, streaming its output directly to the terminal."""
|
|
40
|
+
if not script_path.exists():
|
|
41
|
+
msg = f"Script not found: {script_path}"
|
|
42
|
+
raise FileNotFoundError(msg)
|
|
43
|
+
|
|
44
|
+
# Run the script through the shell, which handles execution permissions
|
|
45
|
+
# This avoids modifying file permissions in the package directory
|
|
46
|
+
return subprocess.run(
|
|
47
|
+
["bash", str(script_path)], # noqa: S607
|
|
48
|
+
check=True,
|
|
49
|
+
cwd=script_path.parent,
|
|
50
|
+
)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def get_platform_script(macos_script: str, linux_script: str) -> str:
    """Return the script name appropriate for the current platform.

    Exits with an error message when the OS is neither macOS nor Linux.
    """
    by_system = {"darwin": macos_script, "linux": linux_script}
    system = platform.system().lower()
    try:
        return by_system[system]
    except KeyError:
        print_error_message(f"Unsupported operating system: {system}")
        raise typer.Exit(1) from None
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def execute_installation_script(
    script_name: str,
    operation_name: str,
    success_message: str,
    next_steps: list[str] | None = None,
) -> None:
    """Execute an installation script with standard error handling.

    Resolves *script_name* inside the scripts directory, runs it, and
    prints the success message (plus optional numbered next steps) on
    success. A missing script or a non-zero exit code aborts the CLI
    with an error message via typer.Exit.
    """
    script_path = get_script_path(script_name)
    print_with_style(f"🚀 Running {script_name} to {operation_name.lower()}...", "green")

    try:
        _run_script(script_path)
    except FileNotFoundError as e:
        # The script file itself could not be found.
        print_error_message(f"{operation_name} failed: {e}")
        raise typer.Exit(1) from None
    except subprocess.CalledProcessError as e:
        # The script ran but returned a non-zero exit code; propagate it.
        print_error_message(f"{operation_name} failed with exit code {e.returncode}")
        raise typer.Exit(e.returncode) from None
    else:
        print_with_style(f"✅ {success_message}", "green")
        if next_steps:
            print_with_style("\nNext steps:", "yellow")
            for step_number, step in enumerate(next_steps, 1):
                print_with_style(f" {step_number}. {step}", "cyan")
|