agent-cli 0.70.5 (agent_cli-0.70.5-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_cli/__init__.py +5 -0
- agent_cli/__main__.py +6 -0
- agent_cli/_extras.json +14 -0
- agent_cli/_requirements/.gitkeep +0 -0
- agent_cli/_requirements/audio.txt +79 -0
- agent_cli/_requirements/faster-whisper.txt +215 -0
- agent_cli/_requirements/kokoro.txt +425 -0
- agent_cli/_requirements/llm.txt +183 -0
- agent_cli/_requirements/memory.txt +355 -0
- agent_cli/_requirements/mlx-whisper.txt +222 -0
- agent_cli/_requirements/piper.txt +176 -0
- agent_cli/_requirements/rag.txt +402 -0
- agent_cli/_requirements/server.txt +154 -0
- agent_cli/_requirements/speed.txt +77 -0
- agent_cli/_requirements/vad.txt +155 -0
- agent_cli/_requirements/wyoming.txt +71 -0
- agent_cli/_tools.py +368 -0
- agent_cli/agents/__init__.py +23 -0
- agent_cli/agents/_voice_agent_common.py +136 -0
- agent_cli/agents/assistant.py +383 -0
- agent_cli/agents/autocorrect.py +284 -0
- agent_cli/agents/chat.py +496 -0
- agent_cli/agents/memory/__init__.py +31 -0
- agent_cli/agents/memory/add.py +190 -0
- agent_cli/agents/memory/proxy.py +160 -0
- agent_cli/agents/rag_proxy.py +128 -0
- agent_cli/agents/speak.py +209 -0
- agent_cli/agents/transcribe.py +671 -0
- agent_cli/agents/transcribe_daemon.py +499 -0
- agent_cli/agents/voice_edit.py +291 -0
- agent_cli/api.py +22 -0
- agent_cli/cli.py +106 -0
- agent_cli/config.py +503 -0
- agent_cli/config_cmd.py +307 -0
- agent_cli/constants.py +27 -0
- agent_cli/core/__init__.py +1 -0
- agent_cli/core/audio.py +461 -0
- agent_cli/core/audio_format.py +299 -0
- agent_cli/core/chroma.py +88 -0
- agent_cli/core/deps.py +191 -0
- agent_cli/core/openai_proxy.py +139 -0
- agent_cli/core/process.py +195 -0
- agent_cli/core/reranker.py +120 -0
- agent_cli/core/sse.py +87 -0
- agent_cli/core/transcription_logger.py +70 -0
- agent_cli/core/utils.py +526 -0
- agent_cli/core/vad.py +175 -0
- agent_cli/core/watch.py +65 -0
- agent_cli/dev/__init__.py +14 -0
- agent_cli/dev/cli.py +1588 -0
- agent_cli/dev/coding_agents/__init__.py +19 -0
- agent_cli/dev/coding_agents/aider.py +24 -0
- agent_cli/dev/coding_agents/base.py +167 -0
- agent_cli/dev/coding_agents/claude.py +39 -0
- agent_cli/dev/coding_agents/codex.py +24 -0
- agent_cli/dev/coding_agents/continue_dev.py +15 -0
- agent_cli/dev/coding_agents/copilot.py +24 -0
- agent_cli/dev/coding_agents/cursor_agent.py +48 -0
- agent_cli/dev/coding_agents/gemini.py +28 -0
- agent_cli/dev/coding_agents/opencode.py +15 -0
- agent_cli/dev/coding_agents/registry.py +49 -0
- agent_cli/dev/editors/__init__.py +19 -0
- agent_cli/dev/editors/base.py +89 -0
- agent_cli/dev/editors/cursor.py +15 -0
- agent_cli/dev/editors/emacs.py +46 -0
- agent_cli/dev/editors/jetbrains.py +56 -0
- agent_cli/dev/editors/nano.py +31 -0
- agent_cli/dev/editors/neovim.py +33 -0
- agent_cli/dev/editors/registry.py +59 -0
- agent_cli/dev/editors/sublime.py +20 -0
- agent_cli/dev/editors/vim.py +42 -0
- agent_cli/dev/editors/vscode.py +15 -0
- agent_cli/dev/editors/zed.py +20 -0
- agent_cli/dev/project.py +568 -0
- agent_cli/dev/registry.py +52 -0
- agent_cli/dev/skill/SKILL.md +141 -0
- agent_cli/dev/skill/examples.md +571 -0
- agent_cli/dev/terminals/__init__.py +19 -0
- agent_cli/dev/terminals/apple_terminal.py +82 -0
- agent_cli/dev/terminals/base.py +56 -0
- agent_cli/dev/terminals/gnome.py +51 -0
- agent_cli/dev/terminals/iterm2.py +84 -0
- agent_cli/dev/terminals/kitty.py +77 -0
- agent_cli/dev/terminals/registry.py +48 -0
- agent_cli/dev/terminals/tmux.py +58 -0
- agent_cli/dev/terminals/warp.py +132 -0
- agent_cli/dev/terminals/zellij.py +78 -0
- agent_cli/dev/worktree.py +856 -0
- agent_cli/docs_gen.py +417 -0
- agent_cli/example-config.toml +185 -0
- agent_cli/install/__init__.py +5 -0
- agent_cli/install/common.py +89 -0
- agent_cli/install/extras.py +174 -0
- agent_cli/install/hotkeys.py +48 -0
- agent_cli/install/services.py +87 -0
- agent_cli/memory/__init__.py +7 -0
- agent_cli/memory/_files.py +250 -0
- agent_cli/memory/_filters.py +63 -0
- agent_cli/memory/_git.py +157 -0
- agent_cli/memory/_indexer.py +142 -0
- agent_cli/memory/_ingest.py +408 -0
- agent_cli/memory/_persistence.py +182 -0
- agent_cli/memory/_prompt.py +91 -0
- agent_cli/memory/_retrieval.py +294 -0
- agent_cli/memory/_store.py +169 -0
- agent_cli/memory/_streaming.py +44 -0
- agent_cli/memory/_tasks.py +48 -0
- agent_cli/memory/api.py +113 -0
- agent_cli/memory/client.py +272 -0
- agent_cli/memory/engine.py +361 -0
- agent_cli/memory/entities.py +43 -0
- agent_cli/memory/models.py +112 -0
- agent_cli/opts.py +433 -0
- agent_cli/py.typed +0 -0
- agent_cli/rag/__init__.py +3 -0
- agent_cli/rag/_indexer.py +67 -0
- agent_cli/rag/_indexing.py +226 -0
- agent_cli/rag/_prompt.py +30 -0
- agent_cli/rag/_retriever.py +156 -0
- agent_cli/rag/_store.py +48 -0
- agent_cli/rag/_utils.py +218 -0
- agent_cli/rag/api.py +175 -0
- agent_cli/rag/client.py +299 -0
- agent_cli/rag/engine.py +302 -0
- agent_cli/rag/models.py +55 -0
- agent_cli/scripts/.runtime/.gitkeep +0 -0
- agent_cli/scripts/__init__.py +1 -0
- agent_cli/scripts/check_plugin_skill_sync.py +50 -0
- agent_cli/scripts/linux-hotkeys/README.md +63 -0
- agent_cli/scripts/linux-hotkeys/toggle-autocorrect.sh +45 -0
- agent_cli/scripts/linux-hotkeys/toggle-transcription.sh +58 -0
- agent_cli/scripts/linux-hotkeys/toggle-voice-edit.sh +58 -0
- agent_cli/scripts/macos-hotkeys/README.md +45 -0
- agent_cli/scripts/macos-hotkeys/skhd-config-example +5 -0
- agent_cli/scripts/macos-hotkeys/toggle-autocorrect.sh +12 -0
- agent_cli/scripts/macos-hotkeys/toggle-transcription.sh +37 -0
- agent_cli/scripts/macos-hotkeys/toggle-voice-edit.sh +37 -0
- agent_cli/scripts/nvidia-asr-server/README.md +99 -0
- agent_cli/scripts/nvidia-asr-server/pyproject.toml +27 -0
- agent_cli/scripts/nvidia-asr-server/server.py +255 -0
- agent_cli/scripts/nvidia-asr-server/shell.nix +32 -0
- agent_cli/scripts/nvidia-asr-server/uv.lock +4654 -0
- agent_cli/scripts/run-openwakeword.sh +11 -0
- agent_cli/scripts/run-piper-windows.ps1 +30 -0
- agent_cli/scripts/run-piper.sh +24 -0
- agent_cli/scripts/run-whisper-linux.sh +40 -0
- agent_cli/scripts/run-whisper-macos.sh +6 -0
- agent_cli/scripts/run-whisper-windows.ps1 +51 -0
- agent_cli/scripts/run-whisper.sh +9 -0
- agent_cli/scripts/run_faster_whisper_server.py +136 -0
- agent_cli/scripts/setup-linux-hotkeys.sh +72 -0
- agent_cli/scripts/setup-linux.sh +108 -0
- agent_cli/scripts/setup-macos-hotkeys.sh +61 -0
- agent_cli/scripts/setup-macos.sh +76 -0
- agent_cli/scripts/setup-windows.ps1 +63 -0
- agent_cli/scripts/start-all-services-windows.ps1 +53 -0
- agent_cli/scripts/start-all-services.sh +178 -0
- agent_cli/scripts/sync_extras.py +138 -0
- agent_cli/server/__init__.py +3 -0
- agent_cli/server/cli.py +721 -0
- agent_cli/server/common.py +222 -0
- agent_cli/server/model_manager.py +288 -0
- agent_cli/server/model_registry.py +225 -0
- agent_cli/server/proxy/__init__.py +3 -0
- agent_cli/server/proxy/api.py +444 -0
- agent_cli/server/streaming.py +67 -0
- agent_cli/server/tts/__init__.py +3 -0
- agent_cli/server/tts/api.py +335 -0
- agent_cli/server/tts/backends/__init__.py +82 -0
- agent_cli/server/tts/backends/base.py +139 -0
- agent_cli/server/tts/backends/kokoro.py +403 -0
- agent_cli/server/tts/backends/piper.py +253 -0
- agent_cli/server/tts/model_manager.py +201 -0
- agent_cli/server/tts/model_registry.py +28 -0
- agent_cli/server/tts/wyoming_handler.py +249 -0
- agent_cli/server/whisper/__init__.py +3 -0
- agent_cli/server/whisper/api.py +413 -0
- agent_cli/server/whisper/backends/__init__.py +89 -0
- agent_cli/server/whisper/backends/base.py +97 -0
- agent_cli/server/whisper/backends/faster_whisper.py +225 -0
- agent_cli/server/whisper/backends/mlx.py +270 -0
- agent_cli/server/whisper/languages.py +116 -0
- agent_cli/server/whisper/model_manager.py +157 -0
- agent_cli/server/whisper/model_registry.py +28 -0
- agent_cli/server/whisper/wyoming_handler.py +203 -0
- agent_cli/services/__init__.py +343 -0
- agent_cli/services/_wyoming_utils.py +64 -0
- agent_cli/services/asr.py +506 -0
- agent_cli/services/llm.py +228 -0
- agent_cli/services/tts.py +450 -0
- agent_cli/services/wake_word.py +142 -0
- agent_cli-0.70.5.dist-info/METADATA +2118 -0
- agent_cli-0.70.5.dist-info/RECORD +196 -0
- agent_cli-0.70.5.dist-info/WHEEL +4 -0
- agent_cli-0.70.5.dist-info/entry_points.txt +4 -0
- agent_cli-0.70.5.dist-info/licenses/LICENSE +21 -0
agent_cli/scripts/run-openwakeword.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+echo "Starting Wyoming OpenWakeWord on port 10400..."
+
+# Use the LiteRT fork until the PR is merged
+# PR: https://github.com/rhasspy/wyoming-openwakeword/pull/XXX
+# This version works on macOS and other platforms without tflite-runtime
+
+uvx --python 3.12 --from git+https://github.com/basnijholt/wyoming-openwakeword.git@litert \
+    wyoming-openwakeword \
+    --uri 'tcp://0.0.0.0:10400' \
+    --preload-model 'ok_nabu'
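Once run-openwakeword.sh is running, a quick reachability check from another terminal can look like the sketch below; it assumes the service is local and reuses the port hard-coded in the script.

    # Confirm the Wyoming OpenWakeWord service is accepting TCP connections on port 10400.
    nc -z localhost 10400 && echo "openwakeword reachable" || echo "openwakeword not reachable"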
agent_cli/scripts/run-piper-windows.ps1
@@ -0,0 +1,30 @@
+# PowerShell script to run Wyoming Piper TTS on Windows
+# Run with: powershell -ExecutionPolicy Bypass -File scripts/run-piper-windows.ps1
+
+Write-Host "Starting Wyoming Piper on port 10200..." -ForegroundColor Cyan
+
+# Create .runtime directory
+$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+$RuntimeDir = Join-Path $ScriptDir ".runtime"
+$PiperDataDir = Join-Path $RuntimeDir "piper-data"
+if (-not (Test-Path $PiperDataDir)) {
+    New-Item -ItemType Directory -Path $PiperDataDir -Force | Out-Null
+}
+
+# Download voice if not present
+$VoiceDir = Join-Path $PiperDataDir "en_US-lessac-medium"
+if (-not (Test-Path $VoiceDir)) {
+    Write-Host "Downloading voice model..." -ForegroundColor Yellow
+    Push-Location $PiperDataDir
+    uvx --python 3.12 --from piper-tts python -m piper.download_voices en_US-lessac-medium
+    Pop-Location
+}
+
+# Run Wyoming Piper using uvx
+uvx --python 3.12 `
+    --from "git+https://github.com/rhasspy/wyoming-piper.git@v2.1.1" `
+    wyoming-piper `
+    --voice en_US-lessac-medium `
+    --uri "tcp://0.0.0.0:10200" `
+    --data-dir $PiperDataDir `
+    --download-dir $PiperDataDir
agent_cli/scripts/run-piper.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+echo "Starting Wyoming Piper on port 10200..."
+
+# Create .runtime directory for local assets
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+mkdir -p "$SCRIPT_DIR/.runtime"
+
+# Download voice if not present using uvx
+if [ ! -d "$SCRIPT_DIR/.runtime/piper-data/en_US-lessac-medium" ]; then
+    echo "Downloading voice model..."
+    mkdir -p "$SCRIPT_DIR/.runtime/piper-data"
+    cd "$SCRIPT_DIR/.runtime/piper-data"
+    uvx --python 3.12 --from piper-tts python -m piper.download_voices en_US-lessac-medium
+    cd "$SCRIPT_DIR"
+fi
+
+# Run Wyoming Piper using uvx wrapper
+uvx --python 3.12 \
+    --from git+https://github.com/rhasspy/wyoming-piper.git@v2.1.1 \
+    wyoming-piper \
+    --voice en_US-lessac-medium \
+    --uri 'tcp://0.0.0.0:10200' \
+    --data-dir "$SCRIPT_DIR/.runtime/piper-data" \
+    --download-dir "$SCRIPT_DIR/.runtime/piper-data"
agent_cli/scripts/run-whisper-linux.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+# Linux: faster-whisper with CUDA/CPU detection
+echo "Starting Wyoming Faster Whisper on port 10300..."
+
+# Detect if CUDA is available
+if command -v nvidia-smi &> /dev/null && nvidia-smi &> /dev/null; then
+    echo "NVIDIA GPU detected"
+    DETECTED_DEVICE="cuda"
+else
+    echo "No GPU detected or CUDA unavailable"
+    DETECTED_DEVICE="cpu"
+fi
+
+# Allow device override via environment variable
+DEVICE="${WHISPER_DEVICE:-$DETECTED_DEVICE}"
+
+# Set default model based on final device choice
+if [ "$DEVICE" = "cuda" ]; then
+    DEFAULT_MODEL="large-v3"
+else
+    DEFAULT_MODEL="tiny"
+fi
+
+# Allow model override via environment variable
+MODEL="${WHISPER_MODEL:-$DEFAULT_MODEL}"
+echo "Using model: $MODEL on device: $DEVICE"
+
+# Create .runtime directory for whisper data
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+mkdir -p "$SCRIPT_DIR/.runtime"
+
+uvx --python 3.12 \
+    --from git+https://github.com/rhasspy/wyoming-faster-whisper.git@v3.0.1 \
+    wyoming-faster-whisper \
+    --model "$MODEL" \
+    --language en \
+    --device "$DEVICE" \
+    --uri 'tcp://0.0.0.0:10300' \
+    --data-dir "$SCRIPT_DIR/.runtime/whisper-data" \
+    --download-dir "$SCRIPT_DIR/.runtime/whisper-data"
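run-whisper-linux.sh reads WHISPER_DEVICE and WHISPER_MODEL before falling back to its auto-detected defaults, so both can be overridden per invocation. A minimal sketch, assuming the script is launched from agent_cli/scripts/:

    # Force CPU with a smaller model regardless of GPU detection.
    WHISPER_DEVICE=cpu WHISPER_MODEL=base ./run-whisper-linux.sh

    # Or pin the GPU defaults explicitly.
    WHISPER_DEVICE=cuda WHISPER_MODEL=large-v3 ./run-whisper-linux.sh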
agent_cli/scripts/run-whisper-windows.ps1
@@ -0,0 +1,51 @@
+# PowerShell script to run Wyoming Faster Whisper on Windows
+# Run with: powershell -ExecutionPolicy Bypass -File scripts/run-whisper-windows.ps1
+
+Write-Host "Starting Wyoming Faster Whisper on port 10300..." -ForegroundColor Cyan
+
+# Create .runtime directory
+$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+$RuntimeDir = Join-Path $ScriptDir ".runtime"
+$WhisperDataDir = Join-Path $RuntimeDir "whisper-data"
+if (-not (Test-Path $WhisperDataDir)) {
+    New-Item -ItemType Directory -Path $WhisperDataDir -Force | Out-Null
+}
+
+# Detect if CUDA is available
+$Device = "cpu"
+$Model = "tiny"
+
+try {
+    $nvidiaSmi = Get-Command nvidia-smi -ErrorAction SilentlyContinue
+    if ($nvidiaSmi) {
+        $null = & nvidia-smi 2>&1
+        if ($LASTEXITCODE -eq 0) {
+            Write-Host "NVIDIA GPU detected" -ForegroundColor Green
+            $Device = "cuda"
+            $Model = "large-v3"
+        }
+    }
+} catch {
+    # nvidia-smi not found or failed
+}
+
+if ($Device -eq "cpu") {
+    Write-Host "No GPU detected or CUDA unavailable, using CPU" -ForegroundColor Yellow
+}
+
+# Allow override via environment variables
+if ($env:WHISPER_DEVICE) { $Device = $env:WHISPER_DEVICE }
+if ($env:WHISPER_MODEL) { $Model = $env:WHISPER_MODEL }
+
+Write-Host "Using model: $Model on device: $Device" -ForegroundColor Cyan
+
+# Run Wyoming Faster Whisper using uvx
+uvx --python 3.12 `
+    --from "git+https://github.com/rhasspy/wyoming-faster-whisper.git@v3.0.1" `
+    wyoming-faster-whisper `
+    --model $Model `
+    --language en `
+    --device $Device `
+    --uri "tcp://0.0.0.0:10300" `
+    --data-dir $WhisperDataDir `
+    --download-dir $WhisperDataDir
agent_cli/scripts/run_faster_whisper_server.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env -S uv run --script
+#
+# /// script
+# requires-python = ">=3.12"
+# dependencies = [
+#   "typer>=0.12",
+#   "fastapi>=0.115",
+#   "uvicorn>=0.30",
+#   "python-multipart>=0.0.9",
+#   "faster-whisper>=1.1.1",
+# ]
+# ///
+
+"""Minimal FastAPI server exposing faster-whisper transcription.
+
+Run directly with uv:
+
+    ./scripts/run_faster_whisper_server.py --model large-v3 --host 0.0.0.0 --port 8811
+
+Then point agent-cli at it, e.g.:
+
+    agent-cli transcribe --asr-openai-base-url http://localhost:8811/v1 --asr-openai-model large-v3
+
+Note: agent-cli requires a `--asr-openai-model` value in the request, but this server ignores it and always uses the model you start it with (`--model`).
+"""
+
+from __future__ import annotations
+
+import tempfile
+import threading
+from pathlib import Path
+from typing import Annotated
+
+import typer
+import uvicorn
+from fastapi import FastAPI, File, UploadFile
+from fastapi.responses import JSONResponse
+from faster_whisper import WhisperModel
+
+# Defaults configurable via environment if desired.
+DEFAULT_MODEL = "large-v3"
+DEFAULT_HOST = "0.0.0.0"  # noqa: S104
+DEFAULT_PORT = 8811
+DEFAULT_DEVICE = "auto"
+DEFAULT_COMPUTE_TYPE = "default"
+DEFAULT_LOG_LEVEL = "info"
+
+
+class ModelHolder:
+    """Thread-safe lazy model loader so we only download/init once."""
+
+    def __init__(self, model_id: str, device: str, compute_type: str) -> None:
+        """Store model configuration."""
+        self.model_id = model_id
+        self.device = device
+        self.compute_type = compute_type
+        self._model: WhisperModel | None = None
+        self._lock = threading.Lock()
+
+    def get(self) -> WhisperModel:
+        """Load or return the cached WhisperModel instance."""
+        if self._model is None:
+            with self._lock:
+                if self._model is None:
+                    self._model = WhisperModel(
+                        self.model_id,
+                        device=self.device,
+                        compute_type=self.compute_type,
+                    )
+        return self._model
+
+
+def build_api(holder: ModelHolder) -> FastAPI:
+    """Create the FastAPI app wired to the provided model holder."""
+    api = FastAPI(title="faster-whisper-api")
+
+    @api.get("/health")
+    def health() -> dict:
+        return {
+            "status": "ok",
+            "model": holder.model_id,
+            "device": holder.device,
+            "compute_type": holder.compute_type,
+        }
+
+    @api.post("/v1/audio/transcriptions")
+    async def transcribe(
+        file: Annotated[UploadFile, File(..., description="Audio file (wav, mp3, m4a, etc.)")],
+        language: str | None = None,
+    ) -> dict[str, str]:
+        audiobytes = await file.read()
+        if not audiobytes:
+            return JSONResponse({"error": "empty file"}, status_code=400)
+
+        with tempfile.NamedTemporaryFile(
+            delete=False,
+            suffix=Path(file.filename or "audio").suffix,
+        ) as tmp:
+            tmp.write(audiobytes)
+            tmp_path = tmp.name
+
+        try:
+            model = holder.get()
+            segments, info = model.transcribe(tmp_path, language=language)
+            text = " ".join(seg.text.strip() for seg in segments)
+            return {
+                "object": "transcription",
+                "model": holder.model_id,
+                "language": language or info.language,
+                "text": text,
+            }
+        finally:
+            Path(tmp_path).unlink(missing_ok=True)
+
+    return api
+
+
+def main(
+    model: str = typer.Option(DEFAULT_MODEL, help="faster-whisper model id"),
+    host: str = typer.Option(DEFAULT_HOST, show_default=True),
+    port: int = typer.Option(DEFAULT_PORT, show_default=True),
+    device: str = typer.Option(DEFAULT_DEVICE, help="cpu, cuda, or auto"),
+    compute_type: str = typer.Option(
+        DEFAULT_COMPUTE_TYPE,
+        help="faster-whisper compute_type (e.g., int8, int8_float16, float16, float32, default)",
+    ),
+    log_level: str = typer.Option(DEFAULT_LOG_LEVEL, help="uvicorn log level"),
+) -> None:
+    """Start the server with the given runtime options."""
+    holder = ModelHolder(model_id=model, device=device, compute_type=compute_type)
+    api = build_api(holder)
+    uvicorn.run(api, host=host, port=port, log_level=log_level)
+
+
+if __name__ == "__main__":
+    typer.run(main)
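With run_faster_whisper_server.py running (for example via the uv invocation shown in its docstring), both endpoints can be exercised from the shell. A minimal sketch; localhost:8811 matches the script's defaults and sample.wav is a placeholder file name:

    # Health check: reports the configured model, device, and compute type.
    curl -s http://localhost:8811/health

    # OpenAI-style transcription endpoint; the optional language hint is passed as a query parameter here.
    curl -s -X POST "http://localhost:8811/v1/audio/transcriptions?language=en" \
        -F "file=@sample.wav"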
agent_cli/scripts/setup-linux-hotkeys.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+set -e
+
+echo "Setting up Linux hotkeys..."
+
+# Check if we're on Linux
+if [[ "$(uname)" != "Linux" ]]; then
+    echo "This script is for Linux only"
+    exit 1
+fi
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+TRANSCRIBE_SCRIPT="$SCRIPT_DIR/linux-hotkeys/toggle-transcription.sh"
+AUTOCORRECT_SCRIPT="$SCRIPT_DIR/linux-hotkeys/toggle-autocorrect.sh"
+VOICE_EDIT_SCRIPT="$SCRIPT_DIR/linux-hotkeys/toggle-voice-edit.sh"
+
+# Install notifications if missing
+echo "Checking notifications..."
+if ! command -v notify-send &> /dev/null && ! command -v dunstify &> /dev/null; then
+    echo "Installing notification support..."
+    if command -v apt &> /dev/null; then
+        sudo apt install -y libnotify-bin
+    elif command -v dnf &> /dev/null; then
+        sudo dnf install -y libnotify
+    elif command -v pacman &> /dev/null; then
+        sudo pacman -S --noconfirm libnotify
+    elif command -v zypper &> /dev/null; then
+        sudo zypper install -y libnotify-tools
+    else
+        echo "Please install libnotify manually for your distribution"
+    fi
+fi
+
+# Test notifications
+if command -v notify-send &> /dev/null; then
+    notify-send "Setup Complete" "Agent-CLI hotkeys ready!" || echo "Notifications may not work in your environment"
+elif command -v dunstify &> /dev/null; then
+    dunstify "Setup Complete" "Agent-CLI hotkeys ready!" || echo "Notifications may not work in your environment"
+fi
+
+echo ""
+echo "Scripts ready! Add these hotkeys to your desktop environment:"
+echo ""
+echo "Hotkey Bindings:"
+echo " Super+Shift+R → $TRANSCRIBE_SCRIPT"
+echo " Super+Shift+A → $AUTOCORRECT_SCRIPT"
+echo " Super+Shift+V → $VOICE_EDIT_SCRIPT"
+echo ""
+echo "Configuration by Desktop Environment:"
+echo ""
+echo "Hyprland (~/.config/hypr/hyprland.conf):"
+echo " bind = SUPER SHIFT, R, exec, $TRANSCRIBE_SCRIPT"
+echo " bind = SUPER SHIFT, A, exec, $AUTOCORRECT_SCRIPT"
+echo " bind = SUPER SHIFT, V, exec, $VOICE_EDIT_SCRIPT"
+echo ""
+echo "Sway (~/.config/sway/config):"
+echo " bindsym \$mod+Shift+r exec $TRANSCRIBE_SCRIPT"
+echo " bindsym \$mod+Shift+a exec $AUTOCORRECT_SCRIPT"
+echo " bindsym \$mod+Shift+v exec $VOICE_EDIT_SCRIPT"
+echo ""
+echo "i3 (~/.config/i3/config):"
+echo " bindsym \$mod+Shift+r exec --no-startup-id $TRANSCRIBE_SCRIPT"
+echo " bindsym \$mod+Shift+a exec --no-startup-id $AUTOCORRECT_SCRIPT"
+echo " bindsym \$mod+Shift+v exec --no-startup-id $VOICE_EDIT_SCRIPT"
+echo ""
+echo "GNOME: Settings → Keyboard → View and Customize Shortcuts → Custom Shortcuts"
+echo "KDE: System Settings → Shortcuts → Custom Shortcuts"
+echo "XFCE: Settings Manager → Keyboard → Application Shortcuts"
+echo ""
+echo "For other environments, bind Super+Shift+R/A/V to the respective scripts."
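Before wiring up the desktop-environment bindings that setup-linux-hotkeys.sh prints, it can help to run one toggle script by hand and confirm it behaves as expected. A sketch, assuming the working directory is agent_cli/scripts/:

    # Trigger the transcription toggle once; a desktop notification should appear if notify-send or dunstify is available.
    ./linux-hotkeys/toggle-transcription.sh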
agent_cli/scripts/setup-linux.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+set -e
+
+echo "Setting up agent-cli services on Linux..."
+
+# Function to install uv based on the distribution
+install_uv() {
+    if command -v curl &> /dev/null; then
+        echo "Installing uv using curl..."
+        curl -LsSf https://astral.sh/uv/install.sh | sh
+        # Add to PATH for current session
+        export PATH="$HOME/.local/bin:$PATH"
+    else
+        echo "curl not found. Please install curl first:"
+        echo " Ubuntu/Debian: sudo apt install curl"
+        echo " Fedora/RHEL: sudo dnf install curl"
+        exit 1
+    fi
+}
+
+# Check if uv is installed
+if ! command -v uv &> /dev/null; then
+    echo "Installing uv..."
+    install_uv
+fi
+
+# Check for PortAudio (required for audio processing)
+echo "Checking PortAudio..."
+if ! pkg-config --exists portaudio-2.0 2>/dev/null; then
+    echo "ERROR: PortAudio development libraries are not installed."
+    echo ""
+    echo "PyAudio requires PortAudio. Install using your distribution's package manager:"
+    echo ""
+    echo "Ubuntu/Debian:"
+    echo " sudo apt install portaudio19-dev"
+    echo ""
+    echo "Fedora/RHEL/CentOS:"
+    echo " sudo dnf install portaudio-devel"
+    echo ""
+    echo "Arch Linux:"
+    echo " sudo pacman -S portaudio"
+    echo ""
+    echo "openSUSE:"
+    echo " sudo zypper install portaudio-devel"
+    echo ""
+    echo "After installing PortAudio, run this script again."
+    exit 1
+else
+    echo "PortAudio is already installed"
+fi
+
+# Install Ollama
+echo "Checking Ollama..."
+if ! command -v ollama &> /dev/null; then
+    echo "Installing Ollama..."
+    curl -fsSL https://ollama.ai/install.sh | sh
+    echo "Ollama installed successfully"
+else
+    echo "Ollama is already installed"
+fi
+
+# Check if zellij is available or offer alternatives
+if ! command -v zellij &> /dev/null; then
+    echo "Zellij not found. Installing..."
+
+    # Try different installation methods based on what's available
+    if command -v cargo &> /dev/null; then
+        echo "Installing zellij via cargo..."
+        cargo install zellij
+    elif command -v flatpak &> /dev/null; then
+        echo "Installing zellij via flatpak..."
+        flatpak install -y flathub org.zellij_developers.zellij
+    else
+        echo "Installing zellij binary..."
+        curl -L https://github.com/zellij-org/zellij/releases/latest/download/zellij-x86_64-unknown-linux-musl.tar.gz | tar -xz -C ~/.local/bin/
+        chmod +x ~/.local/bin/zellij
+        export PATH="$HOME/.local/bin:$PATH"
+    fi
+fi
+
+# Install agent-cli
+echo "Installing/upgrading agent-cli..."
+uv tool install --upgrade agent-cli
+
+# Preload default Ollama model
+echo "Preloading default Ollama model (gemma3:4b)..."
+echo "This may take a few minutes depending on your internet connection..."
+# Start Ollama in background, then pull model synchronously
+(ollama serve >/dev/null 2>&1 &) && sleep 2 && ollama pull gemma3:4b
+# Stop the temporary ollama server
+pkill -f "ollama serve" || true
+
+echo ""
+echo "Setup complete! You can now run the services:"
+echo ""
+echo "Option 1 - Run all services at once:"
+echo " scripts/start-all-services.sh"
+echo ""
+echo "Option 2 - Run services individually:"
+echo " 1. Ollama: ollama serve"
+echo " 2. Whisper: scripts/run-whisper.sh"
+echo " 3. Piper: scripts/run-piper.sh"
+echo " 4. OpenWakeWord: scripts/run-openwakeword.sh"
+echo ""
+echo "Note: Services use uvx to run without needing virtual environments."
+echo "For GPU acceleration, make sure NVIDIA drivers and CUDA are installed."
+echo "agent-cli has been installed and is ready to use!"
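After setup-linux.sh finishes, a few quick checks confirm the install. A sketch, assuming uv placed agent-cli on your PATH and an Ollama server is running:

    # agent-cli should be registered as a uv tool and respond to --help.
    uv tool list | grep agent-cli
    agent-cli --help

    # The preloaded model should show up in the local Ollama library.
    ollama list | grep gemma3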
agent_cli/scripts/setup-macos-hotkeys.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+set -e
+
+echo "Setting up macOS hotkeys..."
+
+# Check macOS
+if [[ "$(uname)" != "Darwin" ]]; then
+    echo "This script is for macOS only"
+    exit 1
+fi
+
+# Install dependencies
+echo "Installing dependencies..."
+if ! command -v brew &> /dev/null; then
+    echo "Installing Homebrew..."
+    /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+fi
+
+brew install terminal-notifier
+brew tap jackielii/tap && brew install jackielii/tap/skhd-zig
+
+# Setup configuration
+echo "Setting up configuration..."
+mkdir -p ~/.config/skhd
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+TRANSCRIBE_SCRIPT="$SCRIPT_DIR/macos-hotkeys/toggle-transcription.sh"
+AUTOCORRECT_SCRIPT="$SCRIPT_DIR/macos-hotkeys/toggle-autocorrect.sh"
+VOICE_EDIT_SCRIPT="$SCRIPT_DIR/macos-hotkeys/toggle-voice-edit.sh"
+
+cat > ~/.config/skhd/skhdrc << EOF
+# Agent-CLI Hotkeys
+cmd + shift - r : "$TRANSCRIBE_SCRIPT"
+cmd + shift - a : "$AUTOCORRECT_SCRIPT"
+cmd + shift - v : "$VOICE_EDIT_SCRIPT"
+EOF
+
+# Start service
+echo "Starting skhd..."
+skhd --start-service
+
+# Test
+echo "Testing..."
+terminal-notifier -title "Setup Complete" -message "Agent-CLI hotkeys ready!"
+
+echo ""
+echo "Done! Hotkeys:"
+echo " Cmd+Shift+R - Transcribe voice"
+echo " Cmd+Shift+A - Autocorrect clipboard"
+echo " Cmd+Shift+V - Voice edit clipboard"
+echo ""
+echo "If the hotkey doesn't work:"
+echo "1. Open System Settings → Privacy & Security → Accessibility"
+echo "2. Add and enable 'skhd'"
+echo ""
+echo "If the notification doesn't show:"
+echo "1. Open System Settings → Notifications"
+echo "2. Find 'terminal-notifier' and allow notifications"
+echo "3. Set Alert style to Persistent for better visibility"
+echo "4. Enable 'Allow notification when mirroring or sharing the display'"
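If ~/.config/skhd/skhdrc is edited later, the running daemon needs to reload it. Upstream skhd exposes a restart flag for its service mode; assuming the skhd-zig build installed above accepts the same flag:

    # Restart the hotkey daemon so the updated skhdrc takes effect.
    skhd --restart-service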
agent_cli/scripts/setup-macos.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+set -e
+
+echo "Setting up agent-cli services on macOS..."
+
+# Check if Homebrew is installed
+if ! command -v brew &> /dev/null; then
+    echo "Homebrew is not installed. Please install Homebrew first:"
+    echo "/bin/bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\""
+    exit 1
+fi
+
+# Check if uv is installed
+if ! command -v uv &> /dev/null; then
+    echo "Installing uv..."
+    brew install uv
+fi
+
+# Install Ollama
+echo "Checking Ollama..."
+if ! command -v ollama &> /dev/null; then
+    echo "Installing Ollama via Homebrew..."
+    brew install ollama
+    echo "Ollama installed successfully"
+else
+    echo "Ollama is already installed"
+fi
+
+# Check if zellij is installed
+if ! command -v zellij &> /dev/null; then
+    echo "Installing zellij..."
+    brew install zellij
+fi
+
+# Install agent-cli
+echo "Installing/upgrading agent-cli..."
+uv tool install --upgrade agent-cli
+
+# Start Ollama as a background service
+echo "Starting Ollama as a background service..."
+brew services start ollama
+
+# Preload default Ollama model
+echo "Preloading default Ollama model (gemma3:4b)..."
+echo "This may take a few minutes depending on your internet connection..."
+sleep 2 # Give Ollama service time to start
+ollama pull gemma3:4b
+
+# Install wyoming-mlx-whisper as a launchd service (Apple Silicon only)
+if [ "$(uname -m)" = "arm64" ]; then
+    echo "Installing wyoming-mlx-whisper as a background service..."
+    echo " This will run speech-to-text on Apple Silicon using MLX"
+    curl -fsSL https://raw.githubusercontent.com/basnijholt/wyoming-mlx-whisper/main/scripts/install_service.sh | bash
+    echo "wyoming-mlx-whisper installed as launchd service"
+else
+    echo "Skipping wyoming-mlx-whisper service (Intel Mac - use Linux-style setup)"
+fi
+
+echo ""
+echo "Setup complete! You can now run the services:"
+echo ""
+echo "Option 1 - Run all services at once:"
+echo " ./start-all-services.sh"
+echo ""
+echo "Option 2 - Run services individually:"
+echo " 1. Ollama: running as brew service (brew services start ollama)"
+if [ "$(uname -m)" = "arm64" ]; then
+    echo " 2. Whisper: running as launchd service (wyoming-mlx-whisper)"
+else
+    echo " 2. Whisper: ./run-whisper.sh"
+fi
+echo " 3. Piper: ./run-piper.sh"
+echo " 4. OpenWakeWord: ./run-openwakeword.sh"
+echo ""
+echo "agent-cli has been installed and is ready to use!"
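After setup-macos.sh completes, the background services can be checked. A sketch; the exact launchd label for the Whisper job is an assumption and is only matched loosely here:

    # Ollama should appear as a started brew service.
    brew services list | grep ollama

    # On Apple Silicon, look for the wyoming-mlx-whisper launchd job installed by the script.
    launchctl list | grep -i wyoming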