hypercli-cli 2026.3.13__tar.gz → 2026.3.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/PKG-INFO +4 -4
- hypercli_cli-2026.3.18/hypercli_cli/__init__.py +1 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/agent.py +158 -88
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/agents.py +56 -31
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/cli.py +3 -1
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/pyproject.toml +4 -4
- hypercli_cli-2026.3.18/tests/test_openclaw_config.py +63 -0
- hypercli_cli-2026.3.13/hypercli_cli/__init__.py +0 -1
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/.gitignore +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/README.md +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/billing.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/comfyui.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/embed.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/flow.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/instances.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/jobs.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/keys.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/onboard.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/output.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/renders.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/stt.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/tui/__init__.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/tui/job_monitor.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/user.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/voice.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/wallet.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/tests/test_exec_shell_dryrun.py +0 -0
- {hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/tests/test_jobs_list_tags.py +0 -0
{hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hypercli-cli
-Version: 2026.3.13
+Version: 2026.3.18
 Summary: CLI for HyperCLI - GPU orchestration and LLM API
 Project-URL: Homepage, https://hypercli.com
 Project-URL: Documentation, https://docs.hypercli.com
@@ -9,7 +9,7 @@ Author-email: HyperCLI <support@hypercli.com>
 License: MIT
 Requires-Python: >=3.10
 Requires-Dist: httpx>=0.27.0
-Requires-Dist: hypercli-sdk>=2026.3.
+Requires-Dist: hypercli-sdk>=2026.3.18
 Requires-Dist: mutagen>=1.47.0
 Requires-Dist: pyyaml>=6.0
 Requires-Dist: rich>=14.2.0
@@ -19,11 +19,11 @@ Provides-Extra: all
 Requires-Dist: argon2-cffi>=25.0.0; extra == 'all'
 Requires-Dist: eth-account>=0.13.0; extra == 'all'
 Requires-Dist: faster-whisper>=1.1.0; extra == 'all'
-Requires-Dist: hypercli-sdk[comfyui]>=2026.3.
+Requires-Dist: hypercli-sdk[comfyui]>=2026.3.18; extra == 'all'
 Requires-Dist: web3>=7.0.0; extra == 'all'
 Requires-Dist: x402[evm,httpx]>=2.0.0; extra == 'all'
 Provides-Extra: comfyui
-Requires-Dist: hypercli-sdk[comfyui]>=2026.3.
+Requires-Dist: hypercli-sdk[comfyui]>=2026.3.18; extra == 'comfyui'
 Provides-Extra: dev
 Requires-Dist: pytest>=8.0.0; extra == 'dev'
 Requires-Dist: ruff>=0.3.0; extra == 'dev'
hypercli_cli-2026.3.18/hypercli_cli/__init__.py (new file)

@@ -0,0 +1 @@
+__version__ = "2026.3.18"
{hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/agent.py

@@ -55,6 +55,8 @@ HYPERCLI_DIR = Path.home() / ".hypercli"
 AGENT_KEY_PATH = HYPERCLI_DIR / "agent-key.json"
 DEV_API_BASE = "https://api.dev.hypercli.com"
 PROD_API_BASE = "https://api.hypercli.com"
+DEV_INFERENCE_API_BASE = "https://api.agents.dev.hypercli.com"
+PROD_INFERENCE_API_BASE = "https://api.agents.hypercli.com"
 
 
 def require_x402_deps():
@@ -544,12 +546,43 @@ OPENCLAW_CONFIG_PATH = Path.home() / ".openclaw" / "openclaw.json"
 
 def _resolve_api_base(base_url: str | None = None, dev: bool = False) -> str:
     """Resolve API base from flag/env, then fall back to dev/prod defaults."""
-    return (
+    return (
+        base_url
+        or os.environ.get("HYPERCLAW_API_BASE")
+        or (DEV_INFERENCE_API_BASE if dev else PROD_INFERENCE_API_BASE)
+    ).rstrip("/")
 
 
-def fetch_models(api_key: str, api_base: str =
+def fetch_models(api_key: str, api_base: str = PROD_INFERENCE_API_BASE) -> list[dict]:
     """Fetch available models from LiteLLM /v1/models (served by HyperClaw)."""
     import httpx
+
+    def _infer_mode(model_id: str) -> str | None:
+        normalized = (model_id or "").strip().lower()
+        if "embedding" in normalized:
+            return "embedding"
+        return None
+
+    def _meta_for_model(model_id: str) -> dict:
+        normalized = (model_id or "").strip().lower()
+        aliases = {
+            "kimi-k2.5": {"name": "Kimi K2.5", "reasoning": True, "contextWindow": 262144},
+            "moonshotai/kimi-k2.5": {"name": "Kimi K2.5", "reasoning": True, "contextWindow": 262144},
+            "glm-5": {"name": "GLM-5", "reasoning": True, "contextWindow": 202752},
+            "zai-org/glm-5": {"name": "GLM-5", "reasoning": True, "contextWindow": 202752},
+            "qwen3-embedding-4b": {
+                "name": "Qwen3 Embedding 4B",
+                "reasoning": False,
+                "contextWindow": 32768,
+                "mode": "embedding",
+                "input": ["text"],
+            },
+        }
+        if normalized in aliases:
+            return aliases[normalized]
+        suffix = normalized.rsplit("/", 1)[-1]
+        return aliases.get(suffix, {})
+
     try:
         resp = httpx.get(
             f"{api_base}/v1/models",
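For illustration (not part of the diff), the new resolution order means an explicit base URL beats the HYPERCLAW_API_BASE environment variable, which in turn beats the dev/prod inference defaults, and trailing slashes are stripped. A minimal sketch, assuming the package is importable; the hostnames below are placeholders, not HyperCLI endpoints:

    import os
    from hypercli_cli.agent import _resolve_api_base, DEV_INFERENCE_API_BASE

    os.environ["HYPERCLAW_API_BASE"] = "https://proxy.example.test/"
    assert _resolve_api_base(dev=True) == "https://proxy.example.test"              # env var wins over defaults
    assert _resolve_api_base("https://override.test/") == "https://override.test"   # explicit flag wins over env var
    del os.environ["HYPERCLAW_API_BASE"]
    assert _resolve_api_base(dev=True) == DEV_INFERENCE_API_BASE                    # falls back to the agents dev base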
@@ -558,19 +591,14 @@ def fetch_models(api_key: str, api_base: str = PROD_API_BASE) -> list[dict]:
         )
         resp.raise_for_status()
         data = resp.json().get("data", [])
-        # Known model metadata (context windows, reasoning, etc.)
-        MODEL_META = {
-            "kimi-k2.5": {"name": "Kimi K2.5", "reasoning": True, "contextWindow": 262144},
-            "glm-5": {"name": "GLM-5", "reasoning": True, "contextWindow": 202752},
-        }
         return [
             {
                 "id": m["id"],
-                "name":
-                "reasoning":
-                "input": ["text"],
-                "contextWindow":
-                **({"mode": m["mode"]} if m.get("mode") else {}),
+                "name": _meta_for_model(m["id"]).get("name", m["id"].replace("-", " ").title()),
+                "reasoning": _meta_for_model(m["id"]).get("reasoning", False),
+                "input": _meta_for_model(m["id"]).get("input", ["text", "image"]),
+                "contextWindow": _meta_for_model(m["id"]).get("contextWindow", 200000),
+                **({"mode": m.get("mode") or _meta_for_model(m["id"]).get("mode") or _infer_mode(m["id"])} if (m.get("mode") or _meta_for_model(m["id"]).get("mode") or _infer_mode(m["id"])) else {}),
             }
             for m in data
             if m.get("id")
@@ -583,16 +611,24 @@ def fetch_models(api_key: str, api_base: str = PROD_API_BASE) -> list[dict]:
             "id": "kimi-k2.5",
             "name": "Kimi K2.5",
             "reasoning": True,
-            "input": ["text"],
+            "input": ["text", "image"],
             "contextWindow": 262144,
         },
         {
             "id": "glm-5",
             "name": "GLM-5",
             "reasoning": True,
-            "input": ["text"],
+            "input": ["text", "image"],
             "contextWindow": 202752,
         },
+        {
+            "id": "qwen3-embedding-4b",
+            "name": "Qwen3 Embedding 4B",
+            "reasoning": False,
+            "input": ["text"],
+            "contextWindow": 32768,
+            "mode": "embedding",
+        },
     ]
 
 
@@ -620,49 +656,21 @@ def openclaw_setup(
         console.print("[red]❌ Invalid key file — missing 'key' field[/red]")
         raise typer.Exit(1)
 
-
+    config = {}
     if OPENCLAW_CONFIG_PATH.exists():
         with open(OPENCLAW_CONFIG_PATH) as f:
             config = json.load(f)
-    else:
-        config = {}
-
-    # Fetch current model list from LiteLLM via API
-    models = fetch_models(api_key)
-
-    # Patch models.providers.hyperclaw + embedding config
-    config.setdefault("models", {}).setdefault("providers", {})
-    chat_models = [m for m in models if m.get("mode") != "embedding"]
-    embedding_models = [m for m in models if m.get("mode") == "embedding"]
-    config["models"]["providers"]["hyperclaw"] = {
-        "baseUrl": "https://api.hypercli.com",
-        "apiKey": api_key,
-        "api": "anthropic-messages",
-        "models": chat_models,
-    }
-    config["models"]["providers"]["hyperclaw-embed"] = {
-        "baseUrl": "https://api.hypercli.com/v1",
-        "apiKey": api_key,
-        "api": "openai-completions",
-        "models": embedding_models,
-    }
 
-
-
-
-    "
-    "model"
-    "
-
-    "
-    }
-    }
+    models = fetch_models(api_key, PROD_INFERENCE_API_BASE)
+    snippet = _config_openclaw(api_key, models, PROD_INFERENCE_API_BASE)
+    if not default:
+        defaults = (((snippet.get("agents") or {}).get("defaults") or {}))
+        model_cfg = defaults.get("model") or {}
+        model_cfg.pop("primary", None)
+        if not model_cfg and "model" in defaults:
+            defaults.pop("model", None)
 
-
-    if default:
-        config["agents"]["defaults"].setdefault("model", {})
-        if chat_models:
-            config["agents"]["defaults"]["model"]["primary"] = f"hyperclaw/{chat_models[0]['id']}"
+    _deep_merge(config, snippet)
 
     # Write back
     OPENCLAW_CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
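The setup path above now delegates merging to _deep_merge(config, snippet), a helper defined elsewhere in agent.py and not shown in this diff. A hypothetical sketch of what such a recursive merge typically does, for readers following the flow (the real implementation may differ):

    def deep_merge_sketch(dst: dict, src: dict) -> dict:
        """Recursively fold src into dst: nested dicts merge, other values overwrite."""
        for key, value in src.items():
            if isinstance(value, dict) and isinstance(dst.get(key), dict):
                deep_merge_sketch(dst[key], value)   # e.g. models.providers keeps existing providers
            else:
                dst[key] = value                     # scalars and lists are replaced wholesale
        return dst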
@@ -671,15 +679,14 @@ def openclaw_setup(
         f.write("\n")
 
     console.print(f"[green]✅ Patched {OPENCLAW_CONFIG_PATH}[/green]")
-
-
-    console.print("
-
-
-
-
-
-    console.print(f" default model: hyperclaw/{chat_models[0]['id']}")
+    providers = ((snippet.get("models") or {}).get("providers") or {})
+    for provider_id, provider_cfg in providers.items():
+        console.print(f" provider: {provider_id} key: {api_key[:16]}...")
+        for m in provider_cfg.get("models") or []:
+            console.print(f" model: {provider_id}/{m['id']}")
+    primary = ((((snippet.get("agents") or {}).get("defaults") or {}).get("model") or {}).get("primary"))
+    if primary:
+        console.print(f" default model: {primary}")
 
     console.print("\nOpenClaw will use the Anthropic-compatible /v1/messages endpoint.")
     console.print("Run: [bold]openclaw gateway restart[/bold]")
 
@@ -703,11 +710,43 @@ def _resolve_api_key(key: str | None) -> str:
     raise typer.Exit(1)
 
 
-def _config_openclaw(
-
+def _config_openclaw(
+    api_key: str,
+    models: list[dict],
+    api_base: str = PROD_INFERENCE_API_BASE,
+    placeholder_env: str | None = None,
+) -> dict:
+    """OpenClaw openclaw.json provider snippet (LLM only)."""
+    def _model_suffix(model_id: str) -> str:
+        return str(model_id or "").strip().lower().rsplit("/", 1)[-1]
+
+    def _is_supported_openclaw_model(model: dict) -> bool:
+        suffix = _model_suffix(model.get("id", ""))
+        return (
+            suffix == "glm-5"
+            or "kimi" in suffix
+            or "embedding" in suffix
+        )
+
     api_base = api_base.rstrip("/")
-
-
+    supported_models = [m for m in models if _is_supported_openclaw_model(m)]
+    chat_models = [m for m in supported_models if m.get("mode") != "embedding"]
+    embedding_models = [m for m in supported_models if m.get("mode") == "embedding"]
+    kimi_models = [m for m in chat_models if "kimi" in _model_suffix(m.get("id", ""))]
+    glm_models = [m for m in chat_models if _model_suffix(m.get("id", "")) == "glm-5"]
+    openai_chat_models = [
+        m for m in chat_models
+        if m not in kimi_models and m not in glm_models
+    ]
+    embedding_model_id = embedding_models[0]["id"] if embedding_models else None
+    primary_model = (
+        f"kimi-coding/{kimi_models[0]['id']}" if kimi_models else (
+            f"hyperclaw/{glm_models[0]['id']}" if glm_models else (
+                f"hyperclaw-openai/{openai_chat_models[0]['id']}" if openai_chat_models else None
+            )
+        )
+    )
+    config_api_key = f"${{{placeholder_env}}}" if placeholder_env else api_key
     return {
         "models": {
             "mode": "merge",
@@ -715,39 +754,69 @@ def _config_openclaw(api_key: str, models: list[dict], api_base: str = PROD_API_
                 "hyperclaw": {
                     # OpenClaw/pi-ai appends /v1/messages for anthropic-messages.
                     "baseUrl": api_base,
-                    "apiKey":
+                    "apiKey": config_api_key,
                     "api": "anthropic-messages",
-                    "models":
-                },
-                "hyperclaw-embed": {
-                    # Embeddings go through the OpenAI-compatible /v1 endpoints.
-                    "baseUrl": f"{api_base}/v1",
-                    "apiKey": api_key,
-                    "api": "openai-completions",
-                    "models": embedding_models,
+                    "models": glm_models,
                 },
+                **(
+                    {
+                        "kimi-coding": {
+                            # Use the upstream Kimi provider semantics while still
+                            # routing requests through the HyperClaw Anthropic proxy.
+                            "baseUrl": api_base,
+                            "apiKey": config_api_key,
+                            "api": "anthropic-messages",
+                            "headers": {
+                                "User-Agent": "claude-code/0.1.0",
+                            },
+                            "models": kimi_models,
+                        },
+                    }
+                    if kimi_models
+                    else {}
+                ),
+                **(
+                    {
+                        "hyperclaw-openai": {
+                            "baseUrl": f"{api_base}/v1",
+                            "apiKey": config_api_key,
+                            "api": "openai-completions",
+                            "models": openai_chat_models,
+                        },
+                    }
+                    if openai_chat_models
+                    else {}
+                ),
             }
         },
         "agents": {
             "defaults": {
+                **({"model": {"primary": primary_model}} if primary_model else {}),
                 "models": {
-                    **{f"hyperclaw/{m['id']}": {"alias":
-                    **{f"
+                    **{f"hyperclaw/{m['id']}": {"alias": "glm"} for m in glm_models},
+                    **{f"kimi-coding/{m['id']}": {"alias": "kimi"} for m in kimi_models},
+                    **{f"hyperclaw-openai/{m['id']}": {"alias": m['id'].split('-')[0]} for m in openai_chat_models},
                 },
-
-
-
-
-
-
+                **(
+                    {
+                        "memorySearch": {
+                            "provider": "openai",
+                            "model": embedding_model_id,
+                            "remote": {
+                                "baseUrl": f"{api_base}/v1",
+                                "apiKey": config_api_key,
+                            },
+                        }
                    }
-
+                    if embedding_model_id
+                    else {}
+                ),
             }
         }
     }
 
 
-def _config_opencode(api_key: str, models: list[dict], api_base: str =
+def _config_opencode(api_key: str, models: list[dict], api_base: str = PROD_INFERENCE_API_BASE) -> dict:
     """OpenCode opencode.json provider snippet."""
     api_base = api_base.rstrip("/")
     model_entries = {}
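To make the new provider layout concrete: for a runtime model list containing kimi-k2.5, glm-5 and qwen3-embedding-4b, _config_openclaw produces roughly the structure below (abridged; only the keys asserted by the new tests are shown, other fields omitted):

    {
        "models": {
            "mode": "merge",
            "providers": {
                "hyperclaw": {"api": "anthropic-messages", "models": [{"id": "glm-5"}]},        # GLM via the Anthropic proxy
                "kimi-coding": {"api": "anthropic-messages", "models": [{"id": "kimi-k2.5"}]},  # Kimi with the claude-code User-Agent
            },
        },
        "agents": {
            "defaults": {
                "model": {"primary": "kimi-coding/kimi-k2.5"},
                "memorySearch": {"provider": "openai", "model": "qwen3-embedding-4b"},
            },
        },
    }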
@@ -769,7 +838,7 @@ def _config_opencode(api_key: str, models: list[dict], api_base: str = PROD_API_
     }
 
 
-def _config_env(api_key: str, models: list[dict], api_base: str =
+def _config_env(api_key: str, models: list[dict], api_base: str = PROD_INFERENCE_API_BASE) -> str:
     """Shell env vars for generic OpenAI-compatible tools."""
     api_base = api_base.rstrip("/")
     lines = [
@@ -786,7 +855,7 @@ def exec_cmd(
     command: str = typer.Argument(..., help="Command to execute"),
     timeout: int = typer.Option(30, "--timeout", "-t", help="Command timeout (seconds)"),
 ):
-    """Execute a command on a
+    """Execute a command on a `hypercli-openclaw` agent container."""
     from . import agents
 
     agents.exec_cmd(agent_id=agent_id, command=command, timeout=timeout)
@@ -796,7 +865,7 @@ def exec_cmd(
 def shell_cmd(
     agent_id: str = typer.Argument(..., help="Agent ID (or prefix)"),
 ):
-    """Open an interactive shell on a
+    """Open an interactive shell on a `hypercli-openclaw` agent container."""
     from . import agents
 
     agents.shell(agent_id=agent_id)
@@ -813,6 +882,7 @@ def config_cmd(
     ),
     key: str = typer.Option(None, "--key", "-k", help="API key (sk-...). Falls back to ~/.hypercli/agent-key.json"),
     base_url: str = typer.Option(None, "--base-url", help="HyperClaw API base URL. Falls back to HYPERCLAW_API_BASE, then --dev/prod defaults"),
+    placeholder_env: str = typer.Option(None, "--placeholder-env", help="Write ${ENV_VAR} placeholders into generated config instead of literal API keys"),
     apply: bool = typer.Option(False, "--apply", help="Write config to the appropriate file (openclaw/opencode only)"),
     dev: bool = typer.Option(False, "--dev", help="Use dev API"),
 ):
@@ -844,7 +914,7 @@ def config_cmd(
 
     for fmt in formats:
         if fmt == "openclaw":
-            snippet = _config_openclaw(api_key, models, api_base)
+            snippet = _config_openclaw(api_key, models, api_base, placeholder_env=placeholder_env)
             _show_snippet("OpenClaw", "~/.openclaw/openclaw.json", snippet, apply, OPENCLAW_CONFIG_PATH)
         elif fmt == "opencode":
             snippet = _config_opencode(api_key, models, api_base)
{hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/agents.py

@@ -12,9 +12,9 @@ import typer
 from rich.console import Console
 from rich.table import Table
 
-from hypercli.agents import Agent, Deployments, OpenClawAgent
+from hypercli.agents import Agent, Deployments, OpenClawAgent, DEFAULT_OPENCLAW_IMAGE
 
-app = typer.Typer(help="Manage OpenClaw agent pods
+app = typer.Typer(help="Manage OpenClaw agent pods")
 console = Console()
 PROD_API_BASE = "https://api.hypercli.com"
 DEV_API_BASE = "https://api.dev.hypercli.com"
@@ -29,6 +29,13 @@ STATE_DIR = Path.home() / ".hypercli"
 AGENTS_STATE = STATE_DIR / "agents.json"
 
 
+def _default_openclaw_image(image: str | None, config: dict | None = None) -> str:
+    if image:
+        return image
+    configured = str((config or {}).get("image") or "").strip()
+    return configured or DEFAULT_OPENCLAW_IMAGE
+
+
 @app.callback()
 def agents_root(
     dev: bool = typer.Option(False, "--dev", help="Use the dev HyperClaw agents API"),
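A quick illustration of the precedence the helper above implements (the image names here are made up; DEFAULT_OPENCLAW_IMAGE comes from the SDK and its value is not shown in this diff):

    from hypercli.agents import DEFAULT_OPENCLAW_IMAGE
    from hypercli_cli.agents import _default_openclaw_image

    assert _default_openclaw_image("ghcr.io/example/custom:dev") == "ghcr.io/example/custom:dev"            # explicit --image wins
    assert _default_openclaw_image(None, {"image": "ghcr.io/example/saved:1"}) == "ghcr.io/example/saved:1"  # saved launch_config next
    assert _default_openclaw_image(None, {}) == DEFAULT_OPENCLAW_IMAGE                                       # SDK default last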
@@ -125,10 +132,11 @@ def _resolve_agent(agent_id: str) -> str:
 
 def _get_pod_with_token(agent_id: str) -> Agent:
     """Get an agent, filling JWT from local state if needed."""
+    resolved_agent_id = _resolve_agent(agent_id)
     agents = _get_deployments_client()
-    pod = agents.get(
+    pod = agents.get(resolved_agent_id)
     state = _load_state()
-    local = state.get(
+    local = state.get(resolved_agent_id, {})
     if not pod.jwt_token and local.get("jwt_token"):
         pod.jwt_token = local["jwt_token"]
     if isinstance(pod, OpenClawAgent) and not pod.gateway_token and local.get("gateway_token"):
@@ -274,7 +282,7 @@ def create(
     port: list[str] = typer.Option(None, "--port", help="Expose port as PORT or PORT:noauth. Repeatable."),
     command: str = typer.Option(None, "--command", help="Container args as a shell-style string"),
     entrypoint: str = typer.Option(None, "--entrypoint", help="Container entrypoint as a shell-style string"),
-    image: str = typer.Option(None, "--image", help="Override the default
+    image: str = typer.Option(None, "--image", help="Override the default OpenClaw image"),
     registry_url: str = typer.Option(None, "--registry-url", help="Container registry URL for private image pulls"),
     registry_username: str = typer.Option(None, "--registry-username", help="Registry username"),
     registry_password: str = typer.Option(None, "--registry-password", help="Registry password"),
@@ -303,7 +311,7 @@ def create(
         ports=ports_list,
         command=command_argv,
         entrypoint=entrypoint_argv,
-        image=image,
+        image=_default_openclaw_image(image),
         registry_url=registry_url,
         registry_auth=registry_auth,
         gateway_token=gateway_token,
@@ -322,7 +330,7 @@ def create(
     console.print(f" Size: {pod.cpu} CPU, {pod.memory} GB")
     console.print(f" State: {pod.state}")
     console.print(f" Desktop: {pod.vnc_url}")
-    console.print(f" Shell: {pod.shell_url}")
+    console.print(f" Shell: {'via hyper agents shell' if not pod.shell_url else pod.shell_url}")
     display_ports = pod.ports or ports_list or []
     for p in display_ports:
         auth_text = "auth" if p.get("auth", True) else "noauth"
@@ -330,26 +338,14 @@ def create(
 
     if wait and not pod.dry_run:
         console.print("\n[dim]Waiting for pod to start...[/dim]")
-
-
-
-
-
-
-
-
-                elif pod.state in ("failed", "stopped"):
-                    console.print(f"[red]❌ Agent failed: {pod.state}[/red]")
-                    if pod.last_error:
-                        console.print(f" Error: {pod.last_error}")
-                    raise typer.Exit(1)
-                else:
-                    console.print(f" [{i*5}s] State: {pod.state}")
-            except typer.Exit:
-                raise
-            except Exception as e:
-                console.print(f" [{i*5}s] Checking... ({e})")
-        else:
+        try:
+            pod = agents.wait_running(pod.id, timeout=300, poll_interval=5)
+            _save_pod_state(pod)
+            console.print(f"[green]✅ Agent is running![/green]")
+        except RuntimeError as e:
+            console.print(f"[red]❌ Agent failed: {e}[/red]")
+            raise typer.Exit(1)
+        except TimeoutError:
             console.print("[yellow]⚠ Timed out (5 min). Pod may still be starting.[/yellow]")
 
     if pod.dry_run:
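The hand-rolled polling loop is replaced by the SDK's Deployments.wait_running(), which this hunk shows raising RuntimeError on a failed/stopped pod and TimeoutError when the deadline passes. A rough sketch of that contract, assuming a client whose get() returns objects with a state field (the actual SDK implementation may differ):

    import time

    def wait_running_sketch(agents, agent_id: str, timeout: int = 300, poll_interval: float = 5.0):
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            pod = agents.get(agent_id)
            if pod.state == "running":
                return pod
            if pod.state in ("failed", "stopped"):
                raise RuntimeError(f"agent entered state {pod.state}")
            time.sleep(poll_interval)
        raise TimeoutError(f"agent {agent_id} not running after {timeout}s")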
@@ -360,6 +356,33 @@ def create(
     console.print(f"Desktop: {pod.vnc_url}")
 
 
+@app.command("wait")
+def wait_agent(
+    agent_id: str = typer.Argument(None, help="Agent ID or name"),
+    timeout: int = typer.Option(300, "--timeout", help="Seconds to wait for RUNNING"),
+    poll_interval: float = typer.Option(5.0, "--poll-interval", help="Seconds between polls"),
+):
+    """Wait for an agent to reach RUNNING."""
+    agents = _get_deployments_client()
+    pod = _get_pod_with_token(agent_id)
+
+    try:
+        pod = agents.wait_running(pod.id, timeout=timeout, poll_interval=poll_interval)
+    except RuntimeError as e:
+        console.print(f"[red]❌ Agent failed: {e}[/red]")
+        raise typer.Exit(1)
+    except TimeoutError as e:
+        console.print(f"[yellow]⚠ {e}[/yellow]")
+        raise typer.Exit(1)
+
+    _save_pod_state(pod)
+    console.print(f"[green]✅ Agent is running:[/green] [bold]{pod.id[:12]}[/bold]")
+    console.print(f" Name: {pod.name or pod.pod_name}")
+    console.print(f" State: {pod.state}")
+    console.print(f" Desktop: {pod.vnc_url}")
+    console.print(f" Shell: {'via hyper agents shell' if not pod.shell_url else pod.shell_url}")
+
+
 @app.command("list")
 def list_agents(
     json_output: bool = typer.Option(False, "--json", help="JSON output"),
@@ -442,7 +465,7 @@ def status(
     console.print(f" Size: {pod.cpu} CPU, {pod.memory} GB")
     console.print(f" State: {pod.state}")
     console.print(f" Desktop: {pod.vnc_url}")
-    console.print(f" Shell: {pod.shell_url}")
+    console.print(f" Shell: {'via hyper agents shell' if not pod.shell_url else pod.shell_url}")
     console.print(f" Created: {pod.created_at}")
     if pod.started_at:
         console.print(f" Started: {pod.started_at}")
@@ -470,7 +493,7 @@ def start(
     port: list[str] = typer.Option(None, "--port", help="Expose port as PORT or PORT:noauth. Repeatable."),
     command: str = typer.Option(None, "--command", help="Container args as a shell-style string"),
     entrypoint: str = typer.Option(None, "--entrypoint", help="Container entrypoint as a shell-style string"),
-    image: str = typer.Option(None, "--image", help="Override the default
+    image: str = typer.Option(None, "--image", help="Override the default OpenClaw image"),
     registry_url: str = typer.Option(None, "--registry-url", help="Container registry URL for private image pulls"),
     registry_username: str = typer.Option(None, "--registry-username", help="Registry username"),
     registry_password: str = typer.Option(None, "--registry-password", help="Registry password"),
@@ -489,6 +512,7 @@ def start(
     registry_auth = _build_registry_auth(registry_username, registry_password)
     launch_config = dict(local.get("launch_config") or {})
     effective_gateway_token = gateway_token or local.get("gateway_token")
+    effective_image = _default_openclaw_image(image, launch_config)
 
     try:
         pod = agents.start(
@@ -498,7 +522,7 @@ def start(
             ports=ports_list,
             command=command_argv,
             entrypoint=entrypoint_argv,
-            image=
+            image=effective_image,
             registry_url=registry_url,
             registry_auth=registry_auth,
             gateway_token=effective_gateway_token,
@@ -978,12 +1002,13 @@ def gateway_cron(
 def gateway_chat(
     agent_id: str = typer.Argument(None, help="Agent ID or name"),
     message: str = typer.Argument(..., help="Message to send"),
+    session_key: str = typer.Option("main", "--session-key", help="Gateway chat session key"),
 ):
     """Send a chat message to an agent via the Gateway and stream the response."""
     pod = _require_openclaw_agent(_get_pod_with_token(agent_id))
 
     async def _run():
-        async for event in pod.chat_send(message):
+        async for event in pod.chat_send(message, session_key=session_key):
             if event.type == "content":
                 print(event.text, end="", flush=True)
             elif event.type == "thinking":
{hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/hypercli_cli/cli.py

@@ -1,5 +1,6 @@
 """HyperCLI - Main entry point"""
 import sys
+import json
 import typer
 from rich.console import Console
 from rich.prompt import Prompt
@@ -143,7 +144,8 @@ def cli():
     try:
         app()
     except APIError as e:
-
+        raw_detail = e.detail or str(e)
+        detail = raw_detail if isinstance(raw_detail, str) else json.dumps(raw_detail)
 
         # Check for GPU type errors and suggest corrections
         if "GPU type" in detail and "not found" in detail and "Available:" in detail:
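The normalization matters because APIError.detail may be a structured payload rather than a string, and the substring checks that follow require str. A small example with a made-up error payload:

    import json

    raw_detail = {"error": "GPU type 'h100x' not found", "Available": ["h100", "a100"]}
    detail = raw_detail if isinstance(raw_detail, str) else json.dumps(raw_detail)
    assert isinstance(detail, str) and "not found" in detail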
{hypercli_cli-2026.3.13 → hypercli_cli-2026.3.18}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "hypercli-cli"
-version = "2026.3.13"
+version = "2026.3.18"
 description = "CLI for HyperCLI - GPU orchestration and LLM API"
 readme = "README.md"
 requires-python = ">=3.10"
@@ -13,7 +13,7 @@ authors = [
     { name = "HyperCLI", email = "support@hypercli.com" }
 ]
 dependencies = [
-    "hypercli-sdk>=2026.3.
+    "hypercli-sdk>=2026.3.18",
     "typer>=0.20.0",
     "rich>=14.2.0",
     "websocket-client>=1.6.0",
@@ -24,7 +24,7 @@ dependencies = [
 
 [project.optional-dependencies]
 comfyui = [
-    "hypercli-sdk[comfyui]>=2026.3.
+    "hypercli-sdk[comfyui]>=2026.3.18",
 ]
 wallet = [
     "x402[httpx,evm]>=2.0.0",
@@ -37,7 +37,7 @@ stt = [
     "faster-whisper>=1.1.0",
 ]
 all = [
-    "hypercli-sdk[comfyui]>=2026.3.
+    "hypercli-sdk[comfyui]>=2026.3.18",
     "x402[httpx,evm]>=2.0.0",
    "eth-account>=0.13.0",
     "web3>=7.0.0",
hypercli_cli-2026.3.18/tests/test_openclaw_config.py (new file)

@@ -0,0 +1,63 @@
+from hypercli_cli.agent import _config_openclaw
+
+
+def test_config_openclaw_limits_runtime_models_to_supported_set():
+    api_key = "sk-test"
+    api_base = "https://api.agents.hypercli.com"
+    models = [
+        {"id": "kimi-k2.5", "name": "Kimi K2.5", "reasoning": True},
+        {"id": "glm-5", "name": "GLM-5", "reasoning": True},
+        {
+            "id": "qwen3-embedding-4b",
+            "name": "Qwen3 Embedding 4B",
+            "reasoning": False,
+            "mode": "embedding",
+        },
+        {"id": "claude-sonnet-4", "name": "Claude Sonnet 4", "reasoning": False},
+        {"id": "minimax-m2.5", "name": "MiniMax M2.5", "reasoning": False},
+    ]
+
+    config = _config_openclaw(api_key, models, api_base)
+    providers = config["models"]["providers"]
+
+    assert set(providers) == {"hyperclaw", "kimi-coding"}
+    assert [m["id"] for m in providers["hyperclaw"]["models"]] == ["glm-5"]
+    assert [m["id"] for m in providers["kimi-coding"]["models"]] == ["kimi-k2.5"]
+
+    defaults = config["agents"]["defaults"]
+    assert defaults["model"]["primary"] == "kimi-coding/kimi-k2.5"
+    assert defaults["memorySearch"]["provider"] == "openai"
+    assert defaults["memorySearch"]["model"] == "qwen3-embedding-4b"
+    assert defaults["memorySearch"]["remote"]["baseUrl"] == "https://api.agents.hypercli.com/v1"
+
+
+def test_config_openclaw_uses_first_embedding_model_for_memory_search():
+    config = _config_openclaw(
+        "sk-test",
+        [
+            {"id": "kimi-k2.5", "name": "Kimi K2.5", "reasoning": True},
+            {"id": "text-embedding-3-large", "name": "Text Embedding 3 Large", "mode": "embedding"},
+        ],
+        "https://api.agents.hypercli.com",
+    )
+
+    defaults = config["agents"]["defaults"]
+    assert defaults["memorySearch"]["model"] == "text-embedding-3-large"
+
+
+def test_config_openclaw_supports_placeholder_api_key_env():
+    config = _config_openclaw(
+        "sk-real",
+        [
+            {"id": "kimi-k2.5", "name": "Kimi K2.5", "reasoning": True},
+            {"id": "glm-5", "name": "GLM-5", "reasoning": True},
+            {"id": "qwen3-embedding-4b", "name": "Qwen3 Embedding 4B", "mode": "embedding"},
+        ],
+        "https://api.agents.hypercli.com",
+        placeholder_env="HYPER_AGENTS_API_KEY",
+    )
+
+    providers = config["models"]["providers"]
+    assert providers["hyperclaw"]["apiKey"] == "${HYPER_AGENTS_API_KEY}"
+    assert providers["kimi-coding"]["apiKey"] == "${HYPER_AGENTS_API_KEY}"
+    assert config["agents"]["defaults"]["memorySearch"]["remote"]["apiKey"] == "${HYPER_AGENTS_API_KEY}"
hypercli_cli-2026.3.13/hypercli_cli/__init__.py (removed file)

@@ -1 +0,0 @@
-__version__ = "2026.3.13"
The remaining 20 files listed above are unchanged between 2026.3.13 and 2026.3.18.