@dhf-hermes/grix 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @dhf-hermes/grix might be problematic. Click here for more details.

Files changed (51) hide show
  1. package/.gitignore +6 -0
  2. package/LICENSE +21 -0
  3. package/README.md +98 -0
  4. package/bin/grix-hermes.mjs +93 -0
  5. package/grix-admin/SKILL.md +109 -0
  6. package/grix-admin/agents/openai.yaml +7 -0
  7. package/grix-admin/scripts/admin.mjs +12 -0
  8. package/grix-admin/scripts/bind_from_json.py +118 -0
  9. package/grix-admin/scripts/bind_local.py +226 -0
  10. package/grix-egg/SKILL.md +73 -0
  11. package/grix-egg/agents/openai.yaml +7 -0
  12. package/grix-egg/references/acceptance-checklist.md +10 -0
  13. package/grix-egg/scripts/card-link.mjs +12 -0
  14. package/grix-egg/scripts/validate_install_context.mjs +74 -0
  15. package/grix-group/SKILL.md +42 -0
  16. package/grix-group/agents/openai.yaml +7 -0
  17. package/grix-group/scripts/group.mjs +12 -0
  18. package/grix-query/SKILL.md +53 -0
  19. package/grix-query/agents/openai.yaml +7 -0
  20. package/grix-query/scripts/query.mjs +12 -0
  21. package/grix-register/SKILL.md +68 -0
  22. package/grix-register/agents/openai.yaml +7 -0
  23. package/grix-register/references/handoff-contract.md +21 -0
  24. package/grix-register/scripts/create_api_agent_and_bind.py +105 -0
  25. package/grix-register/scripts/grix_auth.py +487 -0
  26. package/grix-update/SKILL.md +50 -0
  27. package/grix-update/agents/openai.yaml +7 -0
  28. package/grix-update/references/cron-setup.md +11 -0
  29. package/grix-update/scripts/grix_update.py +99 -0
  30. package/lib/manifest.mjs +68 -0
  31. package/message-send/SKILL.md +71 -0
  32. package/message-send/agents/openai.yaml +7 -0
  33. package/message-send/scripts/card-link.mjs +40 -0
  34. package/message-send/scripts/send.mjs +12 -0
  35. package/message-unsend/SKILL.md +39 -0
  36. package/message-unsend/agents/openai.yaml +7 -0
  37. package/message-unsend/scripts/unsend.mjs +12 -0
  38. package/openclaw-memory-setup/SKILL.md +38 -0
  39. package/openclaw-memory-setup/agents/openai.yaml +7 -0
  40. package/openclaw-memory-setup/scripts/bench_ollama_embeddings.py +257 -0
  41. package/openclaw-memory-setup/scripts/set_openclaw_memory_model.py +240 -0
  42. package/openclaw-memory-setup/scripts/survey_host_readiness.py +379 -0
  43. package/package.json +51 -0
  44. package/shared/cli/actions.mjs +339 -0
  45. package/shared/cli/aibot-client.mjs +274 -0
  46. package/shared/cli/card-links.mjs +90 -0
  47. package/shared/cli/config.mjs +141 -0
  48. package/shared/cli/grix-hermes.mjs +87 -0
  49. package/shared/cli/targets.mjs +119 -0
  50. package/shared/references/grix-card-links.md +27 -0
  51. package/shared/references/hermes-grix-config.md +30 -0
@@ -0,0 +1,68 @@
1
+ import { homedir } from "node:os";
2
+ import path from "node:path";
3
+ import { fileURLToPath } from "node:url";
4
+
5
// Catalog of the skills bundled with this package. Each entry pairs the
// skill's directory name with a human-readable summary used in the manifest.
export const SKILLS = [
  { name: "grix-admin", description: "Remote Grix agent creation/category management plus local OpenClaw binding." },
  { name: "grix-egg", description: "Install-flow orchestrator for Grix/OpenClaw package delivery and acceptance." },
  { name: "grix-group", description: "Grix group lifecycle and membership governance over the bundled websocket CLI." },
  { name: "grix-query", description: "Read-only Grix contact, session, and message lookup over the bundled websocket CLI." },
  { name: "grix-register", description: "HTTP-based Grix registration and first-agent bootstrap for Hermes/OpenClaw." },
  { name: "grix-update", description: "OpenClaw plugin update workflow for Grix-related deployments." },
  { name: "message-send", description: "Hermes-native Grix message sending and conversation-card rules." },
  { name: "message-unsend", description: "Silent Grix unsend workflow over the bundled websocket CLI." },
  { name: "openclaw-memory-setup", description: "OpenClaw memory provider setup and validation." },
];

// Non-skill directories that must also be installed alongside the skills.
export const SUPPORT_ENTRIES = ["shared"];
45
+
46
// Absolute path of the package root (the parent of this lib/ directory).
export function projectRoot() {
  const libDir = path.dirname(fileURLToPath(import.meta.url));
  return path.resolve(libDir, "..");
}
49
+
50
// Resolve the install destination: <HERMES_HOME>/skills/grix-hermes.
// HERMES_HOME comes from the environment when set (and non-empty),
// otherwise it defaults to ~/.hermes.
export function defaultInstallDir() {
  const override = process.env.HERMES_HOME;
  const hermesHome = override ? path.resolve(override) : path.join(homedir(), ".hermes");
  return path.join(hermesHome, "skills", "grix-hermes");
}
56
+
57
// Every directory the installer must copy: all skill directories followed
// by the shared support entries.
export function installEntries() {
  const skillNames = SKILLS.map(({ name }) => name);
  return skillNames.concat(SUPPORT_ENTRIES);
}
60
+
61
// Build the manifest object describing this package for installation:
// package identity, target directory, and the skill catalog.
export function manifestData() {
  const manifest = {
    name: "grix-hermes",
    version: "0.1.0",
    install_dir: defaultInstallDir(),
    skills: SKILLS,
  };
  return manifest;
}
@@ -0,0 +1,71 @@
1
+ ---
2
+ name: message-send
3
+ description: 需要在 Hermes 里向 Grix 当前会话或其他会话发送消息时使用。优先使用 Hermes 自带 `send_message`,并遵守 Grix `session_id`、`route_session_key`、会话卡片、状态卡片的发送规则。
4
+ ---
5
+
6
+ # Message Send
7
+
8
+ 这个技能现在有两条路:
9
+
10
+ 1. 当前会话内普通回复:直接回复
11
+ 2. 跨会话、卡片、独立投递:优先用本技能自带的 WS helper
12
+
13
+ ## 发送主线
14
+
15
+ ### 当前会话回复
16
+
17
+ 如果就是回复当前会话,直接正常回复即可。
18
+
19
+ ### 跨会话发送
20
+
21
+ 优先使用:
22
+
23
+ ```bash
24
+ node scripts/send.mjs --to <SESSION_ID_OR_ROUTE_SESSION_KEY> --message "..."
25
+ ```
26
+
27
+ 如果上层明确要求走 Hermes 自带 `send_message`,再使用:
28
+
29
+ ```json
30
+ {
31
+ "action": "send",
32
+ "target": "grix:<SESSION_ID_OR_ROUTE_SESSION_KEY>",
33
+ "message": "..."
34
+ }
35
+ ```
36
+
37
+ 不要把裸 `session_id` 当成 Hermes `send_message.target`。
38
+
39
+ ## 如何拿目标会话
40
+
41
+ - 已知准确 `session_id`:直接发
42
+ - 只有 `route_session_key`:也可以直接发
43
+ - 还不知道目标:先用 [grix-query](../grix-query/SKILL.md) 找准确会话
44
+
45
+ ## 卡片规则
46
+
47
+ 如果你要发:
48
+
49
+ - 会话卡片
50
+ - 安装状态卡
51
+ - Agent 资料卡
52
+
53
+ 都应优先通过本技能的 helper 先生成单行 Markdown 链接,再单独一条消息发送。
54
+
55
+ 参考格式见:
56
+
57
+ - [Grix Card Links](../shared/references/grix-card-links.md)
58
+
59
+ 如果要稳定生成卡片链接,优先用本技能自带 helper:
60
+
61
+ ```bash
62
+ node scripts/send.mjs --to <SESSION_ID_OR_ROUTE_SESSION_KEY> --message "$(node scripts/card-link.mjs conversation --session-id <SESSION_ID> --session-type group --title 测试群)"
63
+ node scripts/card-link.mjs conversation --session-id <SESSION_ID> --session-type group --title 测试群
64
+ node scripts/card-link.mjs user-profile --user-id <AGENT_ID> --nickname writer-openclaw
65
+ node scripts/card-link.mjs egg-status --install-id <INSTALL_ID> --status running --step installing --summary 已开始安装
66
+ ```
67
+
68
+ ## 注意
69
+
70
+ - `grix://card` 链接不要混在普通说明文字里
71
+ - 如果还要补充说明,另发一条普通文本
@@ -0,0 +1,7 @@
1
+ interface:
2
+ display_name: "Message Send"
3
+ short_description: "Send Grix messages and card links with Hermes."
4
+ default_prompt: "Use $message-send to deliver a Grix message or a conversation card with Hermes."
5
+
6
+ policy:
7
+ allow_implicit_invocation: true
@@ -0,0 +1,40 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { dispatchCardBuilder } from "../../shared/cli/card-links.mjs";
4
+
5
// Convert a kebab-case flag name ("session-id") to camelCase ("sessionId").
function toCamelFlag(key) {
  return key.replace(/-([a-z])/g, (_unused, letter) => letter.toUpperCase());
}
8
+
9
/**
 * Parse CLI arguments into positional tokens and camelCase-keyed flags.
 *
 * Accepted flag forms:
 *   --flag value   → flags.flag = "value"
 *   --flag=value   → flags.flag = "value"   (generalization; new, backward compatible)
 *   --flag         → flags.flag = true      (also when followed by another "--" token)
 *
 * @param {string[]} argv - Raw arguments (already sliced past node/script).
 * @returns {{ positional: string[], flags: Record<string, string | true> }}
 */
function parseArgs(argv) {
  const positional = [];
  const flags = {};
  // Local kebab→camel conversion so the parser is self-contained.
  const camel = (key) => key.replace(/-([a-z])/g, (_m, ch) => ch.toUpperCase());
  for (let i = 0; i < argv.length; i += 1) {
    const token = argv[i];
    if (!token.startsWith("--")) {
      positional.push(token);
      continue;
    }
    // New: support the "--key=value" form in addition to "--key value".
    const eq = token.indexOf("=");
    if (eq !== -1) {
      flags[camel(token.slice(2, eq))] = token.slice(eq + 1);
      continue;
    }
    const key = camel(token.slice(2));
    const next = argv[i + 1];
    if (!next || next.startsWith("--")) {
      // Bare flag (or the next token is itself a flag): boolean true.
      flags[key] = true;
      continue;
    }
    flags[key] = next;
    i += 1; // consume the value token
  }
  return { positional, flags };
}
29
+
30
// Entry point: read the requested card kind from argv; print usage when the
// kind is missing or a help token, otherwise delegate to the shared builder.
const { positional, flags } = parseArgs(process.argv.slice(2));
const kind = positional[0];
const wantsHelp = !kind || kind === "help" || kind === "--help" || kind === "-h";
if (wantsHelp) {
  console.log(`Usage:
node scripts/card-link.mjs conversation --session-id <id> --session-type group --title <title>
node scripts/card-link.mjs user-profile --user-id <id> --nickname <name> [--avatar-url <url>]
node scripts/card-link.mjs egg-status --install-id <id> --status running --step installing --summary <text>`);
  process.exit(0);
}

console.log(dispatchCardBuilder(kind, flags));
@@ -0,0 +1,12 @@
1
+ #!/usr/bin/env node
2
+
3
+ import path from "node:path";
4
+ import { fileURLToPath } from "node:url";
5
+ import { spawnSync } from "node:child_process";
6
+
7
+ const scriptDir = path.dirname(fileURLToPath(import.meta.url));
8
+ const sharedCli = path.resolve(scriptDir, "../../shared/cli/grix-hermes.mjs");
9
+ const result = spawnSync(process.execPath, [sharedCli, "send", ...process.argv.slice(2)], {
10
+ stdio: "inherit",
11
+ });
12
+ process.exit(result.status ?? 1);
@@ -0,0 +1,39 @@
1
+ ---
2
+ name: message-unsend
3
+ description: 需要在 Hermes 里静默撤回 Grix 消息时使用。适用于撤回 agent 自己刚发出的错误消息、状态消息或卡片消息。通过 `terminal` 执行 `../shared/cli/grix-hermes.mjs unsend`,按静默双重撤回规则处理。
4
+ ---
5
+
6
+ # Message Unsend
7
+
8
+ 这个技能只做静默撤回。
9
+
10
+ ## 执行方式
11
+
12
+ 统一用:
13
+
14
+ ```bash
15
+ node scripts/unsend.mjs --message-id <MSG_ID> --session-id <SESSION_ID>
16
+ ```
17
+
18
+ 如果目标不是直接 `session_id`,也可以传:
19
+
20
+ ```bash
21
+ node scripts/unsend.mjs --message-id <MSG_ID> --to <ROUTE_SESSION_KEY_OR_SESSION_ID>
22
+ ```
23
+
24
+ 如果你还知道当前命令消息所在通道和消息 ID,就一起传,让它做静默双重撤回:
25
+
26
+ ```bash
27
+ node scripts/unsend.mjs --message-id <TARGET_MSG_ID> --session-id <SESSION_ID> --current-channel-id <CURRENT_CHANNEL_ID> --current-message-id <CURRENT_MSG_ID>
28
+ ```
29
+
30
+ ## 规则
31
+
32
+ - `messageId` 必须是数字字符串
33
+ - 优先用于撤回 agent 自己刚发的消息
34
+ - 默认静默执行,不要先发“我来撤回一下”
35
+ - 如果消息不存在或不可撤回,不要额外制造噪音
36
+
37
+ ## 参考
38
+
39
+ - [Hermes Grix Runtime](../shared/references/hermes-grix-config.md)
@@ -0,0 +1,7 @@
1
+ interface:
2
+ display_name: "Message Unsend"
3
+ short_description: "Silently retract Grix messages through websocket."
4
+ default_prompt: "Use $message-unsend to silently retract an incorrect Grix message."
5
+
6
+ policy:
7
+ allow_implicit_invocation: true
@@ -0,0 +1,12 @@
1
+ #!/usr/bin/env node
2
+
3
+ import path from "node:path";
4
+ import { fileURLToPath } from "node:url";
5
+ import { spawnSync } from "node:child_process";
6
+
7
+ const scriptDir = path.dirname(fileURLToPath(import.meta.url));
8
+ const sharedCli = path.resolve(scriptDir, "../../shared/cli/grix-hermes.mjs");
9
+ const result = spawnSync(process.execPath, [sharedCli, "unsend", ...process.argv.slice(2)], {
10
+ stdio: "inherit",
11
+ });
12
+ process.exit(result.status ?? 1);
@@ -0,0 +1,38 @@
1
+ ---
2
+ name: openclaw-memory-setup
3
+ description: 配置 OpenClaw memory provider、Ollama embedding 模型、校验并重建索引时使用。适用于新机器初始化、已有配置调整、provider 切换和资源受限主机的 memory 健康检查。通过本技能自带脚本和 `openclaw` 官方 CLI 完成。
4
+ ---
5
+
6
+ # OpenClaw Memory Setup
7
+
8
+ ## 进入方式
9
+
10
+ 如果用户已经给了明确 provider / model / endpoint / api key,就直接改配置。
11
+
12
+ 如果机器情况还不清楚,先体检,再挑候选模型,再做实测。
13
+
14
+ ## 可用脚本
15
+
16
+ ```bash
17
+ python3 scripts/survey_host_readiness.py --json
18
+ python3 scripts/bench_ollama_embeddings.py --json --model nomic-embed-text
19
+ python3 scripts/set_openclaw_memory_model.py --preview ...
20
+ python3 scripts/set_openclaw_memory_model.py ...
21
+ ```
22
+
23
+ ## 主线
24
+
25
+ 1. 需要时先跑 `survey_host_readiness.py`
26
+ 2. 如需本地模型比较,再跑 `bench_ollama_embeddings.py`
27
+ 3. 用 `set_openclaw_memory_model.py` 预览或写入配置
28
+ 4. 写完后必须执行:
29
+ - `openclaw --profile <profile> config validate`
30
+ - `openclaw --profile <profile> gateway restart`
31
+ - `openclaw --profile <profile> memory index --force`
32
+ - `openclaw --profile <profile> memory status`
33
+ - `openclaw --profile <profile> status`
34
+
35
+ ## 结果判断
36
+
37
+ - 只改了配置但没重建索引,不算完成
38
+ - `memory status` 和 `status` 都正常,才算完成
@@ -0,0 +1,7 @@
1
+ interface:
2
+ display_name: "OpenClaw Memory Setup"
3
+ short_description: "Configure and validate OpenClaw memory."
4
+ default_prompt: "Use $openclaw-memory-setup to configure and verify OpenClaw memory."
5
+
6
+ policy:
7
+ allow_implicit_invocation: true
@@ -0,0 +1,257 @@
1
+ #!/usr/bin/env python3
2
+ """Benchmark Ollama embedding models on the current machine."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import argparse
7
+ import json
8
+ import statistics
9
+ import sys
10
+ import time
11
+ import urllib.error
12
+ import urllib.request
13
+ from typing import Any
14
+
15
# Default probe text shared by the single-input and batch benchmarks.
DEFAULT_TEXT = (
    "Generate a stable semantic embedding for this short bilingual memory sample. "
    "It should be suitable for retrieval, recall, and near-duplicate detection."
)
19
+
20
+
21
def parse_args() -> argparse.Namespace:
    """Parse and validate command-line arguments for the benchmark run."""
    parser = argparse.ArgumentParser(
        description="Benchmark Ollama embedding models with single and batched requests."
    )
    parser.add_argument(
        "models",
        nargs="+",
        help="Model names to benchmark, for example embeddinggemma:300m-qat-q8_0",
    )
    parser.add_argument(
        "--host",
        default="http://127.0.0.1:11434",
        help="Ollama host. Default: %(default)s",
    )
    parser.add_argument(
        "--rounds",
        type=int,
        default=1,
        help="Measured rounds per test after warmup. Default: %(default)s",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=4,
        help="Number of inputs in the batch test. Default: %(default)s",
    )
    parser.add_argument(
        "--timeout",
        type=float,
        default=300.0,
        help="Request timeout in seconds. Default: %(default)s",
    )
    parser.add_argument(
        "--single-text",
        default=DEFAULT_TEXT,
        help="Text used for the single-input benchmark.",
    )
    parser.add_argument(
        "--skip-warmup",
        action="store_true",
        help="Skip the initial warmup request for each model.",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Emit full JSON results instead of a human table.",
    )

    args = parser.parse_args()
    # Reject degenerate counts up front; parser.error exits with usage info.
    if args.rounds < 1:
        parser.error("--rounds must be at least 1")
    if args.batch_size < 1:
        parser.error("--batch-size must be at least 1")
    return args
74
+
75
+
76
def build_batch_inputs(text: str, batch_size: int) -> list[str]:
    """Derive ``batch_size`` distinct inputs by suffixing a 1-based sample index."""
    inputs: list[str] = []
    for index in range(batch_size):
        inputs.append(f"{text} [sample {index + 1}]")
    return inputs
78
+
79
+
80
def post_embed(host: str, model: str, inputs: list[str], timeout: float) -> dict[str, Any]:
    """POST to Ollama's /api/embed endpoint and return the decoded JSON.

    A single-element ``inputs`` list is sent as a bare string, matching the
    API's single-input form. Raises RuntimeError on HTTP/network failures or
    when the response lacks an "embeddings" list.
    """
    url = host.rstrip("/") + "/api/embed"
    payload = {"model": model, "input": inputs if len(inputs) > 1 else inputs[0]}
    request = urllib.request.Request(
        url,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=timeout) as response:
            data = json.load(response)
    except urllib.error.HTTPError as exc:
        detail = exc.read().decode("utf-8", errors="replace")
        raise RuntimeError(f"HTTP {exc.code}: {detail}") from exc
    except urllib.error.URLError as exc:
        raise RuntimeError(f"Cannot reach Ollama at {url}: {exc}") from exc

    # Sanity-check the response shape before callers index into it.
    if not isinstance(data.get("embeddings"), list):
        raise RuntimeError(f"Unexpected Ollama response for {model}: {data}")
    return data
103
+
104
+
105
def run_one(host: str, model: str, inputs: list[str], timeout: float) -> dict[str, Any]:
    """Issue one embed request and summarize wall/API timing and vector shape."""
    start = time.perf_counter()
    response = post_embed(host, model, inputs, timeout)
    wall = time.perf_counter() - start

    # Ollama reports total_duration in nanoseconds; convert when present.
    api_seconds = None
    reported = response.get("total_duration")
    if isinstance(reported, (int, float)):
        api_seconds = float(reported) / 1_000_000_000.0

    embeddings = response.get("embeddings", [])
    dims = len(embeddings[0]) if embeddings else 0
    return {
        "wall_seconds": wall,
        "api_seconds": api_seconds,
        "items": len(embeddings),
        "vector_dims": dims,
    }
120
+
121
+
122
def median_or_none(values: list[float | None]) -> float | None:
    """Median of the non-None entries, or None when nothing is usable."""
    usable = [v for v in values if v is not None]
    return statistics.median(usable) if usable else None
127
+
128
+
129
def benchmark_model(
    host: str,
    model: str,
    single_text: str,
    batch_inputs: list[str],
    rounds: int,
    timeout: float,
    skip_warmup: bool,
) -> dict[str, Any]:
    """Benchmark one model: optional warmup, then timed single and batch rounds.

    Medians over ``rounds`` smooth out one-off scheduling spikes. API-side
    timings may be absent, so they go through median_or_none.
    """
    if not skip_warmup:
        # The first request typically pays the model-load cost; discard it.
        run_one(host, model, [single_text], timeout)

    single_runs: list[dict[str, Any]] = []
    for _ in range(rounds):
        single_runs.append(run_one(host, model, [single_text], timeout))
    batch_runs: list[dict[str, Any]] = []
    for _ in range(rounds):
        batch_runs.append(run_one(host, model, batch_inputs, timeout))

    return {
        "model": model,
        "ok": True,
        "single_wall_seconds": statistics.median(r["wall_seconds"] for r in single_runs),
        "single_api_seconds": median_or_none([r["api_seconds"] for r in single_runs]),
        "batch_wall_seconds": statistics.median(r["wall_seconds"] for r in batch_runs),
        "batch_api_seconds": median_or_none([r["api_seconds"] for r in batch_runs]),
        "batch_size": len(batch_inputs),
        "vector_dims": single_runs[-1]["vector_dims"],
        "rounds": rounds,
    }
155
+
156
+
157
def format_seconds(value: float | None) -> str:
    """Render seconds with two decimals; '-' stands in for a missing value."""
    return "-" if value is None else f"{value:.2f}"
161
+
162
+
163
def print_table(results: list[dict[str, Any]], batch_size: int) -> None:
    """Print an aligned summary table plus a speed-based recommendation.

    Successful models show their timings; failed models show their error
    message in the Status column. When at least one model succeeded, the
    fastest (by batch wall time, then single wall time, then name) is
    recommended at the end.
    """
    headers = [
        "Model",
        "Single(s)",
        f"Batch{batch_size}(s)",
        "API Single(s)",
        f"API Batch{batch_size}(s)",
        "Dims",
        "Status",
    ]
    rows: list[list[str]] = []
    for result in results:
        if result["ok"]:
            rows.append(
                [
                    result["model"],
                    format_seconds(result["single_wall_seconds"]),
                    format_seconds(result["batch_wall_seconds"]),
                    format_seconds(result["single_api_seconds"]),
                    format_seconds(result["batch_api_seconds"]),
                    str(result["vector_dims"]),
                    "ok",
                ]
            )
        else:
            rows.append([result["model"], "-", "-", "-", "-", "-", result["error"]])

    # Column width = widest cell including the header. Fix: the original used
    # max(len(header), *gen), which raises TypeError when `results` (and thus
    # `rows`) is empty — max() over a single int is invalid. Wrapping the
    # candidates in a list keeps the empty case safe.
    widths = [
        max([len(header), *(len(row[index]) for row in rows)])
        for index, header in enumerate(headers)
    ]

    def emit(columns: list[str]) -> None:
        # Pad each cell to its column width for alignment.
        print(" ".join(value.ljust(widths[index]) for index, value in enumerate(columns)))

    emit(headers)
    emit(["-" * width for width in widths])
    for row in rows:
        emit(row)

    successful = [result for result in results if result["ok"]]
    if successful:
        # Tie-break by model name so the recommendation is deterministic.
        ranked = sorted(
            successful,
            key=lambda item: (
                item["batch_wall_seconds"],
                item["single_wall_seconds"],
                item["model"],
            ),
        )
        print()
        print(f"Recommended by speed: {ranked[0]['model']}")
226
+
227
def main() -> int:
    """CLI driver: benchmark every requested model and report the results.

    Returns 0 when at least one model benchmarked successfully, else 1.
    Per-model failures are recorded and reported rather than aborting the run.
    """
    args = parse_args()
    batch_inputs = build_batch_inputs(args.single_text, args.batch_size)
    results: list[dict[str, Any]] = []

    for model in args.models:
        try:
            outcome = benchmark_model(
                host=args.host,
                model=model,
                single_text=args.single_text,
                batch_inputs=batch_inputs,
                rounds=args.rounds,
                timeout=args.timeout,
                skip_warmup=args.skip_warmup,
            )
        except Exception as exc:  # noqa: BLE001 - keep going after one bad model
            outcome = {"model": model, "ok": False, "error": str(exc)}
        results.append(outcome)

    if args.json:
        print(json.dumps({"results": results}, indent=2))
    else:
        print_table(results, args.batch_size)

    return 0 if any(result["ok"] for result in results) else 1


if __name__ == "__main__":
    sys.exit(main())