@dhf-hermes/grix 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of @dhf-hermes/grix might be problematic. Click here for more details.
- package/.gitignore +6 -0
- package/LICENSE +21 -0
- package/README.md +98 -0
- package/bin/grix-hermes.mjs +93 -0
- package/grix-admin/SKILL.md +109 -0
- package/grix-admin/agents/openai.yaml +7 -0
- package/grix-admin/scripts/admin.mjs +12 -0
- package/grix-admin/scripts/bind_from_json.py +118 -0
- package/grix-admin/scripts/bind_local.py +226 -0
- package/grix-egg/SKILL.md +73 -0
- package/grix-egg/agents/openai.yaml +7 -0
- package/grix-egg/references/acceptance-checklist.md +10 -0
- package/grix-egg/scripts/card-link.mjs +12 -0
- package/grix-egg/scripts/validate_install_context.mjs +74 -0
- package/grix-group/SKILL.md +42 -0
- package/grix-group/agents/openai.yaml +7 -0
- package/grix-group/scripts/group.mjs +12 -0
- package/grix-query/SKILL.md +53 -0
- package/grix-query/agents/openai.yaml +7 -0
- package/grix-query/scripts/query.mjs +12 -0
- package/grix-register/SKILL.md +68 -0
- package/grix-register/agents/openai.yaml +7 -0
- package/grix-register/references/handoff-contract.md +21 -0
- package/grix-register/scripts/create_api_agent_and_bind.py +105 -0
- package/grix-register/scripts/grix_auth.py +487 -0
- package/grix-update/SKILL.md +50 -0
- package/grix-update/agents/openai.yaml +7 -0
- package/grix-update/references/cron-setup.md +11 -0
- package/grix-update/scripts/grix_update.py +99 -0
- package/lib/manifest.mjs +68 -0
- package/message-send/SKILL.md +71 -0
- package/message-send/agents/openai.yaml +7 -0
- package/message-send/scripts/card-link.mjs +40 -0
- package/message-send/scripts/send.mjs +12 -0
- package/message-unsend/SKILL.md +39 -0
- package/message-unsend/agents/openai.yaml +7 -0
- package/message-unsend/scripts/unsend.mjs +12 -0
- package/openclaw-memory-setup/SKILL.md +38 -0
- package/openclaw-memory-setup/agents/openai.yaml +7 -0
- package/openclaw-memory-setup/scripts/bench_ollama_embeddings.py +257 -0
- package/openclaw-memory-setup/scripts/set_openclaw_memory_model.py +240 -0
- package/openclaw-memory-setup/scripts/survey_host_readiness.py +379 -0
- package/package.json +51 -0
- package/shared/cli/actions.mjs +339 -0
- package/shared/cli/aibot-client.mjs +274 -0
- package/shared/cli/card-links.mjs +90 -0
- package/shared/cli/config.mjs +141 -0
- package/shared/cli/grix-hermes.mjs +87 -0
- package/shared/cli/targets.mjs +119 -0
- package/shared/references/grix-card-links.md +27 -0
- package/shared/references/hermes-grix-config.md +30 -0
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Preview or update OpenClaw memory model settings in openclaw.json files."""
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import argparse
|
|
7
|
+
import copy
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
import shutil
|
|
11
|
+
import sys
|
|
12
|
+
import tempfile
|
|
13
|
+
import time
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Any
|
|
16
|
+
|
|
17
|
+
# Lowercase substrings that mark a config key as secret-bearing; matching is
# underscore/hyphen-insensitive (see key_looks_sensitive), and matching keys
# are replaced with "<redacted>" before any value is printed.
SENSITIVE_KEY_MARKERS = (
    "apikey",
    "api_key",
    "token",
    "secret",
    "authorization",
    "password",
    "passwd",
    "credential",
)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface for this script."""
    cli = argparse.ArgumentParser(
        description="Preview or update agents.defaults.memorySearch in OpenClaw profile configs."
    )
    add = cli.add_argument
    add(
        "targets",
        nargs="+",
        help="Profile directories or direct openclaw.json paths.",
    )
    add(
        "--model",
        required=True,
        help="Target Ollama embedding model, for example embeddinggemma:300m-qat-q8_0",
    )
    add(
        "--provider",
        default="ollama",
        help="Memory provider to write. Default: %(default)s",
    )
    add(
        "--set",
        dest="extra_settings",
        action="append",
        default=[],
        metavar="KEY=VALUE",
        help=(
            "Extra memorySearch setting to write. KEY supports dotted paths under "
            "agents.defaults.memorySearch. VALUE is parsed as JSON when possible, "
            "otherwise stored as a string. Repeat as needed."
        ),
    )
    add(
        "--write",
        action="store_true",
        help="Apply the change. Without this flag, print a preview only.",
    )
    return cli.parse_args()
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def resolve_config_path(raw_target: str) -> Path:
    """Normalize a CLI target into a concrete openclaw.json path.

    A directory resolves to the openclaw.json inside it. Anything whose
    final name is not openclaw.json is rejected with RuntimeError so the
    tool never edits unrelated files.
    """
    candidate = Path(raw_target).expanduser()
    if candidate.is_dir():
        candidate = candidate / "openclaw.json"
    if candidate.name != "openclaw.json":
        raise RuntimeError(f"Refusing to edit non-OpenClaw config file: {candidate}")
    return candidate
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def load_config(path: Path) -> dict[str, Any]:
    """Read *path* as UTF-8 JSON, wrapping common failures in RuntimeError.

    A missing file or malformed JSON becomes a RuntimeError with a
    user-facing message; other I/O errors propagate unchanged.
    """
    try:
        raw_text = path.read_text(encoding="utf-8")
    except FileNotFoundError as exc:
        raise RuntimeError(f"Config not found: {path}") from exc
    try:
        return json.loads(raw_text)
    except json.JSONDecodeError as exc:
        raise RuntimeError(f"Invalid JSON in {path}: {exc}") from exc
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def ensure_memory_search(config: dict[str, Any]) -> dict[str, Any]:
    """Return the agents.defaults.memorySearch dict, creating missing levels.

    The top-level 'agents' object must already exist (a safety check so we
    never bootstrap an unrelated file); every level that is present must be
    a JSON object, otherwise RuntimeError is raised.
    """
    if "agents" not in config:
        raise RuntimeError("Refusing to edit a config without a top-level 'agents' object")
    node = config["agents"]
    if not isinstance(node, dict):
        raise RuntimeError("Top-level 'agents' must be a JSON object")
    descent = (
        ("defaults", "'agents.defaults'"),
        ("memorySearch", "'agents.defaults.memorySearch'"),
    )
    for key, label in descent:
        node = node.setdefault(key, {})
        if not isinstance(node, dict):
            raise RuntimeError(f"{label} must be a JSON object")
    return node
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def parse_extra_settings(entries: list[str]) -> list[tuple[list[str], Any]]:
    """Turn KEY=VALUE strings into (dotted key parts, parsed value) pairs.

    VALUE is interpreted as JSON when it parses, otherwise kept as the raw
    string. Raises RuntimeError for entries with no '=' or an empty key.
    """
    results: list[tuple[list[str], Any]] = []
    for item in entries:
        name, eq, raw_value = item.partition("=")
        if not eq:
            raise RuntimeError(f"Invalid --set value (expected KEY=VALUE): {item}")
        segments = [piece.strip() for piece in name.split(".") if piece.strip()]
        if not segments:
            raise RuntimeError(f"Invalid --set key: {item}")
        try:
            parsed: Any = json.loads(raw_value)
        except json.JSONDecodeError:
            parsed = raw_value
        results.append((segments, parsed))
    return results
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def set_dotted_value(root: dict[str, Any], key_parts: list[str], value: Any) -> None:
    """Assign *value* at the dotted path *key_parts* inside *root*.

    Intermediate dicts are created on demand where the slot is absent or
    None; a non-dict intermediate aborts with RuntimeError.
    """
    node = root
    for segment in key_parts[:-1]:
        child = node.get(segment)
        if child is None:
            child = {}
            node[segment] = child
        if not isinstance(child, dict):
            joined = ".".join(key_parts)
            raise RuntimeError(f"Cannot set {joined}: {segment} is not a JSON object")
        node = child
    node[key_parts[-1]] = value
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def update_config(
    config: dict[str, Any], provider: str, model: str, extra_settings: list[str]
) -> tuple[dict[str, Any], dict[str, Any]]:
    """Apply provider/model/extra settings to memorySearch inside *config*.

    Switching to a different provider resets the section first so no stale
    provider-specific keys survive. Returns deep copies of the section as
    (before, after) so callers can diff and display them safely.
    """
    section = ensure_memory_search(config)
    snapshot_before = copy.deepcopy(section)
    if section.get("provider") != provider:
        # A provider switch invalidates any provider-specific options.
        section.clear()
    section["provider"] = provider
    section["model"] = model
    for path_parts, parsed in parse_extra_settings(extra_settings):
        set_dotted_value(section, path_parts, parsed)
    return snapshot_before, copy.deepcopy(section)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def key_looks_sensitive(key: str) -> bool:
    """Heuristically decide whether *key* names a secret-bearing value."""
    flattened = key.lower().replace("-", "").replace("_", "")
    for marker in SENSITIVE_KEY_MARKERS:
        if marker.replace("_", "") in flattened:
            return True
    return False
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def redact_for_display(value: Any, key_path: tuple[str, ...] = ()) -> Any:
    """Return *value* with secret-looking dict entries replaced by "<redacted>".

    Dicts and lists are walked recursively; every other value passes
    through unchanged. The input structure is never mutated.
    """
    if isinstance(value, dict):
        return {
            key: "<redacted>"
            if key_looks_sensitive(key)
            else redact_for_display(nested, key_path + (key,))
            for key, nested in value.items()
        }
    if isinstance(value, list):
        return [redact_for_display(element, key_path) for element in value]
    return value
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def backup_path(config_path: Path, stamp: str) -> Path:
    """Pick a non-existing sibling backup filename for *config_path*.

    Uses <name>.bak.<stamp>, appending -1, -2, ... on collision so an
    existing backup is never overwritten.
    """
    base = f"{config_path.name}.bak.{stamp}"
    choice = config_path.with_name(base)
    counter = 1
    while choice.exists():
        choice = config_path.with_name(f"{base}-{counter}")
        counter += 1
    return choice
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def write_config(config_path: Path, config: dict[str, Any], stamp: str) -> Path:
    """Atomically rewrite *config_path* with *config*, keeping a backup.

    The current file is first copied to a timestamped sibling backup, then
    the new JSON is written to a temp file in the same directory and moved
    into place with os.replace, so readers never observe a partial file.
    Returns the backup path.
    """
    backup = backup_path(config_path, stamp)
    shutil.copy2(config_path, backup)
    # Temp file must live in the same directory so os.replace stays atomic
    # (a cross-filesystem rename would not be).
    fd, temp_path = tempfile.mkstemp(
        prefix=f"{config_path.name}.tmp.",
        dir=config_path.parent,
        text=True,
    )
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as handle:
            handle.write(json.dumps(config, indent=2, ensure_ascii=False) + "\n")
        os.replace(temp_path, config_path)
    finally:
        # os.replace consumed the temp file on success; remove it otherwise.
        if os.path.exists(temp_path):
            os.unlink(temp_path)
    return backup
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def main() -> int:
    """Process each CLI target, printing a redacted before/after diff.

    Each target is handled independently: a failure on one sets the exit
    code to 1 but does not stop the remaining targets. Returns 0 when every
    target was processed cleanly.
    """
    args = parse_args()
    # One shared timestamp so a multi-target run produces a coherent backup set.
    stamp = time.strftime("%Y%m%d-%H%M%S")
    exit_code = 0

    for raw_target in args.targets:
        try:
            config_path = resolve_config_path(raw_target)
            config = load_config(config_path)
            before, after = update_config(
                config,
                args.provider,
                args.model,
                args.extra_settings,
            )
            changed = before != after

            print(f"{config_path}")
            print(
                " before: "
                f"{json.dumps(redact_for_display(before), ensure_ascii=False, sort_keys=True)}"
            )
            print(
                " after: "
                f"{json.dumps(redact_for_display(after), ensure_ascii=False, sort_keys=True)}"
            )

            if not changed:
                print(" result: unchanged")
                continue

            if args.write:
                backup = write_config(config_path, config, stamp)
                # Fixed: original used an f-string with no placeholders (F541).
                print(" result: written")
                print(f" backup: {backup}")
            else:
                print(" result: preview only (add --write to apply)")
        except Exception as exc:  # noqa: BLE001 - per-target isolation: report and continue
            exit_code = 1
            print(f"{raw_target}")
            print(f" error: {exc}")

    return exit_code


if __name__ == "__main__":
    sys.exit(main())
|
|
@@ -0,0 +1,379 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Survey host readiness for Ollama and OpenClaw memory setup."""
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import argparse
|
|
7
|
+
import ctypes
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
import platform
|
|
11
|
+
import shutil
|
|
12
|
+
import subprocess
|
|
13
|
+
import sys
|
|
14
|
+
import urllib.error
|
|
15
|
+
import urllib.request
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Any
|
|
18
|
+
|
|
19
|
+
def parse_args() -> argparse.Namespace:
    """Define and evaluate the survey command-line options."""
    cli = argparse.ArgumentParser(
        description="Report machine readiness for Ollama and OpenClaw memory setup."
    )
    cli.add_argument(
        "--ollama-host",
        default="http://127.0.0.1:11434",
        help="Ollama host to probe. Default: %(default)s",
    )
    cli.add_argument(
        "--json",
        action="store_true",
        help="Emit JSON instead of a readable report.",
    )
    return cli.parse_args()
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def run_command(argv: list[str]) -> tuple[bool, str]:
    """Run *argv* and return (succeeded, best-available output).

    A missing executable yields (False, ""). Output prefers stdout and
    falls back to stderr, stripped of surrounding whitespace.
    """
    try:
        result = subprocess.run(
            argv,
            check=False,
            capture_output=True,
            text=True,
        )
    except FileNotFoundError:
        return False, ""
    captured = (result.stdout or result.stderr).strip()
    return result.returncode == 0, captured
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def shell_context() -> str:
    """Classify the runtime environment.

    Returns one of: "wsl", "windows-native", "macos-native",
    "linux-native", "unknown". WSL is detected via WSL_DISTRO_NAME or a
    Microsoft-tagged /proc/version.
    """
    if os.environ.get("WSL_DISTRO_NAME"):
        return "wsl"
    if sys.platform.startswith("linux"):
        kernel_banner = ""
        try:
            with open("/proc/version", "r", encoding="utf-8") as handle:
                kernel_banner = handle.read().lower()
        except OSError:
            pass
        return "wsl" if "microsoft" in kernel_banner else "linux-native"
    if sys.platform == "win32":
        return "windows-native"
    if sys.platform == "darwin":
        return "macos-native"
    return "unknown"
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def command_candidates(command: str) -> list[str]:
    """Return well-known absolute install locations to try for *command*.

    Covers the common Unix prefixes for the four tools this script cares
    about, plus Windows install paths derived from environment variables.
    Any other command yields an empty list.
    """
    if command not in ("ollama", "openclaw", "node", "npm"):
        return []

    # Hard-coded forward-slash paths, matching the usual Unix install spots.
    candidates = [
        f"{prefix}/{command}"
        for prefix in ("/usr/local/bin", "/opt/homebrew/bin", "/usr/bin")
    ]

    if command == "ollama":
        local_app_data = os.environ.get("LOCALAPPDATA")
        if local_app_data:
            candidates.append(
                os.path.join(local_app_data, "Programs", "Ollama", "ollama.exe")
            )
    elif command == "node":
        program_files = os.environ.get("ProgramFiles")
        if program_files:
            candidates.append(os.path.join(program_files, "nodejs", "node.exe"))
    elif command == "npm":
        program_files = os.environ.get("ProgramFiles")
        if program_files:
            candidates.append(os.path.join(program_files, "nodejs", "npm.cmd"))
            candidates.append(os.path.join(program_files, "nodejs", "npm.exe"))

    return candidates
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def resolve_command_path(command: str) -> str | None:
    """Locate *command* via PATH first, then via known install locations."""
    found = shutil.which(command)
    if found:
        return found
    return next(
        (
            candidate
            for candidate in command_candidates(command)
            if os.path.exists(candidate) and os.access(candidate, os.X_OK)
        ),
        None,
    )
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def path_search_dirs() -> list[Path]:
    """Collect directories to scan: PATH entries plus known openclaw dirs.

    First-seen order is preserved and duplicates (by string form) are
    dropped.
    """
    directories: list[Path] = []
    seen: set[str] = set()

    def remember(path: Path) -> None:
        # De-duplicate on the string form while preserving insertion order.
        key = str(path)
        if key not in seen:
            seen.add(key)
            directories.append(path)

    for raw_dir in os.environ.get("PATH", "").split(os.pathsep):
        if raw_dir:
            remember(Path(raw_dir).expanduser())

    for candidate in command_candidates("openclaw"):
        remember(Path(candidate).expanduser().parent)

    return directories
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def is_executable_file(path: Path) -> bool:
    """Return True when *path* is a regular file that can be executed.

    On Windows, a suffix listed in PATHEXT also counts as executable.
    """
    if not path.is_file():
        return False
    if os.name != "nt":
        return os.access(path, os.X_OK)
    raw_exts = os.environ.get("PATHEXT", ".COM;.EXE;.BAT;.CMD;.PS1")
    executable_suffixes = {ext.lower() for ext in raw_exts.split(";")}
    return path.suffix.lower() in executable_suffixes or os.access(path, os.X_OK)
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def discover_management_entrypoint(search_dirs: list[Path] | None = None) -> str | None:
    """Find the best openclaw management executable.

    A direct `openclaw` resolution wins. Otherwise the given directories
    (default: path_search_dirs()) are scanned in order for executables
    whose name starts with "openclaw", entries sorted case-insensitively
    within each directory, and the first match (by resolved path) wins.
    """
    direct = resolve_command_path("openclaw")
    if direct:
        return direct

    directories = path_search_dirs() if search_dirs is None else search_dirs
    for directory in directories:
        try:
            children = sorted(directory.iterdir(), key=lambda item: item.name.lower())
        except OSError:
            # Unreadable or missing directory: just skip it.
            continue
        for child in children:
            if not child.name.lower().startswith("openclaw"):
                continue
            if not is_executable_file(child):
                continue
            return str(child.resolve())
    return None
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def total_memory_gib() -> float | None:
    """Best-effort total physical RAM in GiB, or None when undetectable."""
    if sys.platform == "win32":
        # Field layout must match the Win32 MEMORYSTATUSEX structure exactly.
        class MemoryStatusEx(ctypes.Structure):
            _fields_ = [
                ("dwLength", ctypes.c_ulong),
                ("dwMemoryLoad", ctypes.c_ulong),
                ("ullTotalPhys", ctypes.c_ulonglong),
                ("ullAvailPhys", ctypes.c_ulonglong),
                ("ullTotalPageFile", ctypes.c_ulonglong),
                ("ullAvailPageFile", ctypes.c_ulonglong),
                ("ullTotalVirtual", ctypes.c_ulonglong),
                ("ullAvailVirtual", ctypes.c_ulonglong),
                ("ullAvailExtendedVirtual", ctypes.c_ulonglong),
            ]

        status = MemoryStatusEx()
        # GlobalMemoryStatusEx requires dwLength to be pre-set to the struct size.
        status.dwLength = ctypes.sizeof(MemoryStatusEx)
        if ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(status)):
            return status.ullTotalPhys / (1024 ** 3)
        return None

    if sys.platform == "darwin":
        # hw.memsize reports total physical memory in bytes.
        ok, output = run_command(["sysctl", "-n", "hw.memsize"])
        if ok and output.isdigit():
            return int(output) / (1024 ** 3)
        return None

    if sys.platform.startswith("linux"):
        try:
            with open("/proc/meminfo", "r", encoding="utf-8") as handle:
                for line in handle:
                    if line.startswith("MemTotal:"):
                        # MemTotal is reported in kiB; convert to GiB.
                        parts = line.split()
                        return int(parts[1]) * 1024 / (1024 ** 3)
        except OSError:
            return None

    # Unknown platform, or /proc/meminfo had no MemTotal line.
    return None
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
def recommend_candidates(memory_gib: float | None) -> list[str]:
|
|
242
|
+
if memory_gib is None:
|
|
243
|
+
return [
|
|
244
|
+
"embeddinggemma:300m-qat-q8_0",
|
|
245
|
+
"nomic-embed-text:latest",
|
|
246
|
+
"qwen3-embedding:0.6b",
|
|
247
|
+
]
|
|
248
|
+
if memory_gib < 8:
|
|
249
|
+
return [
|
|
250
|
+
"nomic-embed-text:latest",
|
|
251
|
+
"embeddinggemma:300m-qat-q8_0",
|
|
252
|
+
]
|
|
253
|
+
if memory_gib < 32:
|
|
254
|
+
return [
|
|
255
|
+
"embeddinggemma:300m-qat-q8_0",
|
|
256
|
+
"nomic-embed-text:latest",
|
|
257
|
+
"qwen3-embedding:0.6b",
|
|
258
|
+
]
|
|
259
|
+
return [
|
|
260
|
+
"embeddinggemma:300m-qat-q8_0",
|
|
261
|
+
"nomic-embed-text:latest",
|
|
262
|
+
"qwen3-embedding:0.6b",
|
|
263
|
+
"qwen3-embedding:latest",
|
|
264
|
+
]
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
def probe_ollama(host: str) -> dict[str, Any]:
    """Query the Ollama /api/tags endpoint and list installed model names.

    Returns {"reachable": bool, "models": [name, ...]}; network or parse
    failures are reported as unreachable rather than raised.
    """
    endpoint = host.rstrip("/") + "/api/tags"
    try:
        with urllib.request.urlopen(endpoint, timeout=5) as response:
            payload = json.load(response)
    except (urllib.error.URLError, TimeoutError, json.JSONDecodeError):
        return {"reachable": False, "models": []}

    names: list[str] = []
    raw_models = payload.get("models", [])
    if isinstance(raw_models, list):
        names = [
            entry["name"]
            for entry in raw_models
            if isinstance(entry, dict) and isinstance(entry.get("name"), str)
        ]
    return {"reachable": True, "models": names}
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
def build_report(ollama_host: str) -> dict[str, Any]:
    """Assemble the full readiness report as a JSON-friendly dict."""
    memory_gib = total_memory_gib()
    commands: dict[str, Any] = {}
    versions: dict[str, Any] = {}

    def first_version_line(executable: str) -> str | None:
        # `tool --version` output may span lines; keep only the first.
        ok, output = run_command([executable, "--version"])
        return output.splitlines()[0] if ok and output else None

    for command in ["ollama", "openclaw", "node", "npm"]:
        path = resolve_command_path(command)
        commands[command] = path
        versions[command] = first_version_line(path) if path else None

    management_path = discover_management_entrypoint()
    commands["openclaw_management"] = management_path
    if not management_path:
        versions["openclaw_management"] = None
    elif management_path == commands["openclaw"]:
        # Same binary as the plain `openclaw` lookup; reuse its version.
        versions["openclaw_management"] = versions["openclaw"]
    else:
        versions["openclaw_management"] = first_version_line(management_path)

    system_name = platform.system()
    return {
        "os": system_name,
        "release": platform.release(),
        "arch": platform.machine(),
        "shell_context": shell_context(),
        "cpu_count": os.cpu_count(),
        "memory_gib": round(memory_gib, 2) if memory_gib is not None else None,
        "commands": commands,
        "versions": versions,
        "ollama_api": probe_ollama(ollama_host),
        "recommended_candidates": recommend_candidates(memory_gib),
        "openclaw_install_note": (
            "Use Mac or Linux directly; use WSL on Windows for current official OpenClaw setup."
            if system_name == "Windows"
            else "Use the official Ollama OpenClaw launch flow."
        ),
    }
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def print_text(report: dict[str, Any], ollama_host: str) -> None:
    """Render *report* as a human-readable, sectioned console summary."""
    memory = report["memory_gib"]
    print("System")
    print(f" OS: {report['os']} {report['release']}")
    print(f" Context: {report['shell_context']}")
    print(f" Arch: {report['arch']}")
    print(f" CPUs: {report['cpu_count']}")
    print(f" RAM GiB: {memory if memory is not None else 'unknown'}")
    print()

    print("Commands")
    for command, path in report["commands"].items():
        label = "openclaw management" if command == "openclaw_management" else command
        if not path:
            print(f" {label}: missing")
            continue
        version = report["versions"].get(command)
        suffix = f" ({version})" if version else ""
        print(f" {label}: {path}{suffix}")
    print()

    print("Ollama API")
    api = report["ollama_api"]
    if api["reachable"]:
        models = api["models"]
        print(f" {ollama_host}: reachable")
        print(f" Models: {', '.join(models) if models else 'none pulled'}")
    else:
        print(f" {ollama_host}: unreachable")
    print()

    print("Recommended candidates")
    for model in report["recommended_candidates"]:
        print(f" - {model}")
    print()

    print("OpenClaw note")
    print(f" {report['openclaw_install_note']}")
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
def main() -> int:
    """Entry point: build the readiness report and emit it in the requested format."""
    args = parse_args()
    report = build_report(args.ollama_host)
    if args.json:
        print(json.dumps(report, indent=2))
        return 0
    print_text(report, args.ollama_host)
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
package/package.json
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@dhf-hermes/grix",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Standalone Hermes skill bundle for Grix and OpenClaw workflows.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"grix-hermes": "./bin/grix-hermes.mjs"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"bin",
|
|
11
|
+
"lib",
|
|
12
|
+
"shared",
|
|
13
|
+
"grix-admin",
|
|
14
|
+
"grix-egg",
|
|
15
|
+
"grix-group",
|
|
16
|
+
"grix-query",
|
|
17
|
+
"grix-register",
|
|
18
|
+
"grix-update",
|
|
19
|
+
"message-send",
|
|
20
|
+
"message-unsend",
|
|
21
|
+
"openclaw-memory-setup",
|
|
22
|
+
"README.md",
|
|
23
|
+
"LICENSE",
|
|
24
|
+
".gitignore"
|
|
25
|
+
],
|
|
26
|
+
"scripts": {
|
|
27
|
+
"list": "node ./bin/grix-hermes.mjs list",
|
|
28
|
+
"manifest": "node ./bin/grix-hermes.mjs manifest",
|
|
29
|
+
"install:local": "node ./bin/grix-hermes.mjs install",
|
|
30
|
+
"test": "node --test tests/*.test.mjs",
|
|
31
|
+
"publish:preview": "bash ./publish.sh",
|
|
32
|
+
"publish:npm": "bash ./publish.sh --publish"
|
|
33
|
+
},
|
|
34
|
+
"keywords": [
|
|
35
|
+
"hermes",
|
|
36
|
+
"skills",
|
|
37
|
+
"grix",
|
|
38
|
+
"openclaw"
|
|
39
|
+
],
|
|
40
|
+
"license": "MIT",
|
|
41
|
+
"publishConfig": {
|
|
42
|
+
"access": "public"
|
|
43
|
+
},
|
|
44
|
+
"engines": {
|
|
45
|
+
"node": ">=20"
|
|
46
|
+
},
|
|
47
|
+
"dependencies": {
|
|
48
|
+
"ws": "^8.18.0",
|
|
49
|
+
"yaml": "^2.8.1"
|
|
50
|
+
}
|
|
51
|
+
}
|