arc402-cli 1.4.24 → 1.4.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -3
- package/workroom/Dockerfile +56 -0
- package/workroom/Dockerfile.gpu +74 -0
- package/workroom/arena-policy.yaml +152 -0
- package/workroom/credentials.template.toml +167 -0
- package/workroom/derive-policy.sh +84 -0
- package/workroom/dns-refresh.sh +57 -0
- package/workroom/entrypoint.sh +235 -0
- package/workroom/policy-parser.sh +25 -0
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "arc402-cli",
-  "version": "1.4.24",
+  "version": "1.4.25",
   "description": "ARC-402 CLI for discovery, negotiation payloads, hire/remediation/dispute workflows, and network reads",
   "bin": {
     "arc402": "./dist/index.js"
@@ -8,11 +8,12 @@
   "main": "./dist/index.js",
   "files": [
     "dist/",
-    "
+    "workroom/"
   ],
   "scripts": {
     "build": "tsc",
-    "test": "node --test test/**/*.test.js"
+    "test": "node --test test/**/*.test.js",
+    "prepublishOnly": "npm run build && node -e \"const fs=require('fs'),path=require('path'); const src=path.resolve(__dirname,'../workroom'); const dst=path.resolve(__dirname,'workroom'); if(fs.existsSync(src)){fs.cpSync(src,dst,{recursive:true}); console.log('Copied workroom/ into package');} else { console.warn('WARNING: ../workroom not found — workroom files will be missing from package'); }\""
   },
   "dependencies": {
     "@arc402/sdk": "^0.6.0",
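The prepublishOnly hook vendors ../workroom into the package directory so the tarball ships the container assets that the new "workroom/" files entry whitelists. A quick local check of what would actually be published (standard npm; npm publish --dry-run runs the lifecycle hooks and lists the tarball contents without uploading):

    npm publish --dry-run 2>&1 | grep workroom/
    # should list the eight workroom/ files from the summary above;
    # if the hook's WARNING fired, ../workroom was missing and they won't appear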
package/workroom/Dockerfile
ADDED

@@ -0,0 +1,56 @@
+FROM node:22-slim
+
+# Runtime dependencies for network enforcement + diagnostics
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    iptables curl dnsutils iproute2 procps jq ca-certificates python3 make g++ \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install arc402-cli globally — Linux-compiled native binaries (better-sqlite3, etc.)
+# This runs at image build time so native addons compile for Linux, not the host OS.
+# ARG allows pinning a specific version: docker build --build-arg ARC402_CLI_VERSION=1.4.19
+ARG ARC402_CLI_VERSION=latest
+# tail trims npm's log noise but also masks its exit status, so verify the binary landed.
+RUN npm install -g arc402-cli@${ARC402_CLI_VERSION} --build-from-source 2>&1 | tail -5 && command -v arc402 >/dev/null
+# Stamp the CLI version as an image label so workroom start can detect stale images.
+LABEL arc402.cli.version=${ARC402_CLI_VERSION}
+
+# Create workroom user with the same UID/GID as the host user (default: 1000).
+# This ensures bind-mounted volumes are readable/writable without permission fights.
+# Override at build time: docker build --build-arg HOST_UID=1001 --build-arg HOST_GID=1001
+ARG HOST_UID=1000
+ARG HOST_GID=1000
+# node:22-slim already has a 'node' user at UID 1000. Rename it to 'workroom'
+# so the container user matches the host user (bind mount permissions).
+RUN usermod -l workroom -d /workroom -m node \
+    && groupmod -n workroom node \
+    && usermod -u ${HOST_UID} workroom 2>/dev/null || true \
+    && groupmod -g ${HOST_GID} workroom 2>/dev/null || true
+
+# Workroom scripts
+COPY entrypoint.sh /entrypoint.sh
+COPY dns-refresh.sh /dns-refresh.sh
+COPY policy-parser.sh /policy-parser.sh
+COPY derive-policy.sh /derive-policy.sh
+RUN chmod +x /entrypoint.sh /dns-refresh.sh /policy-parser.sh /derive-policy.sh
+
+# Job workspace directory
+RUN mkdir -p /workroom/jobs && chown workroom:workroom /workroom/jobs
+
+# OpenClaw runtime directory (mounted from host)
+RUN mkdir -p /workroom/openclaw && chown workroom:workroom /workroom/openclaw
+
+# Worker specialisation directories (created by worker init, but ensure they exist).
+# Paths spelled out — RUN uses /bin/sh, which has no brace expansion.
+RUN mkdir -p /workroom/.arc402/worker/knowledge /workroom/.arc402/worker/datasets \
+    /workroom/.arc402/worker/skills /workroom/.arc402/worker/memory \
+    && chown -R workroom:workroom /workroom/.arc402
+
+# Home directory for workroom user (auth files mounted here)
+RUN mkdir -p /home/workroom/.openclaw && chown -R workroom:workroom /home/workroom
+
+# Arena directories — feed index, profile cache, daemon state, approval queue
+RUN mkdir -p /workroom/arena/feed /workroom/arena/profile /workroom/arena/state /workroom/arena/queue \
+    && chown -R workroom:workroom /workroom/arena
+
+# Default arena policy (can be overridden by mount)
+COPY arena-policy.yaml /workroom/defaults/arena-policy.yaml
+
+WORKDIR /workroom
+ENTRYPOINT ["/entrypoint.sh"]
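All three build args are overridable. A fully pinned build might look like this (the arc402-workroom tag is illustrative; the real build is presumably driven by arc402 workroom init):

    docker build \
      --build-arg ARC402_CLI_VERSION=1.4.25 \
      --build-arg HOST_UID="$(id -u)" \
      --build-arg HOST_GID="$(id -g)" \
      -t arc402-workroom:1.4.25 workroom/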
package/workroom/Dockerfile.gpu
ADDED

@@ -0,0 +1,74 @@
+# ARC-402 GPU Workroom
+# CUDA-enabled variant of the standard workroom container.
+# Run with: docker run --gpus all --runtime=nvidia arc402-workroom:gpu
+#
+# Build:
+#   docker build -f workroom/Dockerfile.gpu -t arc402-workroom:gpu workroom/
+
+FROM nvidia/cuda:12.4.0-runtime-ubuntu22.04
+
+# ─── System dependencies ──────────────────────────────────────────────────────
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    # Same runtime deps as base Dockerfile
+    iptables curl dnsutils iproute2 procps jq ca-certificates \
+    # GPU metering
+    python3-minimal \
+    # Node.js 22 (LTS) via NodeSource
+    && curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
+    && apt-get install -y --no-install-recommends nodejs \
+    && rm -rf /var/lib/apt/lists/*
+
+# ─── NVIDIA container toolkit check ──────────────────────────────────────────
+# nvidia-smi is provided by the host driver mounted into the container.
+# Validate it's accessible at container startup (see entrypoint.sh).
+# NVIDIA_VISIBLE_DEVICES and NVIDIA_DRIVER_CAPABILITIES must be set at runtime.
+
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
+
+# ─── User setup (mirrors base Dockerfile) ────────────────────────────────────
+
+ARG HOST_UID=1000
+ARG HOST_GID=1000
+
+# Create the workroom user/group; the usermod fallback handles bases where UID 1000 is already taken.
+RUN groupadd -f -g ${HOST_GID} workroom 2>/dev/null || true \
+    && useradd -u ${HOST_UID} -g ${HOST_GID} -m -d /workroom -s /bin/bash workroom 2>/dev/null \
+    || usermod -u ${HOST_UID} -g ${HOST_GID} -d /workroom workroom 2>/dev/null || true
+
+# ─── Workroom scripts ─────────────────────────────────────────────────────────
+
+COPY entrypoint.sh /entrypoint.sh
+COPY dns-refresh.sh /dns-refresh.sh
+COPY policy-parser.sh /policy-parser.sh
+COPY derive-policy.sh /derive-policy.sh
+RUN chmod +x /entrypoint.sh /dns-refresh.sh /policy-parser.sh /derive-policy.sh
+
+# ─── Directory layout (same as base Dockerfile) ───────────────────────────────
+
+RUN mkdir -p /workroom/jobs && chown workroom:workroom /workroom/jobs
+RUN mkdir -p /workroom/openclaw && chown workroom:workroom /workroom/openclaw
+# Paths spelled out — RUN uses /bin/sh, which has no brace expansion.
+RUN mkdir -p /workroom/.arc402/worker/knowledge /workroom/.arc402/worker/datasets \
+    /workroom/.arc402/worker/skills /workroom/.arc402/worker/memory \
+    && chown -R workroom:workroom /workroom/.arc402
+RUN mkdir -p /home/workroom/.openclaw && chown -R workroom:workroom /home/workroom
+RUN mkdir -p /workroom/arena/feed /workroom/arena/profile /workroom/arena/state /workroom/arena/queue \
+    && chown -R workroom:workroom /workroom/arena
+
+# ─── GPU-specific directories ─────────────────────────────────────────────────
+
+# Compute session data directory (metrics + reports written by metering daemon)
+RUN mkdir -p /workroom/.arc402/compute && chown workroom:workroom /workroom/.arc402/compute
+
+# ─── Default arena policy ─────────────────────────────────────────────────────
+
+COPY arena-policy.yaml /workroom/defaults/arena-policy.yaml
+
+WORKDIR /workroom
+ENTRYPOINT ["/entrypoint.sh"]
+
+# ─── Runtime labels ───────────────────────────────────────────────────────────
+
+LABEL org.opencontainers.image.title="arc402-workroom-gpu"
+LABEL org.opencontainers.image.description="ARC-402 GPU workroom — CUDA 12.4, nvidia-smi metering"
+LABEL arc402.gpu="true"
+LABEL arc402.cuda.version="12.4.0"
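Because nvidia-smi comes from the host driver rather than the image, a smoke test is worth running before handing the image to the daemon (entrypoint overridden just for the check; tag as built above):

    docker run --rm --gpus all --entrypoint nvidia-smi arc402-workroom:gpu
    # should print the host GPU table; an error here usually means the
    # NVIDIA container toolkit is missing or --gpus was dropped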
package/workroom/arena-policy.yaml
ADDED

@@ -0,0 +1,152 @@
+# ARC-402 Arena Policy — extends the base workroom policy
+# This file defines the network, spend, and behavior boundaries for Arena operations
+# running inside the Workroom container.
+#
+# Merged with openshell-policy.yaml at workroom startup.
+# Arena-specific policies are additive — they don't override base security.
+
+version: 1
+
+# ─── Arena Network Policy ──────────────────────────────────────────────────────
+# Which external hosts Arena processes can reach from inside the Workroom.
+# Everything else is DROP via iptables.
+
+arena_network:
+  # Base mainnet RPC — for contract reads and transaction submission
+  base_rpc:
+    endpoints:
+      - host: mainnet.base.org
+        port: 443
+      - host: base-mainnet.g.alchemy.com
+        port: 443
+      - host: base.llamarpc.com
+        port: 443
+
+  # ARC-402 infrastructure — relay, API, public pages
+  arc402_infra:
+    endpoints:
+      - host: api.arc402.xyz
+        port: 443
+      - host: arc402.xyz
+        port: 443
+      - host: relay.arc402.xyz
+        port: 443
+      - host: gigabrain.arc402.xyz
+        port: 443
+
+  # ERC-4337 bundler — for UserOp submission
+  bundler:
+    endpoints:
+      - host: public.pimlico.io
+        port: 443
+
+  # Notifications — Telegram alerts for arena events
+  notifications:
+    endpoints:
+      - host: api.telegram.org
+        port: 443
+
+  # Data feeds — for prediction round research (read-only intelligence)
+  # Add specific API hosts as needed. Each addition is a policy decision.
+  data_feeds:
+    endpoints:
+      - host: api.coingecko.com
+        port: 443
+      - host: pro-api.coinmarketcap.com
+        port: 443
+
+# ─── Arena Spend Policy ────────────────────────────────────────────────────────
+# Workroom-level spend limits. These are ADDITIONAL to PolicyEngine onchain limits.
+# The tighter of the two always wins.
+#
+# These are enforced by the Arena daemon before submitting transactions.
+# They protect against bugs, not just malice.
+
+arena_spend:
+  handshake:
+    max_per_handshake_eth: "0.01"     # max ETH tip per handshake
+    max_per_handshake_usdc: "10.00"   # max USDC tip per handshake
+    daily_count_cap: 50               # mirrors contract cap
+    daily_spend_cap_eth: "0.5"        # total handshake spend per day
+    daily_spend_cap_usdc: "100.00"    # total handshake USDC spend per day
+
+  arena:
+    max_stake_per_round_eth: "0.1"    # max stake per prediction round
+    max_stake_per_round_usdc: "50.00"
+    daily_rounds_cap: 10              # max rounds entered per day
+    daily_spend_cap_eth: "1.0"        # total arena spend per day
+    daily_spend_cap_usdc: "500.00"
+
+  subscriptions:
+    max_per_subscription_eth: "0.05"  # max per subscription payment
+    max_per_subscription_usdc: "25.00"
+    max_active_subscriptions: 20      # cap on simultaneous subscriptions
+
+# ─── Arena Behavior Policy ─────────────────────────────────────────────────────
+# Controls what the Arena daemon can do autonomously vs what requires approval.
+
+arena_behavior:
+  # Autonomous actions — daemon can do these without human approval
+  autonomous:
+    - send_handshake        # respond to inbound handshakes
+    - post_status           # publish status updates
+    - index_feed            # consume and index feed events
+    - update_profile        # refresh profile metadata
+
+  # Approval-required actions — daemon queues these for human review
+  approval_required:
+    - enter_arena_round     # stake money on prediction
+    - back_agent            # back another agent's forecast
+    - create_subscription   # open a paid subscription channel
+    - publish_newsletter    # publish content to subscribers
+    - join_squad            # join a research squad
+
+  # Forbidden actions — daemon cannot do these at all
+  forbidden:
+    - modify_wallet_policy  # never change its own spending rules
+    - transfer_ownership    # never transfer wallet ownership
+    - upgrade_registry      # never propose registry updates
+    - whitelist_contracts   # never add new contract whitelists
+
+# ─── Arena Daemon Config ───────────────────────────────────────────────────────
+# How the Arena daemon runs inside the Workroom.
+
+arena_daemon:
+  # Feed indexer — listens for HandshakeSent, NewConnection, arena events
+  indexer:
+    poll_interval_seconds: 30
+    start_block: "latest"   # or specific block number
+    contracts:
+      - name: handshake
+        address: ""         # filled after deployment
+        events:
+          - HandshakeSent
+          - NewConnection
+      # Future: ArenaRound events added here
+
+  # Status publisher — posts agent status updates
+  status:
+    auto_publish: false     # manual by default
+    max_per_day: 10
+
+  # Discovery — how the agent finds other agents
+  discovery:
+    refresh_interval_seconds: 300
+    trending_window_hours: 24
+
+# ─── Arena Filesystem Policy ───────────────────────────────────────────────────
+# What Arena processes can read and write inside the Workroom.
+
+arena_filesystem:
+  read_write:
+    - /workroom/arena/feed      # indexed feed data
+    - /workroom/arena/profile   # agent profile cache
+    - /workroom/arena/state     # daemon state (last block, etc.)
+    - /workroom/arena/queue     # approval queue for human review
+
+  read_only:
+    - /workroom/runtime         # CLI and daemon code
+    - /workroom/.arc402         # policy files
+
+  forbidden:
+    - /workroom/.arc402/keys    # private keys (injected as env vars only)
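The "tighter of the two always wins" rule is a plain min() over each limit pair. A minimal sketch of the pre-submission check, with hypothetical values (real enforcement lives in the Arena daemon and the onchain PolicyEngine):

    workroom_cap="0.01"   # max_per_handshake_eth above
    onchain_cap="0.02"    # hypothetical PolicyEngine limit
    effective=$(awk -v a="$workroom_cap" -v b="$onchain_cap" \
        'BEGIN { print ((a+0 < b+0) ? a : b) }')
    echo "$effective"     # prints 0.01: the workroom cap binds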
package/workroom/credentials.template.toml
ADDED

@@ -0,0 +1,167 @@
+# ─────────────────────────────────────────────────────────────────────────────
+# ARC-402 Workroom Credentials Template
+#
+# This file configures LLM provider access for your workroom worker.
+# Copy to ~/.arc402/worker/credentials.toml and fill in your keys.
+#
+# NEVER commit this file. It is gitignored by default.
+# The workroom setup script reads this to:
+#   1. Pass API keys as Docker env vars (never written to disk in container)
+#   2. Auto-generate network policy entries (iptables whitelist)
+#
+# Two modes:
+#   Tier 1 (recommended): Use OpenClaw as your worker runtime.
+#     → OpenClaw's openclaw.json already has all provider configs.
+#     → You DON'T need this file. Everything is auto-derived.
+#     → Set runtime = "openclaw" in worker/config.json.
+#
+#   Tier 2 (advanced): Bring your own harness (raw Claude Code, Codex, etc.)
+#     → Fill in the providers below for your chosen harness.
+#     → The setup script generates matching iptables rules.
+# ─────────────────────────────────────────────────────────────────────────────
+
+# ── Anthropic (Claude Code) ─────────────────────────────────────────────────
+# Two auth modes:
+#   api_key: Direct API key (pay per token)
+#   oauth:   Max subscription OAuth (unlimited, mounted from host)
+
+[providers.anthropic]
+enabled = false
+auth = "api_key"            # "api_key" or "oauth"
+env = "ANTHROPIC_API_KEY"   # Set this env var on the host
+# oauth_token_path = "~/.claude/oauth"   # For OAuth mode (mounted read-only)
+hosts = ["api.anthropic.com"]
+
+# ── OpenAI (Codex, GPT) ────────────────────────────────────────────────────
+
+[providers.openai]
+enabled = false
+auth = "api_key"
+env = "OPENAI_API_KEY"
+hosts = ["api.openai.com"]
+
+# ── OpenAI Codex (dedicated endpoint) ──────────────────────────────────────
+
+[providers.openai_codex]
+enabled = false
+auth = "api_key"
+env = "OPENAI_API_KEY"      # Same key as OpenAI
+hosts = ["api.openai.com"]  # Same endpoint
+
+# ── Google (Gemini) ─────────────────────────────────────────────────────────
+
+[providers.google]
+enabled = false
+auth = "api_key"
+env = "GOOGLE_API_KEY"
+# Alternative: service account JSON
+# service_account_path = "~/.config/gcloud/application_default_credentials.json"
+hosts = ["generativelanguage.googleapis.com"]
+
+# ── OpenRouter ──────────────────────────────────────────────────────────────
+# Routes to multiple providers (Anthropic, OpenAI, Google, etc.)
+# Single key, many models.
+
+[providers.openrouter]
+enabled = false
+auth = "api_key"
+env = "OPENROUTER_API_KEY"
+hosts = ["openrouter.ai"]
+
+# ── Mistral ─────────────────────────────────────────────────────────────────
+
+[providers.mistral]
+enabled = false
+auth = "api_key"
+env = "MISTRAL_API_KEY"
+hosts = ["api.mistral.ai"]
+
+# ── Groq ────────────────────────────────────────────────────────────────────
+
+[providers.groq]
+enabled = false
+auth = "api_key"
+env = "GROQ_API_KEY"
+hosts = ["api.groq.com"]
+
+# ── Together AI ─────────────────────────────────────────────────────────────
+
+[providers.together]
+enabled = false
+auth = "api_key"
+env = "TOGETHER_API_KEY"
+hosts = ["api.together.xyz"]
+
+# ── Fireworks AI ────────────────────────────────────────────────────────────
+
+[providers.fireworks]
+enabled = false
+auth = "api_key"
+env = "FIREWORKS_API_KEY"
+hosts = ["api.fireworks.ai"]
+
+# ── DeepSeek ────────────────────────────────────────────────────────────────
+
+[providers.deepseek]
+enabled = false
+auth = "api_key"
+env = "DEEPSEEK_API_KEY"
+hosts = ["api.deepseek.com"]
+
+# ── Cohere ──────────────────────────────────────────────────────────────────
+
+[providers.cohere]
+enabled = false
+auth = "api_key"
+env = "COHERE_API_KEY"
+hosts = ["api.cohere.ai"]
+
+# ── Perplexity ──────────────────────────────────────────────────────────────
+
+[providers.perplexity]
+enabled = false
+auth = "api_key"
+env = "PERPLEXITY_API_KEY"
+hosts = ["api.perplexity.ai"]
+
+# ── xAI (Grok) ─────────────────────────────────────────────────────────────
+
+[providers.xai]
+enabled = false
+auth = "api_key"
+env = "XAI_API_KEY"
+hosts = ["api.x.ai"]
+
+# ── Custom Provider ─────────────────────────────────────────────────────────
+# Add your own provider. The setup script reads `hosts` to generate
+# iptables rules and `env` to mount the key into the container.
+
+# [providers.custom_provider]
+# enabled = false
+# auth = "api_key"
+# env = "CUSTOM_API_KEY"
+# hosts = ["api.custom-provider.com"]
+
+# ─────────────────────────────────────────────────────────────────────────────
+# USAGE:
+#
+# 1. Copy this file:
+#    cp credentials.template.toml ~/.arc402/worker/credentials.toml
+#
+# 2. Enable the providers you need:
+#    Set enabled = true for each provider you want
+#
+# 3. Set your API keys as environment variables:
+#    export ANTHROPIC_API_KEY="sk-ant-..."
+#    export OPENAI_API_KEY="sk-..."
+#
+# 4. Run workroom setup (reads credentials, generates network policy):
+#    arc402 workroom init
+#
+# 5. The workroom container will:
+#    - Receive your API keys as Docker env vars (never on disk)
+#    - Have iptables rules allowing only your enabled providers' hosts
+#    - Block all other outbound network traffic
+#
+# For OpenClaw users (Tier 1): skip all of this. OpenClaw handles it.
+# ─────────────────────────────────────────────────────────────────────────────
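Each enabled provider feeds two mechanisms: its env entry names a variable that docker passes through by reference, and its hosts list becomes iptables ACCEPT rules. Conceptually (flag layout hypothetical; arc402 workroom init assembles the real command):

    # '-e NAME' with no value copies the variable from the host environment,
    # so the key never appears in argv, in the image, or on the container disk
    docker run -e ANTHROPIC_API_KEY ... arc402-workroom:latest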
package/workroom/derive-policy.sh
ADDED

@@ -0,0 +1,84 @@
+#!/bin/bash
+# ─────────────────────────────────────────────────────────────────────────────
+# ARC-402 Policy Derivation — Auto-generate network policy from OpenClaw config
+#
+# Reads the OpenClaw configuration to determine which LLM providers are
+# configured, then ensures those endpoints are in the workroom network policy.
+#
+# Usage: ./derive-policy.sh [openclaw-config-path] [policy-output-path]
+# ─────────────────────────────────────────────────────────────────────────────
+set -euo pipefail
+
+OPENCLAW_CONFIG="${1:-$HOME/.openclaw/openclaw.json}"
+POLICY_FILE="${2:-$HOME/.arc402/openshell-policy.yaml}"
+
+# Known LLM provider → endpoint mapping
+declare -A PROVIDER_HOSTS
+PROVIDER_HOSTS=(
+    ["anthropic"]="api.anthropic.com"
+    ["openai"]="api.openai.com"
+    ["google"]="generativelanguage.googleapis.com"
+    ["mistral"]="api.mistral.ai"
+    ["groq"]="api.groq.com"
+    ["openrouter"]="openrouter.ai"
+    ["together"]="api.together.xyz"
+    ["fireworks"]="api.fireworks.ai"
+    ["deepseek"]="api.deepseek.com"
+    ["cohere"]="api.cohere.ai"
+    ["perplexity"]="api.perplexity.ai"
+    ["xai"]="api.x.ai"
+)
+
+if [ ! -f "$OPENCLAW_CONFIG" ]; then
+    echo "[derive-policy] OpenClaw config not found: $OPENCLAW_CONFIG"
+    exit 0
+fi
+
+if [ ! -f "$POLICY_FILE" ]; then
+    echo "[derive-policy] Policy file not found: $POLICY_FILE"
+    exit 1
+fi
+
+echo "[derive-policy] Reading OpenClaw config: $OPENCLAW_CONFIG"
+
+# Extract configured providers from openclaw.json
+# Look for provider keys in models/providers config
+CONFIGURED_PROVIDERS=$(jq -r '
+    (.models // {} | keys[]) // empty,
+    (.providers // {} | keys[]) // empty
+' "$OPENCLAW_CONFIG" 2>/dev/null | sort -u)
+
+ADDED=0
+for provider in $CONFIGURED_PROVIDERS; do
+    # Normalize provider name (keep the base before any dash: "openai-codex" → "openai")
+    base_provider=$(echo "$provider" | sed 's/-.*//')
+
+    host="${PROVIDER_HOSTS[$base_provider]:-}"
+    if [ -z "$host" ]; then
+        continue
+    fi
+
+    # Check if host is already in the policy
+    if grep -q "$host" "$POLICY_FILE" 2>/dev/null; then
+        echo "[derive-policy] Already in policy: $host (for $provider)"
+        continue
+    fi
+
+    # Append to policy file
+    cat >> "$POLICY_FILE" << EOF
+${base_provider}_api:
+  name: ${base_provider}-llm-api
+  endpoints:
+    - host: ${host}
+      port: 443
+      protocol: rest
+      tls: terminate
+  enforcement: enforce
+  access: read-write
+  binaries: *a1  # YAML alias — expects an &a1 binaries anchor defined earlier in the policy
+EOF
+    echo "[derive-policy] Added: $host (for $provider)"
+    ADDED=$((ADDED + 1))
+done
+
+echo "[derive-policy] Done. $ADDED new provider endpoints added to policy."
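The grep guard makes the script idempotent: a second run adds nothing. An illustrative session, assuming openclaw.json configures an anthropic model (output abridged):

    $ ./derive-policy.sh ~/.openclaw/openclaw.json ~/.arc402/openshell-policy.yaml
    [derive-policy] Added: api.anthropic.com (for anthropic)
    [derive-policy] Done. 1 new provider endpoints added to policy.
    $ ./derive-policy.sh ~/.openclaw/openclaw.json ~/.arc402/openshell-policy.yaml
    [derive-policy] Already in policy: api.anthropic.com (for anthropic)
    [derive-policy] Done. 0 new provider endpoints added to policy.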
package/workroom/dns-refresh.sh
ADDED

@@ -0,0 +1,57 @@
+#!/bin/bash
+# ─────────────────────────────────────────────────────────────────────────────
+# ARC-402 Workroom DNS Refresh
+#
+# Periodically re-resolves policy hostnames and updates iptables rules.
+# Handles CDN IP rotation, failover, and DNS changes without container restart.
+#
+# Runs as root in the background. The entrypoint starts this automatically.
+# ─────────────────────────────────────────────────────────────────────────────
+set -euo pipefail
+
+readonly POLICY_FILE="${1:-/workroom/.arc402/openshell-policy.yaml}"
+readonly REFRESH_INTERVAL="${ARC402_DNS_REFRESH_SECONDS:-300}"
+readonly RULES_LOG="/workroom/.arc402/iptables-rules.log"
+
+log() { echo "[dns-refresh] $*"; }
+
+log "Starting (interval: ${REFRESH_INTERVAL}s, policy: $POLICY_FILE)"
+
+while true; do
+    sleep "$REFRESH_INTERVAL"
+
+    log "Refreshing..."
+
+    # Flush and rebuild the OUTPUT chain from scratch. Each iptables call
+    # commits separately (this is not one atomic update), but the default
+    # DROP policy holds throughout — a mid-rebuild window can briefly
+    # over-block, never under-block.
+    iptables -F OUTPUT 2>/dev/null || true
+
+    # Core rules (always present)
+    iptables -A OUTPUT -o lo -j ACCEPT
+    iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
+    iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
+    iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
+
+    # Re-resolve and apply
+    RULE_COUNT=0
+    while IFS=: read -r host port; do
+        [ -z "$host" ] && continue
+        port="${port:-443}"
+
+        ips=$(getent ahosts "$host" 2>/dev/null | awk '{print $1}' | sort -u || true)
+        if [ -z "$ips" ]; then
+            log "WARN: Could not resolve $host"
+            continue
+        fi
+
+        while IFS= read -r ip; do
+            iptables -A OUTPUT -p tcp -d "$ip" --dport "$port" -j ACCEPT
+            RULE_COUNT=$((RULE_COUNT + 1))
+        done <<< "$ips"
+    done < <(/policy-parser.sh "$POLICY_FILE")
+
+    # Log updated rules
+    iptables -L OUTPUT -n --line-numbers > "$RULES_LOG" 2>/dev/null || true
+
+    log "Refreshed: $RULE_COUNT rules applied"
+done
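For debugging, a refresh cycle can be observed without waiting five minutes by shortening the interval via the env var and polling the rules log (watch ships with procps, which both images install; only do this in a container where the entrypoint has not already started its own instance):

    ARC402_DNS_REFRESH_SECONDS=30 /dns-refresh.sh /workroom/.arc402/openshell-policy.yaml &
    watch -n 30 cat /workroom/.arc402/iptables-rules.log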
package/workroom/entrypoint.sh
ADDED

@@ -0,0 +1,235 @@
+#!/bin/bash
+# ─────────────────────────────────────────────────────────────────────────────
+# ARC-402 Workroom Entrypoint
+#
+# This is the governed execution environment for hired work. The entrypoint:
+#   1. Reads the workroom policy (YAML → host:port pairs)
+#   2. Resolves all hostnames to IPs while DNS is still open
+#   3. Applies iptables rules: ALLOW resolved IPs, DROP everything else
+#   4. Starts the DNS refresh daemon for IP rotation
+#   5. Drops privileges and starts the ARC-402 daemon
+#
+# Runs as root for iptables setup. Drops to 'workroom' user for the daemon.
+# ─────────────────────────────────────────────────────────────────────────────
+set -euo pipefail
+
+# ─── log() must be defined first — used throughout ────────────────────────────
+log() { echo "[workroom] $*"; }
+
+readonly POLICY_FILE="/workroom/.arc402/openshell-policy.yaml"
+readonly RULES_LOG="/workroom/.arc402/iptables-rules.log"
+readonly ARENA_POLICY="/workroom/.arc402/arena-policy.yaml"
+readonly ARENA_DEFAULT="/workroom/defaults/arena-policy.yaml"
+
+# ─── Locate globally installed arc402-cli (Linux-native binaries) ─────────────
+# The image runs npm install -g arc402-cli --build-from-source at build time.
+# We must always use this global install for native addons (better-sqlite3 etc.),
+# never the host mount which carries macOS/Windows binaries.
+GLOBAL_NPM_ROOT=$(npm root -g 2>/dev/null || echo "")
+GLOBAL_CLI_ROOT="${GLOBAL_NPM_ROOT}/arc402-cli"
+GLOBAL_DAEMON="${GLOBAL_CLI_ROOT}/dist/daemon/index.js"
+log "Global npm root: ${GLOBAL_NPM_ROOT:-not found}"
+log "Global cli root: ${GLOBAL_CLI_ROOT}"
+
+# NODE_PATH: if a host dist/ is mounted (--dev mode), require() calls from it
+# must still resolve native addons from the Linux global install.
+# Set whenever the global install exists — harmless if no dev mount is present.
+if [ -d "${GLOBAL_CLI_ROOT}/node_modules" ]; then
+    export NODE_PATH="${GLOBAL_CLI_ROOT}/node_modules${NODE_PATH:+:$NODE_PATH}"
+    log "NODE_PATH → ${GLOBAL_CLI_ROOT}/node_modules (Linux-native addons)"
+fi
+
+# ─── Resolve daemon entry point ───────────────────────────────────────────────
+# Production (no --dev mount): /workroom/runtime/dist/daemon/index.js won't exist.
+#   → use global install directly.
+# Dev (--dev mount): mounted dist/ exists, global node_modules via NODE_PATH.
+#   → use mounted dist so JS changes propagate without rebuild.
+if [ -f "/workroom/runtime/dist/daemon/index.js" ]; then
+    DAEMON_ENTRY="/workroom/runtime/dist/daemon/index.js"
+    log "Daemon: host dist/ mount (dev mode)"
+elif [ -f "${GLOBAL_DAEMON}" ]; then
+    DAEMON_ENTRY="${GLOBAL_DAEMON}"
+    log "Daemon: global install (production)"
+else
+    DAEMON_ENTRY=""
+fi
+readonly DAEMON_ENTRY
+
+# ─── Validate prerequisites ────────────────────────────────────────────────
+
+if [ ! -f "$POLICY_FILE" ]; then
+    log "ERROR: Policy file not found at $POLICY_FILE"
+    log "Run 'arc402 workroom init' on the host first."
+    exit 1
+fi
+
+# ─── Phase 0b: Auto-derive LLM provider endpoints from OpenClaw config ────
+#
+# If OpenClaw config is mounted, read it and ensure all configured LLM
+# providers have their API endpoints in the network policy. This runs
+# BEFORE host resolution so the derived hosts get resolved too.
+
+OPENCLAW_CONFIG="/home/workroom/.openclaw/openclaw.json"
+if [ -f "$OPENCLAW_CONFIG" ]; then
+    log "OpenClaw config found — deriving LLM provider endpoints..."
+    /derive-policy.sh "$OPENCLAW_CONFIG" "$POLICY_FILE" || log "WARN: Policy derivation failed (non-fatal)"
+else
+    log "No OpenClaw config mounted — using static policy only"
+fi
+
+# ─── Phase 1: Resolve all policy hosts (DNS is still open) ─────────────────
+#
+# We resolve hostnames BEFORE applying the DROP policy. Once DROP is active,
+# only explicitly allowed IPs are reachable. By resolving first, we capture
+# the current DNS state into concrete IP rules.
+
+declare -a RESOLVED_IPS=()
+declare -a RESOLVED_PORTS=()
+RESOLVE_COUNT=0
+
+resolve_policy_hosts() {
+    local policy_file="$1"
+
+    while IFS=: read -r host port; do
+        [ -z "$host" ] && continue
+        port="${port:-443}"
+
+        local ips
+        ips=$(getent ahosts "$host" 2>/dev/null | awk '{print $1}' | sort -u || true)
+
+        if [ -z "$ips" ]; then
+            log "WARN: Could not resolve $host — skipping"
+            continue
+        fi
+
+        local ip_count
+        ip_count=$(echo "$ips" | wc -l | tr -d ' ')
+        log "Resolved: $host → $ip_count IPs"
+
+        while IFS= read -r ip; do
+            RESOLVED_IPS+=("$ip")
+            RESOLVED_PORTS+=("$port")
+            RESOLVE_COUNT=$((RESOLVE_COUNT + 1))
+        done <<< "$ips"
+    done < <(/policy-parser.sh "$policy_file")
+}
+
+log "Resolving policy hosts..."
+resolve_policy_hosts "$POLICY_FILE"
+
+# Also resolve arena policy if present
+if [ -f "$ARENA_POLICY" ]; then
+    resolve_policy_hosts "$ARENA_POLICY"
+elif [ -f "$ARENA_DEFAULT" ]; then
+    resolve_policy_hosts "$ARENA_DEFAULT"
+fi
+
+# ─── Phase 2: Apply network enforcement ────────────────────────────────────
+#
+# Default policy: DROP all outbound.
+# Exceptions: loopback, established connections, DNS, and resolved policy hosts.
+#
+# DNS is allowed broadly (not just 127.0.0.11) because the daemon and worker
+# need to resolve hostnames at runtime. The iptables rules restrict which IPs
+# are reachable, so DNS resolution alone doesn't grant access — only hosts
+# whose IPs match a rule can actually be connected to.
+
+iptables -P OUTPUT DROP 2>/dev/null || true
+iptables -F OUTPUT 2>/dev/null || true
+
+# Core rules: loopback, established, DNS
+iptables -A OUTPUT -o lo -j ACCEPT
+iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
+iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
+iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
+
+log "Default policy: DROP all outbound (except loopback, established, DNS)"
+
+# Apply resolved IP rules
+for i in "${!RESOLVED_IPS[@]}"; do
+    iptables -A OUTPUT -p tcp -d "${RESOLVED_IPS[$i]}" --dport "${RESOLVED_PORTS[$i]}" -j ACCEPT
+done
+
+log "$RESOLVE_COUNT iptables rules applied"
+
+# ─── Phase 3: Log applied rules ───────────────────────────────────────────
+
+iptables -L OUTPUT -n --line-numbers > "$RULES_LOG" 2>/dev/null || true
+log "Rules logged to $RULES_LOG"
+
+# ─── Phase 4: Start DNS refresh daemon ─────────────────────────────────────
+#
+# Hostnames may resolve to different IPs over time (CDN rotation, failover).
+# The refresh daemon re-resolves all policy hosts periodically and rebuilds
+# the iptables rules (the default DROP policy holds during each rebuild).
+
+/dns-refresh.sh "$POLICY_FILE" &
+local_dns_pid=$!
+log "DNS refresh daemon started (PID: $local_dns_pid, interval: ${ARC402_DNS_REFRESH_SECONDS:-300}s)"
+
+# ─── Phase 5: Validate daemon entry point ──────────────────────────────────
+
+if [ -z "$DAEMON_ENTRY" ] || [ ! -f "$DAEMON_ENTRY" ]; then
+    log "ERROR: Daemon entry point not found."
+    log "Tried: /workroom/runtime/dist/daemon/index.js (host --dev mount)"
+    log "Tried: ${GLOBAL_DAEMON} (global npm install inside image)"
+    log "Rebuild the workroom image: arc402 workroom init"
+    exit 1
+fi
+log "Daemon entry: $DAEMON_ENTRY"
+
+# ─── Phase 5b: Worker identity + agent runtimes on PATH ────────────────────
+
+WORKER_DIR="/workroom/.arc402/worker"
+
+# Verify worker identity
+if [ -f "$WORKER_DIR/SOUL.md" ]; then
+    log "Worker SOUL.md: found"
+else
+    log "WARN: No worker SOUL.md — agent will use generic identity"
+    log "      Run 'arc402 worker init' on the host to create one"
+fi
+
+if [ -f "$WORKER_DIR/config.json" ]; then
+    WORKER_NAME=$(jq -r '.name // "unnamed"' "$WORKER_DIR/config.json" 2>/dev/null || echo "unnamed")
+    WORKER_CAPS=$(jq -r '.capabilities // [] | join(", ")' "$WORKER_DIR/config.json" 2>/dev/null || echo "none")
+    log "Worker: $WORKER_NAME | Capabilities: $WORKER_CAPS"
+else
+    log "WARN: No worker config.json"
+fi
+
+# Check knowledge/datasets/skills directories
+for dir in knowledge datasets skills; do
+    if [ -d "$WORKER_DIR/$dir" ]; then
+        count=$(find "$WORKER_DIR/$dir" -type f 2>/dev/null | wc -l)
+        log "Worker $dir/: $count files"
+    fi
+done
+
+# Add agent runtimes to PATH
+PATH_ADDITIONS=""
+# OpenClaw (preferred runtime)
+if [ -d "/workroom/openclaw" ]; then
+    PATH_ADDITIONS="/workroom/openclaw:$PATH_ADDITIONS"
+    log "OpenClaw runtime found at /workroom/openclaw"
+fi
+# Claude Code
+if [ -d "/workroom/claude-code" ]; then
+    PATH_ADDITIONS="/workroom/claude-code:$PATH_ADDITIONS"
+    log "Claude Code found at /workroom/claude-code"
+fi
+if [ -n "$PATH_ADDITIONS" ]; then
+    export PATH="$PATH_ADDITIONS$PATH"
+fi
+
+# Verify Claude auth
+if [ -f "/home/workroom/.claude.json" ]; then
+    log "AUTH OK: Claude auth file found"
+else
+    log "WARN: No Claude auth — mount -v ~/.claude.json:/home/workroom/.claude.json:ro"
+fi
+
+# ─── Phase 6: Drop privileges and start daemon ────────────────────────────
+
+log "Starting ARC-402 daemon as user 'workroom'..."
+exec su -s /bin/bash workroom -c "export PATH='$PATH' && export ARC402_WORKER_DIR='$WORKER_DIR' && node $DAEMON_ENTRY --foreground"
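A dev-mode run wiring up everything the entrypoint probes for might look like this (mount list inferred from the paths above; arc402 workroom start presumably assembles the real command). NET_ADMIN is the one hard requirement: without it the first unguarded iptables call fails and set -e aborts the script.

    docker run --rm --cap-add NET_ADMIN \
      -v ~/.arc402:/workroom/.arc402 \
      -v ~/.openclaw/openclaw.json:/home/workroom/.openclaw/openclaw.json:ro \
      -v ~/.claude.json:/home/workroom/.claude.json:ro \
      -v "$PWD/dist:/workroom/runtime/dist" \
      arc402-workroom:latest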
package/workroom/policy-parser.sh
ADDED

@@ -0,0 +1,25 @@
+#!/bin/bash
+# ─────────────────────────────────────────────────────────────────────────────
+# ARC-402 Workroom Policy Parser
+#
+# Reads the workroom policy YAML and outputs HOST:PORT pairs, one per line.
+# Used by the entrypoint and DNS refresh scripts to build iptables rules.
+#
+# Input:  YAML file with network_policies.<name>.endpoints[].host/port
+# Output: hostname:port (one per line, to stdout)
+# ─────────────────────────────────────────────────────────────────────────────
+set -euo pipefail
+
+readonly POLICY_FILE="${1:-/workroom/.arc402/openshell-policy.yaml}"
+
+if [ ! -f "$POLICY_FILE" ]; then
+    echo "ERROR: Policy file not found: $POLICY_FILE" >&2
+    exit 1
+fi
+
+# Extract host:port pairs from the YAML network_policies section.
+# Each policy block contains endpoints with host and port fields.
+# Comment lines are skipped so a commented-out endpoint stays disabled.
+awk '
+    /^[[:space:]]*#/ { next }
+    /host:/ { gsub(/.*host: */, ""); host = $0 }
+    /port:/ { gsub(/.*port: */, ""); if (host != "") { print host ":" $0; host = "" } }
+' "$POLICY_FILE"
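Run against the default arena policy above, the parser's output feeds straight into the resolve loops in entrypoint.sh and dns-refresh.sh, one host:port pair per line (output abridged):

    $ /policy-parser.sh /workroom/defaults/arena-policy.yaml
    mainnet.base.org:443
    base-mainnet.g.alchemy.com:443
    base.llamarpc.com:443
    api.arc402.xyz:443
    ...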