@rizom/brain 0.2.0-alpha.5 → 0.2.0-alpha.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/brain.js +1298 -870
- package/dist/site.js +5 -5
- package/dist/site.js.map +1 -1
- package/package.json +3 -2
- package/templates/deploy/Caddyfile +66 -0
- package/templates/deploy/Dockerfile +38 -0
- package/templates/deploy/scripts/provision-server.ts +109 -0
- package/templates/deploy/scripts/update-dns.ts +55 -0
- package/templates/deploy/scripts/write-ssh-key.ts +17 -0
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@rizom/brain",
|
|
3
|
-
"version": "0.2.0-alpha.
|
|
3
|
+
"version": "0.2.0-alpha.7",
|
|
4
4
|
"description": "Brain runtime + CLI — scaffold, run, and manage AI brain instances",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|
|
@@ -22,7 +22,8 @@
|
|
|
22
22
|
}
|
|
23
23
|
},
|
|
24
24
|
"files": [
|
|
25
|
-
"dist"
|
|
25
|
+
"dist",
|
|
26
|
+
"templates"
|
|
26
27
|
],
|
|
27
28
|
"scripts": {
|
|
28
29
|
"build": "bun scripts/build.ts",
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
# Internal Caddy — path-based routing to brain services.
# kamal-proxy terminates TLS externally; this runs inside the container.
:80 {
	# Preview subdomain: everything on preview.* goes to the preview server.
	@preview host preview.*
	handle @preview {
		reverse_proxy localhost:4321

		# Clickjacking / sniffing hardening for browser-facing pages.
		header {
			X-Frame-Options "SAMEORIGIN"
			X-Content-Type-Options "nosniff"
			Referrer-Policy "strict-origin-when-cross-origin"
		}
	}

	# MCP endpoint
	# CORS is wide open (*) — presumably MCP clients connect cross-origin;
	# NOTE(review): confirm auth happens at the 3333 service, not here.
	handle /mcp* {
		reverse_proxy localhost:3333

		header {
			X-Content-Type-Options "nosniff"
			Access-Control-Allow-Origin "*"
			Access-Control-Allow-Methods "GET, POST, DELETE, OPTIONS"
			Access-Control-Allow-Headers "Content-Type, Authorization, MCP-Session-Id"
		}
	}

	# A2A endpoints
	# Agent discovery card must be reachable at the well-known path.
	handle /.well-known/agent-card.json {
		reverse_proxy localhost:3334
	}

	handle /a2a {
		reverse_proxy localhost:3334

		header {
			X-Content-Type-Options "nosniff"
			Access-Control-Allow-Origin "*"
			Access-Control-Allow-Methods "GET, POST, OPTIONS"
			Access-Control-Allow-Headers "Content-Type, Authorization"
		}
	}

	# Plugin API routes
	handle /api/* {
		reverse_proxy localhost:3335
	}

	# Production site: proxy to webserver if running, otherwise serve
	# a minimal static fallback so the healthcheck and bare-domain
	# requests succeed even on the core preset (no webserver).
	handle {
		reverse_proxy localhost:8080

		header {
			X-Frame-Options "SAMEORIGIN"
			X-Content-Type-Options "nosniff"
			Referrer-Policy "strict-origin-when-cross-origin"
		}
	}

	# Any upstream error (e.g. 8080 not listening) falls back to the
	# static page baked into the image at /srv/fallback (see Dockerfile).
	handle_errors {
		root * /srv/fallback
		try_files /index.html
		file_server
	}
}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
# Base runtime: Bun + Caddy in one image. Caddy does internal path routing
# (see deploy/Caddyfile); brain services run behind it.
ARG BUN_VERSION=1.3.10
FROM oven/bun:${BUN_VERSION}-slim AS runtime

WORKDIR /app

# Install Caddy from its official Cloudsmith apt repo, then grant it
# cap_net_bind_service so it can bind :80 without running as root.
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl ca-certificates git gnupg debian-keyring debian-archive-keyring apt-transport-https \
    && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg \
    && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list \
    && apt-get update && apt-get install -y --no-install-recommends caddy libcap2-bin \
    && setcap cap_net_bind_service=+ep $(which caddy) \
    && rm -rf /var/lib/apt/lists/*

COPY deploy/Caddyfile /etc/caddy/Caddyfile

# Static fallback page served by Caddy's handle_errors when no webserver
# is listening on :8080 (keeps the healthcheck green on the core preset).
RUN mkdir -p /srv/fallback && \
    printf '<!doctype html><html><head><meta charset="utf-8"><title>brain</title></head><body></body></html>\n' \
    > /srv/fallback/index.html

# NOTE(review): XDG vars point at /data and /config, but only /app/* dirs are
# created below — confirm /data and /config are provided as volumes at deploy time.
ENV XDG_DATA_HOME=/data
ENV XDG_CONFIG_HOME=/config
RUN mkdir -p /app/data /app/cache /app/brain-data && \
    chmod -R 777 /app/data /app/cache /app/brain-data

# `caddy start` backgrounds Caddy, then brain becomes PID 1 via exec.
# NOTE(review): if Caddy later dies, nothing restarts it — verify acceptable.
CMD ["sh", "-c", "caddy start --config /etc/caddy/Caddyfile && exec ./node_modules/.bin/brain start"]

# --- standalone: bake full project into image (brain-cli deploy) ---
FROM runtime AS standalone
COPY package.json ./package.json
RUN bun install --production --ignore-scripts
COPY . .

# --- fleet: install published brain at pinned version (ops deploy) ---
FROM runtime AS fleet
ARG BRAIN_VERSION
# Fail fast if the build arg is missing, then install the pinned release.
RUN test -n "$BRAIN_VERSION" \
    && printf '{"name":"rover-pilot-runtime","private":true}\n' > package.json \
    && bun add @rizom/brain@$BRAIN_VERSION
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
// Provision (or reuse) a Hetzner Cloud server for one brain instance.
// Runs in CI: reads configuration from env, exposes the resulting IP via
// GitHub Actions output/env for downstream steps.
import {
  readJsonResponse,
  requireEnv,
  writeGitHubOutput,
  writeGitHubEnv,
} from "./helpers";

// Required environment — requireEnv throws when a variable is missing.
const token = requireEnv("HCLOUD_TOKEN");
const instanceName = requireEnv("INSTANCE_NAME");
const sshKeyName = requireEnv("HCLOUD_SSH_KEY_NAME");
const serverType = requireEnv("HCLOUD_SERVER_TYPE");
const location = requireEnv("HCLOUD_LOCATION");

// Shared headers for every Hetzner API call.
const headers: Record<string, string> = {
  Authorization: `Bearer ${token}`,
  "Content-Type": "application/json",
};
const baseUrl = "https://api.hetzner.cloud/v1";
// Servers are tagged `brain=<instance>` so re-runs find the same machine.
const labelSelector = `brain=${instanceName}`;
// Readiness polling budget: 30 polls × 10 s = 5 minutes.
const MAX_POLLS = 30;
const POLL_INTERVAL_MS = 10_000;

// Minimal slice of the Hetzner server payload this script reads.
interface HetznerServer {
  id: number;
  status: string;
  public_net?: { ipv4?: { ip?: string } };
}
|
|
28
|
+
|
|
29
|
+
function sleep(ms: number): Promise<void> {
|
|
30
|
+
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
async function listServers(): Promise<HetznerServer[]> {
|
|
34
|
+
const url = `${baseUrl}/servers?label_selector=${encodeURIComponent(labelSelector)}`;
|
|
35
|
+
const response = await fetch(url, { headers });
|
|
36
|
+
const payload = (await readJsonResponse(
|
|
37
|
+
response,
|
|
38
|
+
"Hetzner server lookup",
|
|
39
|
+
)) as {
|
|
40
|
+
servers?: HetznerServer[];
|
|
41
|
+
};
|
|
42
|
+
if (!response.ok || !payload.servers) {
|
|
43
|
+
throw new Error(`Hetzner server lookup failed: ${JSON.stringify(payload)}`);
|
|
44
|
+
}
|
|
45
|
+
return payload.servers;
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
async function createServer(): Promise<HetznerServer> {
|
|
49
|
+
const response = await fetch(`${baseUrl}/servers`, {
|
|
50
|
+
method: "POST",
|
|
51
|
+
headers,
|
|
52
|
+
body: JSON.stringify({
|
|
53
|
+
name: instanceName,
|
|
54
|
+
server_type: serverType,
|
|
55
|
+
image: "ubuntu-22.04",
|
|
56
|
+
location,
|
|
57
|
+
ssh_keys: [sshKeyName],
|
|
58
|
+
labels: { brain: instanceName },
|
|
59
|
+
}),
|
|
60
|
+
});
|
|
61
|
+
const payload = (await readJsonResponse(
|
|
62
|
+
response,
|
|
63
|
+
"Hetzner server create",
|
|
64
|
+
)) as {
|
|
65
|
+
server?: HetznerServer;
|
|
66
|
+
};
|
|
67
|
+
if (!response.ok || !payload.server) {
|
|
68
|
+
throw new Error(`Hetzner server create failed: ${JSON.stringify(payload)}`);
|
|
69
|
+
}
|
|
70
|
+
return payload.server;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
async function getServer(id: number): Promise<HetznerServer> {
|
|
74
|
+
const response = await fetch(`${baseUrl}/servers/${id}`, { headers });
|
|
75
|
+
const payload = (await readJsonResponse(response, "Hetzner server poll")) as {
|
|
76
|
+
server?: HetznerServer;
|
|
77
|
+
};
|
|
78
|
+
if (!response.ok || !payload.server) {
|
|
79
|
+
throw new Error(`Hetzner server poll failed: ${JSON.stringify(payload)}`);
|
|
80
|
+
}
|
|
81
|
+
return payload.server;
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
let server: HetznerServer | undefined = (await listServers())[0];
|
|
85
|
+
server ??= await createServer();
|
|
86
|
+
|
|
87
|
+
let polls = 0;
|
|
88
|
+
while (server.status !== "running" || !server.public_net?.ipv4?.ip) {
|
|
89
|
+
if (++polls > MAX_POLLS) {
|
|
90
|
+
throw new Error(
|
|
91
|
+
`Server ${server.id} did not become ready after ${(MAX_POLLS * POLL_INTERVAL_MS) / 1000}s (status: ${server.status})`,
|
|
92
|
+
);
|
|
93
|
+
}
|
|
94
|
+
if (server.status === "error") {
|
|
95
|
+
throw new Error(`Server ${server.id} entered error state`);
|
|
96
|
+
}
|
|
97
|
+
console.log(
|
|
98
|
+
`Waiting for server ${server.id} (status: ${server.status}, poll ${polls}/${MAX_POLLS})...`,
|
|
99
|
+
);
|
|
100
|
+
await sleep(POLL_INTERVAL_MS);
|
|
101
|
+
server = await getServer(server.id);
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
const serverIp = server.public_net?.ipv4?.ip;
|
|
105
|
+
if (!serverIp) {
|
|
106
|
+
throw new Error(`Server ${server.id} running but has no IPv4 address`);
|
|
107
|
+
}
|
|
108
|
+
writeGitHubOutput("server_ip", serverIp);
|
|
109
|
+
writeGitHubEnv("SERVER_IP", serverIp);
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
// Point the instance's Cloudflare A record at the freshly provisioned server.
// Runs in CI after provision-server.ts has exported SERVER_IP.
import { readJsonResponse, requireEnv } from "./helpers";

// Required environment — requireEnv throws when a variable is missing.
const token = requireEnv("CF_API_TOKEN");
const zoneId = requireEnv("CF_ZONE_ID");
const domain = requireEnv("BRAIN_DOMAIN");
const serverIp = requireEnv("SERVER_IP");

// Shared headers for every Cloudflare API call.
const headers: Record<string, string> = {
  Authorization: `Bearer ${token}`,
  "Content-Type": "application/json",
};
const baseUrl = "https://api.cloudflare.com/client/v4";

// Minimal slice of the Cloudflare response envelope this script reads.
interface CloudflareResult {
  success: boolean;
  result?: Array<{ id: string }>;
}
|
|
18
|
+
|
|
19
|
+
async function upsertRecord(name: string): Promise<void> {
|
|
20
|
+
const lookupUrl = `${baseUrl}/zones/${zoneId}/dns_records?type=A&name=${encodeURIComponent(name)}`;
|
|
21
|
+
const lookup = await fetch(lookupUrl, { headers });
|
|
22
|
+
const payload = (await readJsonResponse(
|
|
23
|
+
lookup,
|
|
24
|
+
"Cloudflare DNS lookup",
|
|
25
|
+
)) as CloudflareResult;
|
|
26
|
+
if (!lookup.ok || !payload.success) {
|
|
27
|
+
throw new Error(`Cloudflare DNS lookup failed: ${JSON.stringify(payload)}`);
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
const existing = payload.result?.[0];
|
|
31
|
+
const url = existing
|
|
32
|
+
? `${baseUrl}/zones/${zoneId}/dns_records/${existing.id}`
|
|
33
|
+
: `${baseUrl}/zones/${zoneId}/dns_records`;
|
|
34
|
+
|
|
35
|
+
const response = await fetch(url, {
|
|
36
|
+
method: existing ? "PUT" : "POST",
|
|
37
|
+
headers,
|
|
38
|
+
body: JSON.stringify({
|
|
39
|
+
type: "A",
|
|
40
|
+
name,
|
|
41
|
+
content: serverIp,
|
|
42
|
+
ttl: 1,
|
|
43
|
+
proxied: true,
|
|
44
|
+
}),
|
|
45
|
+
});
|
|
46
|
+
const result = (await readJsonResponse(
|
|
47
|
+
response,
|
|
48
|
+
"Cloudflare DNS upsert",
|
|
49
|
+
)) as CloudflareResult;
|
|
50
|
+
if (!response.ok || !result.success) {
|
|
51
|
+
throw new Error(`Cloudflare DNS upsert failed: ${JSON.stringify(result)}`);
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
await upsertRecord(domain);
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import { mkdirSync, writeFileSync } from "node:fs";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
import { requireEnv } from "./helpers";
|
|
4
|
+
|
|
5
|
+
const privateKey = requireEnv("KAMAL_SSH_PRIVATE_KEY");
|
|
6
|
+
|
|
7
|
+
let normalized = privateKey.replace(/\r\n/g, "\n").replace(/\\n/g, "\n");
|
|
8
|
+
if (!normalized.endsWith("\n")) {
|
|
9
|
+
normalized += "\n";
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
const sshDir = join(process.env["HOME"] ?? "/root", ".ssh");
|
|
13
|
+
mkdirSync(sshDir, { recursive: true });
|
|
14
|
+
writeFileSync(join(sshDir, "id_ed25519"), normalized, {
|
|
15
|
+
encoding: "utf8",
|
|
16
|
+
mode: 0o600,
|
|
17
|
+
});
|