@arcote.tech/arc-cli 0.5.1 → 0.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,282 @@
1
+ // Embedded provisioning assets. Stored as TypeScript template literals so
2
+ // they survive bundling without any special loader config. Edit here — these
3
+ // ARE the source of truth. The mirror files under ./assets/ exist only as
4
+ // human-readable references and are NOT imported at runtime.
5
+ //
6
+ // Escaping notes:
7
+ // - Terraform interpolations like "${var.x}" must be escaped as "\${var.x}"
8
+ // in the template literal (TS would otherwise evaluate them as JS).
9
+ // - Ansible/Jinja variables use {{ var }} which is safe in template literals.
10
+
11
// Root Terraform module: pins the hcloud provider, uploads the deploy SSH
// public key, creates one server with public IPv4/IPv6, and exports the
// server's IP + name for the CLI to consume.
// NOTE: "\${...}" is escaped so Terraform — not TypeScript — interpolates it.
const TERRAFORM_MAIN_TF = `terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "~> 1.51"
    }
  }
}

provider "hcloud" {
  token = var.hcloud_token
}

resource "hcloud_ssh_key" "deploy" {
  name       = "\${var.server_name}-deploy"
  public_key = file(var.ssh_public_key)
}

resource "hcloud_server" "arc" {
  name        = var.server_name
  image       = var.server_image
  server_type = var.server_type
  location    = var.server_location
  ssh_keys    = [hcloud_ssh_key.deploy.id]

  public_net {
    ipv4_enabled = true
    ipv6_enabled = true
  }
}

output "server_ip" {
  value = hcloud_server.arc.ipv4_address
}

output "server_name" {
  value = hcloud_server.arc.name
}
`;
50
+
51
// Input variables for the Terraform module above. Only hcloud_token has no
// default — it is injected by the CLI from the env var named in
// deploy.arc.json (see bootstrap's tokenEnv handling).
const TERRAFORM_VARIABLES_TF = `variable "hcloud_token" {
  description = "Hetzner Cloud API token"
  type        = string
  sensitive   = true
}

variable "server_name" {
  description = "Name of the Hetzner server (shown in the console)"
  type        = string
  default     = "arc-platform"
}

variable "server_type" {
  description = "Hetzner server type (cx22, cx32, cx42, ...)"
  type        = string
  default     = "cx32"
}

variable "server_location" {
  description = "Hetzner datacenter location (nbg1, fsn1, hel1, ...)"
  type        = string
  default     = "nbg1"
}

variable "server_image" {
  description = "OS image"
  type        = string
  default     = "ubuntu-22.04"
}

variable "ssh_public_key" {
  description = "Path to the public key uploaded to the server"
  type        = string
  default     = "~/.ssh/id_ed25519.pub"
}
`;
87
+
88
+ const ANSIBLE_SITE_YML = `---
89
+ # Arc platform bootstrap playbook — minimal hardened Docker host.
90
+ - name: Bootstrap Arc host
91
+ hosts: all
92
+ become: true
93
+ gather_facts: true
94
+ vars:
95
+ deploy_user: "{{ username | default('deploy') }}"
96
+ ssh_port: "{{ ssh_port | default(22) }}"
97
+ extra_allowed_ips: "{{ extra_allowed_ips | default([]) }}"
98
+
99
+ tasks:
100
+ - name: Update apt cache
101
+ apt:
102
+ update_cache: true
103
+ cache_valid_time: 3600
104
+
105
+ - name: Install base packages
106
+ apt:
107
+ name:
108
+ - ca-certificates
109
+ - curl
110
+ - gnupg
111
+ - ufw
112
+ - fail2ban
113
+ - unattended-upgrades
114
+ - python3-docker
115
+ state: present
116
+
117
+ - name: Create deploy user
118
+ user:
119
+ name: "{{ deploy_user }}"
120
+ shell: /bin/bash
121
+ groups: sudo
122
+ append: true
123
+ create_home: true
124
+ state: present
125
+
126
+ - name: Copy SSH key from root to deploy user
127
+ shell: |
128
+ mkdir -p /home/{{ deploy_user }}/.ssh
129
+ cp /root/.ssh/authorized_keys /home/{{ deploy_user }}/.ssh/authorized_keys
130
+ chown -R {{ deploy_user }}:{{ deploy_user }} /home/{{ deploy_user }}/.ssh
131
+ chmod 700 /home/{{ deploy_user }}/.ssh
132
+ chmod 600 /home/{{ deploy_user }}/.ssh/authorized_keys
133
+ args:
134
+ creates: "/home/{{ deploy_user }}/.ssh/authorized_keys"
135
+
136
+ - name: Passwordless sudo for deploy user
137
+ copy:
138
+ dest: /etc/sudoers.d/99-{{ deploy_user }}
139
+ content: "{{ deploy_user }} ALL=(ALL) NOPASSWD:ALL\\n"
140
+ mode: "0440"
141
+ validate: "visudo -cf %s"
142
+
143
+ - name: Harden sshd
144
+ lineinfile:
145
+ path: /etc/ssh/sshd_config
146
+ regexp: "{{ item.re }}"
147
+ line: "{{ item.line }}"
148
+ state: present
149
+ loop:
150
+ - { re: "^#?PermitRootLogin", line: "PermitRootLogin no" }
151
+ - { re: "^#?PasswordAuthentication", line: "PasswordAuthentication no" }
152
+ - { re: "^#?PubkeyAuthentication", line: "PubkeyAuthentication yes" }
153
+ - { re: "^#?MaxAuthTries", line: "MaxAuthTries 3" }
154
+ notify: restart ssh
155
+
156
+ - name: Install Docker via official convenience script
157
+ shell: |
158
+ curl -fsSL https://get.docker.com | sh
159
+ args:
160
+ creates: /usr/bin/docker
161
+
162
+ - name: Enable and start docker
163
+ systemd:
164
+ name: docker
165
+ enabled: true
166
+ state: started
167
+
168
+ - name: Add deploy user to docker group
169
+ user:
170
+ name: "{{ deploy_user }}"
171
+ groups: docker
172
+ append: true
173
+
174
+ - name: Configure docker log rotation
175
+ copy:
176
+ dest: /etc/docker/daemon.json
177
+ content: |
178
+ {
179
+ "log-driver": "json-file",
180
+ "log-opts": {
181
+ "max-size": "10m",
182
+ "max-file": "3"
183
+ }
184
+ }
185
+ mode: "0644"
186
+ notify: restart docker
187
+
188
+ - name: Ensure /opt/arc exists
189
+ file:
190
+ path: /opt/arc
191
+ state: directory
192
+ owner: "{{ deploy_user }}"
193
+ group: "{{ deploy_user }}"
194
+ mode: "0755"
195
+
196
+ - name: Configure ufw defaults
197
+ ufw:
198
+ policy: "{{ item.policy }}"
199
+ direction: "{{ item.dir }}"
200
+ loop:
201
+ - { policy: deny, dir: incoming }
202
+ - { policy: allow, dir: outgoing }
203
+
204
+ - name: Open firewall ports
205
+ ufw:
206
+ rule: allow
207
+ port: "{{ item }}"
208
+ proto: tcp
209
+ loop:
210
+ - "{{ ssh_port }}"
211
+ - "80"
212
+ - "443"
213
+
214
+ - name: Enable ufw
215
+ ufw:
216
+ state: enabled
217
+
218
+ - name: Configure fail2ban for sshd
219
+ copy:
220
+ dest: /etc/fail2ban/jail.local
221
+ content: |
222
+ [sshd]
223
+ enabled = true
224
+ port = {{ ssh_port }}
225
+ maxretry = 5
226
+ findtime = 600
227
+ bantime = 3600
228
+ ignoreip = 127.0.0.1/8 ::1 {{ extra_allowed_ips | join(' ') }}
229
+ mode: "0644"
230
+ notify: restart fail2ban
231
+
232
+ - name: Enable unattended upgrades
233
+ copy:
234
+ dest: /etc/apt/apt.conf.d/20auto-upgrades
235
+ content: |
236
+ APT::Periodic::Update-Package-Lists "1";
237
+ APT::Periodic::Unattended-Upgrade "1";
238
+ APT::Periodic::AutocleanInterval "7";
239
+ mode: "0644"
240
+
241
+ handlers:
242
+ - name: restart ssh
243
+ systemd:
244
+ name: ssh
245
+ state: restarted
246
+
247
+ - name: restart docker
248
+ systemd:
249
+ name: docker
250
+ state: restarted
251
+
252
+ - name: restart fail2ban
253
+ systemd:
254
+ name: fail2ban
255
+ state: restarted
256
+ `;
257
+
258
/**
 * All embedded provisioning assets, keyed by tool and then by the filename
 * they are materialized under. `as const` keeps keys and contents literal
 * and readonly for consumers.
 */
export const ASSETS = {
  terraform: {
    "main.tf": TERRAFORM_MAIN_TF,
    "variables.tf": TERRAFORM_VARIABLES_TF,
  },
  ansible: {
    "site.yml": ANSIBLE_SITE_YML,
  },
} as const;
267
+
268
+ /**
269
+ * Materialize a directory of asset files at a real filesystem path.
270
+ * Used by terraform/ansible runners before they shell out to the binary.
271
+ */
272
+ export async function materializeAssets(
273
+ targetDir: string,
274
+ files: Readonly<Record<string, string>>,
275
+ ): Promise<void> {
276
+ const { mkdirSync, writeFileSync } = await import("fs");
277
+ const { join } = await import("path");
278
+ mkdirSync(targetDir, { recursive: true });
279
+ for (const [name, content] of Object.entries(files)) {
280
+ writeFileSync(join(targetDir, name), content);
281
+ }
282
+ }
@@ -0,0 +1,131 @@
1
+ import { mkdirSync, writeFileSync } from "fs";
2
+ import { tmpdir } from "os";
3
+ import { join } from "path";
4
+ import { runAnsible } from "./ansible";
5
+ import { generateCaddyfile } from "./caddyfile";
6
+ import { generateCompose } from "./compose";
7
+ import type { DeployConfig } from "./config";
8
+ import { runTerraform } from "./terraform";
9
+ import { saveDeployConfig } from "./config";
10
+ import { ok, log, err } from "../platform/shared";
11
+ import { writeStateMarker, STATE_MARKER_PATH } from "./remote-state";
12
+ import type { RemoteState } from "./remote-state";
13
+ import { assertExec, scpUpload, waitForSsh } from "./ssh";
14
+
15
+ // ---------------------------------------------------------------------------
16
+ // Bootstrap orchestrator.
17
+ //
18
+ // State diagram:
19
+ // unreachable + provision.terraform → terraform apply → ansible → stack up
20
+ // unreachable + !provision.terraform → error (ask user to provide host)
21
+ // no-docker → ansible → stack up
22
+ // no-stack → stack up
23
+ // ready → nothing, deploy goes straight to sync
24
+ //
25
+ // "Stack up" means: generate Caddyfile + docker-compose.yml, scp to /opt/arc,
26
+ // `docker compose up -d`, write state marker.
27
+ // ---------------------------------------------------------------------------
28
+
29
/** Everything `bootstrap` needs; assembled by the deploy command. */
export interface BootstrapInputs {
  /** Parsed deploy.arc.json. Mutated in place when terraform assigns a new host. */
  cfg: DeployConfig;
  /** Project root — where deploy.arc.json lives; used to persist host updates. */
  rootDir: string;
  /** Probed remote host state (unreachable / no-docker / no-stack / ready). */
  state: RemoteState;
  /** CLI version, recorded in the remote state marker. */
  cliVersion: string;
  /** sha256 of deploy.arc.json — used for the remote state marker. */
  configHash: string;
}
37
+
38
/**
 * Bring the remote host from whatever state it is in to "stack running".
 * Each `if` below is one transition of the state diagram in the file
 * header; they deliberately fall through so a fresh server runs
 * terraform → ansible → stack-up in one invocation, and a "no-stack"
 * host skips straight to stack-up.
 *
 * @throws when the host is unreachable and no provision.terraform block is
 *         configured, or when the provisioning token env var is unset.
 */
export async function bootstrap(inputs: BootstrapInputs): Promise<void> {
  const { cfg, state, rootDir } = inputs;

  if (state.kind === "unreachable") {
    if (!cfg.provision?.terraform) {
      throw new Error(
        `Server ${cfg.target.host} is unreachable and deploy.arc.json has no provision.terraform section. Either fix SSH access or add a provision block.`,
      );
    }
    log("Provisioning server via Terraform...");
    // The cloud API token is read from an env var whose *name* comes from
    // the config, so the secret itself never lands in deploy.arc.json.
    const token = process.env[cfg.provision.terraform.tokenEnv];
    if (!token) {
      throw new Error(
        `Environment variable ${cfg.provision.terraform.tokenEnv} is not set`,
      );
    }
    // Server name derives from the first env name only — NOTE(review):
    // presumably intended even for multi-env configs; confirm.
    const tfOut = await runTerraform({
      tf: cfg.provision.terraform,
      token,
      serverName: `arc-${Object.keys(cfg.envs)[0] ?? "host"}`,
    });
    ok(`Server provisioned: ${tfOut.serverIp}`);

    // Persist the new host back into deploy.arc.json so subsequent runs skip terraform
    cfg.target.host = tfOut.serverIp;
    saveDeployConfig(rootDir, cfg);

    log("Waiting for SSH to come up...");
    // First contact is as root: the deploy user does not exist until the
    // Ansible play below creates it.
    await waitForSsh({ ...cfg.target, user: "root" });
    ok("SSH reachable");
  }

  if (state.kind === "unreachable" || state.kind === "no-docker") {
    log("Running Ansible bootstrap (Docker + firewall + SSH hardening)...");
    // On a freshly provisioned Hetzner VM, only root exists before the playbook
    // creates the deploy user; re-use `asRoot: true` for that first shot.
    const asRoot = state.kind === "unreachable";
    await runAnsible({
      target: cfg.target,
      ansible: cfg.provision?.ansible,
      asRoot,
    });
    ok("Host bootstrapped");
  }

  if (state.kind !== "ready") {
    await upStack(inputs);
    ok("Docker stack up");
  }

  // Keep marker fresh
  await writeStateMarker(cfg.target, {
    cliVersion: inputs.cliVersion,
    configHash: inputs.configHash,
    updatedAt: new Date().toISOString(),
  });
}
95
+
96
+ async function upStack(inputs: BootstrapInputs): Promise<void> {
97
+ const { cfg } = inputs;
98
+ const workDir = join(tmpdir(), "arc-deploy", `stack-${Date.now()}`);
99
+ mkdirSync(workDir, { recursive: true });
100
+
101
+ writeFileSync(join(workDir, "Caddyfile"), generateCaddyfile(cfg));
102
+ writeFileSync(join(workDir, "docker-compose.yml"), generateCompose({ cfg }));
103
+
104
+ // Ensure remoteDir exists
105
+ await assertExec(
106
+ cfg.target,
107
+ `sudo mkdir -p ${cfg.target.remoteDir} && sudo chown ${cfg.target.user}:${cfg.target.user} ${cfg.target.remoteDir}`,
108
+ );
109
+ for (const name of Object.keys(cfg.envs)) {
110
+ await assertExec(
111
+ cfg.target,
112
+ `mkdir -p ${cfg.target.remoteDir}/${name}`,
113
+ );
114
+ }
115
+
116
+ await scpUpload(
117
+ cfg.target,
118
+ join(workDir, "Caddyfile"),
119
+ `${cfg.target.remoteDir}/Caddyfile`,
120
+ );
121
+ await scpUpload(
122
+ cfg.target,
123
+ join(workDir, "docker-compose.yml"),
124
+ `${cfg.target.remoteDir}/docker-compose.yml`,
125
+ );
126
+
127
+ await assertExec(
128
+ cfg.target,
129
+ `cd ${cfg.target.remoteDir} && docker compose pull --ignore-pull-failures && docker compose up -d`,
130
+ );
131
+ }
@@ -0,0 +1,59 @@
1
+ import type { DeployConfig } from "./config";
2
+
3
+ // ---------------------------------------------------------------------------
4
+ // Caddyfile generator
5
+ //
6
+ // Two kinds of blocks are produced:
7
+ //
8
+ // 1. Public site blocks (80/443) — one per env, routed by Host header.
9
+ // Reverse-proxy to arc-<env>:5005 BUT strip any /api/deploy/* path so
10
+ // the hot-swap endpoint never leaks to the internet.
11
+ //
12
+ // 2. Internal management listener on 127.0.0.1:2019 — handles
13
+ // /env/<name>/api/deploy/* paths by rewriting and proxying to
14
+ // arc-<name>:5005/api/deploy/*. This is the ONLY path to the deploy
15
+ // API from off-host; the listener is bound to loopback inside the
16
+ // Caddy container, and docker-compose publishes 127.0.0.1:2019:2019.
17
+ // CLI reaches it via `ssh -L`.
18
+ // ---------------------------------------------------------------------------
19
+
20
+ export function generateCaddyfile(cfg: DeployConfig): string {
21
+ const email =
22
+ cfg.caddy.email === "internal" ? "" : `\n email ${cfg.caddy.email}`;
23
+ const tlsDirective =
24
+ cfg.caddy.email === "internal" ? "\n tls internal" : "";
25
+
26
+ const lines: string[] = [];
27
+ lines.push("# Generated by `arc platform deploy` — do not edit by hand.");
28
+ lines.push("");
29
+ lines.push("{");
30
+ lines.push(" admin off");
31
+ if (email) lines.push(` ${email.trim()}`);
32
+ lines.push("}");
33
+ lines.push("");
34
+
35
+ // Public blocks — one per env
36
+ for (const [name, env] of Object.entries(cfg.envs)) {
37
+ lines.push(`${env.domain} {${tlsDirective}`);
38
+ lines.push(" @deploy path /api/deploy /api/deploy/*");
39
+ lines.push(" respond @deploy 404");
40
+ lines.push("");
41
+ lines.push(` reverse_proxy arc-${name}:5005`);
42
+ lines.push("}");
43
+ lines.push("");
44
+ }
45
+
46
+ // Internal management listener
47
+ lines.push("# Loopback-only management listener (SSH tunnel access).");
48
+ lines.push("http://127.0.0.1:2019 {");
49
+ lines.push(" bind 127.0.0.1");
50
+ for (const [name] of Object.entries(cfg.envs)) {
51
+ lines.push(` handle_path /env/${name}/* {`);
52
+ lines.push(` reverse_proxy arc-${name}:5005`);
53
+ lines.push(` }`);
54
+ }
55
+ lines.push(" respond 404");
56
+ lines.push("}");
57
+
58
+ return lines.join("\n") + "\n";
59
+ }
@@ -0,0 +1,73 @@
1
+ import type { DeployConfig } from "./config";
2
+
3
+ // ---------------------------------------------------------------------------
4
+ // docker-compose.yml generator
5
+ //
6
+ // Services:
7
+ // - caddy (public 80/443, loopback 127.0.0.1:2019 for deploy tunnel)
8
+ // - arc-<env> per entry in deploy.arc.json envs (bind-mounts project dir)
9
+ //
10
+ // No custom images: vanilla `caddy:2-alpine` and `oven/bun:1-alpine` from
11
+ // Docker Hub. The Arc CLI and user's built artifacts come via the volume
12
+ // mount at /opt/arc/<env>/ — rsynced by the deploy command.
13
+ // ---------------------------------------------------------------------------
14
+
15
/** Inputs for {@link generateCompose}; a struct so options can grow later. */
export interface ComposeOptions {
  /** Parsed deploy.arc.json. */
  cfg: DeployConfig;
}
18
+
19
/**
 * Render docker-compose.yml for the stack described by `cfg`.
 *
 * Emits one `caddy` service (the only one with published ports — 80/443
 * public, 2019 published on the HOST loopback only) plus one `arc-<env>`
 * service per entry in cfg.envs, all joined by the `arc-net` network.
 */
export function generateCompose({ cfg }: ComposeOptions): string {
  const lines: string[] = [];
  lines.push("# Generated by `arc platform deploy` — do not edit by hand.");
  lines.push("");
  lines.push("services:");
  lines.push("  caddy:");
  lines.push("    image: caddy:2-alpine");
  lines.push("    restart: unless-stopped");
  lines.push("    ports:");
  lines.push('      - "80:80"');
  lines.push('      - "443:443"');
  // Loopback-only publish: the deploy tunnel port never faces the internet.
  lines.push('      - "127.0.0.1:2019:2019"');
  lines.push("    volumes:");
  lines.push("      - ./Caddyfile:/etc/caddy/Caddyfile:ro");
  lines.push("      - caddy_data:/data");
  lines.push("      - caddy_config:/config");
  lines.push("    networks:");
  lines.push("      - arc-net");
  lines.push("");

  // One app container per env; the project dir is bind-mounted from the
  // host path that `arc platform deploy` rsyncs into.
  for (const [name, env] of Object.entries(cfg.envs)) {
    lines.push(`  arc-${name}:`);
    lines.push("    image: oven/bun:1-alpine");
    lines.push("    restart: unless-stopped");
    lines.push(`    working_dir: /app`);
    lines.push("    volumes:");
    lines.push(`      - ${cfg.target.remoteDir}/${name}:/app`);
    // Named volume so app data survives re-syncs of the bind mount.
    lines.push(`      - arc-data-${name}:/app/.arc/data`);
    lines.push("    environment:");
    lines.push("      PORT: 5005");
    lines.push("      ARC_DEPLOY_API: \"1\"");
    lines.push("      NODE_ENV: production");
    // JSON.stringify yields a valid YAML double-quoted scalar for any
    // string value (quotes/backslashes escaped).
    for (const [k, v] of Object.entries(env.envVars ?? {})) {
      lines.push(`      ${k}: ${JSON.stringify(v)}`);
    }
    lines.push("    command: [\"node_modules/.bin/arc\", \"platform\", \"start\"]");
    lines.push("    networks:");
    lines.push("      - arc-net");
    // expose (not ports): reachable from caddy on arc-net, never published.
    lines.push("    expose:");
    lines.push('      - "5005"');
    lines.push("");
  }

  lines.push("networks:");
  lines.push("  arc-net:");
  lines.push("");
  lines.push("volumes:");
  lines.push("  caddy_data:");
  lines.push("  caddy_config:");
  for (const [name] of Object.entries(cfg.envs)) {
    lines.push(`  arc-data-${name}:`);
  }

  return lines.join("\n") + "\n";
}