vmsan 0.1.0-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +219 -0
- package/dist/_chunks/agent.mjs +121 -0
- package/dist/_chunks/cleanup.mjs +328 -0
- package/dist/_chunks/connect.mjs +13 -0
- package/dist/_chunks/connect2.mjs +72 -0
- package/dist/_chunks/create.mjs +352 -0
- package/dist/_chunks/download.mjs +84 -0
- package/dist/_chunks/environment.mjs +1064 -0
- package/dist/_chunks/errors.mjs +232 -0
- package/dist/_chunks/firecracker.mjs +110 -0
- package/dist/_chunks/image-rootfs.mjs +329 -0
- package/dist/_chunks/list.mjs +83 -0
- package/dist/_chunks/logger.mjs +75 -0
- package/dist/_chunks/network.mjs +79 -0
- package/dist/_chunks/paths.mjs +36 -0
- package/dist/_chunks/remove.mjs +88 -0
- package/dist/_chunks/rolldown-runtime.mjs +11 -0
- package/dist/_chunks/shell.mjs +152 -0
- package/dist/_chunks/start.mjs +192 -0
- package/dist/_chunks/stop.mjs +76 -0
- package/dist/_chunks/upload.mjs +81 -0
- package/dist/_chunks/validation.mjs +125 -0
- package/dist/_chunks/vm-state.mjs +206 -0
- package/dist/_chunks/vm.mjs +208 -0
- package/dist/bin/cli.d.mts +1 -0
- package/dist/bin/cli.mjs +52 -0
- package/dist/index.d.mts +2892 -0
- package/dist/index.mjs +42 -0
- package/package.json +69 -0
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
import { r as getOutputMode } from "./logger.mjs";
|
|
2
|
+
import { EvlogError } from "evlog";
|
|
3
|
+
import { consola } from "consola";
|
|
4
|
+
// Base class for every vmsan error. Extends EvlogError so the structured
// fields it carries (message/why/fix/link) flow through, and adds a stable
// machine-readable `code` used for programmatic matching and JSON output.
var VmsanError = class extends EvlogError {
	// Stable error code string, e.g. "ERR_VALIDATION_PORT".
	code;
	constructor(code, options) {
		super(options);
		this.name = "VmsanError";
		this.code = code;
		// V8-only API; guarded so construction works on other engines.
		// Passing new.target trims subclass constructors from the trace.
		if (Error.captureStackTrace) Error.captureStackTrace(this, new.target);
	}
	// Merge `code` into the parent's JSON representation.
	toJSON() {
		return {
			...super.toJSON(),
			code: this.code
		};
	}
};
|
|
19
|
+
// Error for invalid CLI input. `flag` names the offending CLI flag
// (without leading dashes) when known; it is dropped from JSON output
// when undefined.
var ValidationError = class extends VmsanError {
	// CLI flag name associated with the failure, or undefined.
	flag;
	constructor(code, options) {
		super(code, options);
		this.name = "ValidationError";
		this.flag = options.flag;
	}
	toJSON() {
		return {
			...super.toJSON(),
			// Spread-with-&& omits the key entirely when flag is undefined.
			...this.flag !== void 0 && { flag: this.flag }
		};
	}
};
|
|
33
|
+
// Factory helpers for flag-validation failures. Each returns (never throws)
// a ValidationError with a stable code and, where applicable, the flag name.
const invalidIntegerFlagError = (flag, value, min, max, unitSuffix = "") => {
  const message = `Invalid --${flag}: "${value}". Must be an integer between ${min} and ${max}${unitSuffix}.`;
  return new ValidationError("ERR_VALIDATION_INTEGER", { flag, message });
};
const invalidRuntimeError = (runtime, validRuntimes) => {
  const message = `Invalid --runtime: "${runtime}". Must be one of: ${validRuntimes.join(", ")}`;
  return new ValidationError("ERR_VALIDATION_RUNTIME", { flag: "runtime", message });
};
const invalidNetworkPolicyError = (policy, validPolicies) => {
  const message = `Invalid --network-policy: "${policy}". Must be one of: ${validPolicies.join(", ")}`;
  return new ValidationError("ERR_VALIDATION_NETWORK_POLICY", { flag: "network-policy", message });
};
const invalidPortError = (port) => {
  return new ValidationError("ERR_VALIDATION_PORT", {
    flag: "publish-port",
    message: `Invalid port: ${port}`
  });
};
const portConflictError = (conflictSummary) => {
  return new ValidationError("ERR_VALIDATION_PORT_CONFLICT", {
    flag: "publish-port",
    message: `Published port conflict: ${conflictSummary}`
  });
};
const invalidDomainError = (domain) => {
  return new ValidationError("ERR_VALIDATION_DOMAIN", {
    flag: "allowed-domain",
    message: `Invalid domain: "${domain}"`
  });
};
const invalidDomainPatternError = (domain, detail) => {
  // Appends the optional detail sentence only when provided.
  const base = `Invalid domain pattern: "${domain}"`;
  return new ValidationError("ERR_VALIDATION_DOMAIN", {
    flag: "allowed-domain",
    message: detail ? `${base}. ${detail}` : base
  });
};
// CIDR failures carry no flag: both --allowed-cidr and --denied-cidr funnel here.
const invalidCidrFormatError = (cidr) => {
  return new ValidationError("ERR_VALIDATION_CIDR", { message: `Invalid CIDR format: "${cidr}". Expected format: x.x.x.x/y` });
};
const invalidCidrPrefixError = (cidr) => {
  return new ValidationError("ERR_VALIDATION_CIDR", { message: `Invalid CIDR prefix length: "${cidr}". Must be 0-32.` });
};
const invalidCidrOctetError = (cidr) => {
  return new ValidationError("ERR_VALIDATION_CIDR", { message: `Invalid CIDR IP octet: "${cidr}". Each octet must be 0-255.` });
};
|
|
64
|
+
// Factories for --from-image, --disk, duration, and flag-combination failures.
const invalidImageRefEmptyError = () => {
  return new ValidationError("ERR_VALIDATION_IMAGE_REF", {
    flag: "from-image",
    message: "Invalid --from-image: image reference cannot be empty."
  });
};
const invalidImageRefTagError = (ref) => {
  return new ValidationError("ERR_VALIDATION_IMAGE_REF", {
    flag: "from-image",
    message: `Invalid --from-image: "${ref}". Tag cannot be empty.`
  });
};
const invalidDiskSizeFormatError = (value) => {
  return new ValidationError("ERR_VALIDATION_DISK_SIZE", {
    flag: "disk",
    message: `Invalid --disk: "${value}". Expected format like 10gb.`
  });
};
const invalidDiskSizeRangeError = (value) => {
  return new ValidationError("ERR_VALIDATION_DISK_SIZE", {
    flag: "disk",
    message: `Invalid --disk: "${value}". Must be an integer between 1gb and 1024gb.`
  });
};
const invalidDurationError = (input) => {
  return new ValidationError("ERR_VALIDATION_DURATION", { message: `Invalid duration: "${input}". Use format like "1h", "30m", "2h30m", or plain minutes.` });
};
const mutuallyExclusiveFlagsError = (flagA, flagB) => {
  return new ValidationError("ERR_VALIDATION_FLAGS", {
    message: `Cannot use ${flagA} and ${flagB} together`,
    why: "They are mutually exclusive.",
    fix: `Use either ${flagA} or ${flagB}, not both.`
  });
};
const policyConflictError = () => {
  return new ValidationError("ERR_VALIDATION_POLICY_CONFLICT", {
    flag: "network-policy",
    message: "Cannot combine --network-policy deny-all with --allowed-domain, --allowed-cidr, or --denied-cidr."
  });
};
|
|
90
|
+
// Error tied to a specific VM. `vmId` is optional (some VM-level failures,
// e.g. slot exhaustion, occur before an id exists) and is omitted from
// JSON output when undefined.
var VmError = class extends VmsanError {
	// Identifier of the VM the failure relates to, or undefined.
	vmId;
	constructor(code, options) {
		super(code, options);
		this.name = "VmError";
		this.vmId = options.vmId;
	}
	toJSON() {
		return {
			...super.toJSON(),
			// Include vmId only when one was supplied.
			...this.vmId !== void 0 && { vmId: this.vmId }
		};
	}
};
|
|
104
|
+
// Factories for VM-lifecycle failures. `fix`/`why` carry user-facing
// remediation text rendered by handleCommandError.
const vmNotFoundError = (vmId) => {
  return new VmError("ERR_VM_NOT_FOUND", {
    vmId,
    message: `VM not found: ${vmId}`,
    fix: "Run 'vmsan list' to see available VMs."
  });
};
const vmStateNotFoundError = (vmId) => {
  return new VmError("ERR_VM_STATE_NOT_FOUND", {
    vmId,
    message: `VM state not found: ${vmId}`
  });
};
const vmNotStoppedError = (vmId, currentStatus) => {
  return new VmError("ERR_VM_NOT_STOPPED", {
    vmId,
    message: `VM ${vmId} is not stopped (current status: ${currentStatus})`,
    fix: "Run 'vmsan stop <vm-id>' first, or use --force (-f) to stop and remove in one step."
  });
};
const chrootNotFoundError = (vmId) => {
  return new VmError("ERR_VM_CHROOT_NOT_FOUND", {
    vmId,
    message: `Chroot directory not found for VM ${vmId}`,
    why: "The VM data may have been removed.",
    fix: "The VM must be recreated with 'vmsan create'."
  });
};
const networkSlotsExhaustedError = () => {
  return new VmError("ERR_VM_NETWORK_SLOTS_EXHAUSTED", { message: "No available network slots (max 255 VMs)" });
};
const vmNotRunningError = (vmId) => {
  return new VmError("ERR_VM_NOT_RUNNING", {
    vmId,
    message: `VM ${vmId} is not running`,
    fix: "The VM must be running to update its network policy. Start it with 'vmsan start <vm-id>'."
  });
};
const snapshotNotFoundError = (snapshotId) => {
  return new VmError("ERR_VM_SNAPSHOT_NOT_FOUND", { message: `Snapshot not found: ${snapshotId}` });
};
|
|
131
|
+
// Error for a failed Firecracker HTTP API call over the Unix socket.
// Records the request method/path and the HTTP status so callers and
// structured logs can identify exactly which endpoint failed.
var FirecrackerApiError = class extends VmsanError {
	// HTTP method of the failed request, e.g. "PUT".
	method;
	// API path of the failed request, e.g. "/machine-config".
	path;
	// HTTP status code returned by the Firecracker API.
	httpStatus;
	constructor(code, options) {
		super(code, options);
		this.name = "FirecrackerApiError";
		this.method = options.method;
		this.path = options.path;
		this.httpStatus = options.httpStatus;
	}
	toJSON() {
		return {
			...super.toJSON(),
			method: this.method,
			path: this.path,
			httpStatus: this.httpStatus
		};
	}
};
|
|
151
|
+
// Build a FirecrackerApiError describing a failed API call, embedding the
// response body in the message for diagnosis.
const firecrackerApiError = (method, path, httpStatus, body) => {
  const message = `${method} ${path} failed (${httpStatus}): ${body}`;
  return new FirecrackerApiError("ERR_FIRECRACKER_API", { method, path, httpStatus, message });
};
|
|
157
|
+
// Errors arising from host-side network configuration or inspection.
var NetworkError = class extends VmsanError {
	constructor(code, options) {
		super(code, options);
		this.name = "NetworkError";
	}
};
// Raised when the host's default (outbound) network interface cannot be determined.
const defaultInterfaceNotFoundError = () => new NetworkError("ERR_NETWORK_DEFAULT_INTERFACE", { message: "Could not determine default network interface. Check your network configuration." });
|
|
164
|
+
// Error for an operation that exceeded its time budget. `target` names
// what was being waited on (socket path, lock name, guest IP) and
// `timeoutMs` the budget; both are optional and omitted from JSON when
// undefined.
var TimeoutError = class extends VmsanError {
	// What was being waited on (socket path, lock name, IP), or undefined.
	target;
	// Timeout budget in milliseconds, or undefined.
	timeoutMs;
	constructor(code, options) {
		super(code, options);
		this.name = "TimeoutError";
		this.target = options.target;
		this.timeoutMs = options.timeoutMs;
	}
	toJSON() {
		return {
			...super.toJSON(),
			...this.target !== void 0 && { target: this.target },
			...this.timeoutMs !== void 0 && { timeoutMs: this.timeoutMs }
		};
	}
};
|
|
181
|
+
// Factories for timeout failures while waiting on sockets, locks, and the guest agent.
const socketTimeoutError = (socketPath) => {
  return new TimeoutError("ERR_TIMEOUT_SOCKET", {
    target: socketPath,
    message: `Timeout waiting for API socket at ${socketPath}`
  });
};
const lockTimeoutError = (lockName) => {
  return new TimeoutError("ERR_TIMEOUT_LOCK", {
    target: lockName,
    message: `Timed out waiting for ${lockName} lock`
  });
};
const agentTimeoutError = (guestIp, timeoutMs) => {
  return new TimeoutError("ERR_TIMEOUT_AGENT", {
    target: guestIp,
    timeoutMs,
    message: `Agent at ${guestIp} not available after ${timeoutMs}ms`
  });
};
|
|
194
|
+
// Error for a missing or broken host installation (binaries, kernels, rootfs).
var SetupError = class extends VmsanError {
	constructor(code, options) {
		super(code, options);
		this.name = "SetupError";
	}
};
|
|
200
|
+
// Shared remediation text pointing every setup failure at the installer script.
const INSTALL_FIX = `Run the install script to set up all dependencies:\n\ncurl -fsSL https://raw.githubusercontent.com/angelorc/vmsan/main/install.sh | bash`;
// Factories for host-setup failures; all share the same fix text.
const missingBinaryError = (binary, path) => {
  return new SetupError("ERR_SETUP_MISSING_BINARY", {
    message: `${binary} not found at ${path}`,
    fix: INSTALL_FIX
  });
};
const noKernelDirError = () => {
  return new SetupError("ERR_SETUP_NO_KERNEL_DIR", {
    message: "No kernels directory found.",
    fix: INSTALL_FIX
  });
};
const noKernelError = () => {
  return new SetupError("ERR_SETUP_NO_KERNEL", {
    message: "No kernel found in ~/.vmsan/kernels/.",
    fix: INSTALL_FIX
  });
};
const noRootfsDirError = () => {
  return new SetupError("ERR_SETUP_NO_ROOTFS_DIR", {
    message: "No rootfs directory found.",
    fix: INSTALL_FIX
  });
};
const noExt4RootfsError = () => {
  return new SetupError("ERR_SETUP_NO_EXT4_ROOTFS", {
    message: "No ext4 rootfs found in ~/.vmsan/rootfs/.",
    fix: INSTALL_FIX
  });
};
|
|
221
|
+
// Report a failed CLI command to the structured log and, unless output
// mode is "json", to the console.
//
// The error is recorded on `cmdLog` (an evlog-style command logger) and
// the accumulated event is emitted first, so the structured record exists
// regardless of output mode. EvlogError instances get their optional
// why/fix/link fields rendered as extra guidance; anything else is reduced
// to its message string.
function handleCommandError(error, cmdLog) {
	cmdLog.error(error instanceof Error ? error : String(error));
	cmdLog.emit();
	// In JSON mode the emitted event is the only output; skip console text.
	if (getOutputMode() === "json") return;
	if (error instanceof EvlogError) {
		consola.error(error.message);
		if (error.why) consola.log(` Why: ${error.why}`);
		if (error.fix) consola.log(`\n Fix:\n\n${error.fix}\n`);
		if (error.link) consola.log(` More: ${error.link}`);
	} else consola.error(error instanceof Error ? error.message : String(error));
}
|
|
232
|
+
export { invalidDomainError as A, policyConflictError as B, vmStateNotFoundError as C, invalidCidrPrefixError as D, invalidCidrOctetError as E, invalidIntegerFlagError as F, VmsanError as H, invalidNetworkPolicyError as I, invalidPortError as L, invalidDurationError as M, invalidImageRefEmptyError as N, invalidDiskSizeFormatError as O, invalidImageRefTagError as P, invalidRuntimeError as R, vmNotStoppedError as S, invalidCidrFormatError as T, portConflictError as V, chrootNotFoundError as _, noKernelDirError as a, vmNotFoundError as b, TimeoutError as c, socketTimeoutError as d, NetworkError as f, VmError as g, firecrackerApiError as h, noExt4RootfsError as i, invalidDomainPatternError as j, invalidDiskSizeRangeError as k, agentTimeoutError as l, FirecrackerApiError as m, SetupError as n, noKernelError as o, defaultInterfaceNotFoundError as p, missingBinaryError as r, noRootfsDirError as s, handleCommandError as t, lockTimeoutError as u, networkSlotsExhaustedError as v, ValidationError as w, vmNotRunningError as x, snapshotNotFoundError as y, mutuallyExclusiveFlagsError as z };
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import { t as __exportAll } from "./rolldown-runtime.mjs";
|
|
2
|
+
import { h as firecrackerApiError } from "./errors.mjs";
|
|
3
|
+
import { join } from "node:path";
|
|
4
|
+
import { request } from "node:http";
|
|
5
|
+
// Namespace object assembled by the rolldown bundler runtime so this chunk's
// public names can be re-exported lazily as a single module object.
var firecracker_exports = /* @__PURE__ */ __exportAll({
	FirecrackerClient: () => FirecrackerClient,
	firecrackerFetch: () => firecrackerFetch,
	firecrackerRequest: () => firecrackerRequest
});
|
|
10
|
+
/**
 * Perform one raw HTTP request against the Firecracker API Unix socket.
 *
 * The optional `body` is JSON-encoded when truthy. Resolves with
 * `{ statusCode, body }` (body as a UTF-8 string, possibly empty);
 * rejects only on transport-level errors.
 */
function firecrackerRequest(socketPath, method, path, body) {
  const payload = body ? JSON.stringify(body) : void 0;
  const headers = { "Content-Type": "application/json" };
  // Firecracker requires an explicit Content-Length when a body is sent.
  if (payload) headers["Content-Length"] = String(Buffer.byteLength(payload));
  return new Promise((resolve, reject) => {
    const req = request({ socketPath, method, path, headers }, (res) => {
      const pieces = [];
      res.on("data", (piece) => {
        pieces.push(piece);
      });
      res.on("end", () => {
        resolve({
          statusCode: res.statusCode ?? 0,
          body: Buffer.concat(pieces).toString()
        });
      });
    });
    req.on("error", reject);
    if (payload) req.write(payload);
    req.end();
  });
}
|
|
36
|
+
/**
 * Send a type-safe request to the Firecracker API over a Unix socket.
 *
 * The generic parameters (in the .d.mts typings) validate that `path`,
 * `method`, and `body` match the Firecracker API schema.
 *
 * Returns the parsed JSON body for responses that carry one, `undefined`
 * for 204/empty bodies, and throws a FirecrackerApiError for any status
 * of 400 or above.
 */
async function firecrackerFetch(socketPath, method, path, ...args) {
	const [requestBody] = args;
	const { statusCode, body } = await firecrackerRequest(socketPath, method, path, requestBody);
	if (statusCode >= 400) throw firecrackerApiError(method, path, statusCode, body);
	const hasJsonBody = statusCode !== 204 && Boolean(body);
	return hasJsonBody ? JSON.parse(body) : void 0;
}
|
|
53
|
+
// Thin typed wrapper over the Firecracker HTTP API for one microVM,
// addressed by its control Unix socket.
var FirecrackerClient = class {
	constructor(socketPath) {
		this.socketPath = socketPath;
	}
	// Configure the kernel image and boot arguments (PUT /boot-source).
	async boot(kernelPath, bootArgs) {
		await firecrackerFetch(this.socketPath, "PUT", "/boot-source", {
			kernel_image_path: kernelPath,
			boot_args: bootArgs
		});
	}
	// Attach a block device (PUT /drives/{id}). Uses the "Unsafe" cache
	// type and synchronous IO engine as configured here.
	async addDrive(driveId, pathOnHost, isRoot, isReadOnly) {
		await firecrackerFetch(this.socketPath, "PUT", `/drives/${driveId}`, {
			drive_id: driveId,
			path_on_host: pathOnHost,
			is_root_device: isRoot,
			is_read_only: isReadOnly,
			cache_type: "Unsafe",
			io_engine: "Sync"
		});
	}
	// Set vCPU count and memory size (PUT /machine-config); SMT and dirty
	// page tracking are disabled.
	async configure(vcpus, memMib) {
		await firecrackerFetch(this.socketPath, "PUT", "/machine-config", {
			vcpu_count: vcpus,
			mem_size_mib: memMib,
			smt: false,
			track_dirty_pages: false
		});
	}
	// Attach a network interface backed by a host TAP device
	// (PUT /network-interfaces/{id}).
	async addNetwork(ifaceId, tapDev, macAddress) {
		await firecrackerFetch(this.socketPath, "PUT", `/network-interfaces/${ifaceId}`, {
			iface_id: ifaceId,
			host_dev_name: tapDev,
			guest_mac: macAddress
		});
	}
	// Boot the configured microVM (PUT /actions InstanceStart).
	async start() {
		await firecrackerFetch(this.socketPath, "PUT", "/actions", { action_type: "InstanceStart" });
	}
	// Restore VM state from a snapshot/memory file pair (PUT /snapshot/load).
	async loadSnapshot(snapshotPath, memPath) {
		await firecrackerFetch(this.socketPath, "PUT", "/snapshot/load", {
			snapshot_path: snapshotPath,
			mem_file_path: memPath
		});
	}
	// Resume a paused VM, e.g. after a snapshot load (PATCH /vm).
	async resume() {
		await firecrackerFetch(this.socketPath, "PATCH", "/vm", { state: "Resumed" });
	}
	// Return the output of `firecracker --version` for the binary under
	// `baseDir`/bin, or undefined if it cannot be executed.
	static async getVersion(baseDir) {
		const fcPath = join(baseDir, "bin", "firecracker");
		try {
			const { execSync } = await import("node:child_process");
			return execSync(`"${fcPath}" --version`, { encoding: "utf-8" }).trim();
		} catch {
			// Best-effort: missing or broken binary yields undefined.
			return;
		}
	}
};
|
|
110
|
+
export { firecrackerFetch as n, firecracker_exports as r, FirecrackerClient as t };
|
|
@@ -0,0 +1,329 @@
|
|
|
1
|
+
import { B as policyConflictError } from "./errors.mjs";
|
|
2
|
+
import { a as parseDuration } from "./vm-state.mjs";
|
|
3
|
+
import { t as assertSnapshotExists } from "./environment.mjs";
|
|
4
|
+
import { c as parsePublishedPorts, d as validateCidr, f as validatePublishedPortsAvailable, i as parseDomains, l as parseRuntime, n as parseCidrList, o as parseMemoryMib, r as parseDiskSizeGb, s as parseNetworkPolicy, u as parseVcpuCount } from "./validation.mjs";
|
|
5
|
+
import { dirname, join } from "node:path";
|
|
6
|
+
import { execFileSync, execSync } from "node:child_process";
|
|
7
|
+
import { consola } from "consola";
|
|
8
|
+
import { copyFileSync, existsSync, mkdirSync, statSync, writeFileSync } from "node:fs";
|
|
9
|
+
// Parse and validate the raw CLI arguments for `vmsan create` into a
// normalized input record. Individual parse* helpers throw ValidationError
// on bad values; cross-field checks (port availability, CIDR syntax,
// policy conflicts, snapshot existence) run after.
function parseCreateInput(args, paths) {
	const vcpus = parseVcpuCount(args.vcpus);
	const memMib = parseMemoryMib(args.memory);
	const runtime = parseRuntime(args.runtime);
	const networkPolicy = parseNetworkPolicy(args["network-policy"]);
	const ports = parsePublishedPorts(args["publish-port"]);
	const domains = parseDomains(args["allowed-domain"]);
	const allowedCidrs = parseCidrList(args["allowed-cidr"]);
	const deniedCidrs = parseCidrList(args["denied-cidr"]);
	// Optional flags: null when not provided (or not a plain string).
	const timeoutMs = typeof args.timeout === "string" ? parseDuration(args.timeout) : null;
	const snapshotId = typeof args.snapshot === "string" ? args.snapshot : null;
	const diskSizeGb = parseDiskSizeGb(args.disk);
	validatePublishedPortsAvailable(ports, paths);
	for (const cidr of allowedCidrs) validateCidr(cidr);
	for (const cidr of deniedCidrs) validateCidr(cidr);
	// deny-all admits no exceptions; listing any is a contradiction.
	if (networkPolicy === "deny-all" && (domains.length || allowedCidrs.length || deniedCidrs.length)) throw policyConflictError();
	if (snapshotId) assertSnapshotExists(snapshotId, paths);
	return {
		vcpus,
		memMib,
		runtime,
		// allow-all plus any domain/CIDR restriction is promoted to "custom".
		networkPolicy: networkPolicy === "allow-all" && (domains.length > 0 || allowedCidrs.length > 0 || deniedCidrs.length > 0) ? "custom" : networkPolicy,
		ports,
		domains,
		allowedCidrs,
		deniedCidrs,
		timeoutMs,
		snapshotId,
		diskSizeGb
	};
}
|
|
40
|
+
/**
 * Assemble the initial persisted state record for a freshly created VM.
 *
 * Pure data construction: status starts as "creating"; pid, apiSocket and
 * chrootDir are placeholders filled in later in the create flow; timeoutAt
 * is derived from timeoutMs relative to now (or null when no timeout).
 */
function buildInitialVmState(input) {
  const createdAt = new Date().toISOString();
  const timeoutAt = input.timeoutMs ? new Date(Date.now() + input.timeoutMs).toISOString() : null;
  const network = {
    tapDevice: input.tapDevice,
    hostIp: input.hostIp,
    guestIp: input.guestIp,
    subnetMask: input.subnetMask,
    macAddress: input.macAddress,
    networkPolicy: input.networkPolicy,
    allowedDomains: input.domains,
    allowedCidrs: input.allowedCidrs,
    deniedCidrs: input.deniedCidrs,
    publishedPorts: input.ports,
    tunnelHostname: null,
    tunnelHostnames: [],
    bandwidthMbit: input.bandwidthMbit,
    netnsName: input.netnsName
  };
  return {
    id: input.vmId,
    project: input.project,
    runtime: input.runtime,
    diskSizeGb: input.diskSizeGb,
    status: "creating",
    pid: null,
    apiSocket: "",
    chrootDir: "",
    kernel: input.kernelPath,
    rootfs: input.rootfsPath,
    vcpuCount: input.vcpus,
    memSizeMib: input.memMib,
    network,
    snapshot: input.snapshotId,
    timeoutMs: input.timeoutMs,
    timeoutAt,
    createdAt,
    error: null,
    agentToken: input.agentToken,
    agentPort: input.agentPort
  };
}
|
|
80
|
+
// Render the human-readable summary printed after `vmsan create` succeeds,
// as an array of lines. Optional sections (project, domains, CIDRs, ports,
// snapshot, timeout) are spliced in only when present.
// NOTE(review): this reads `input.timeout` while parseCreateInput produces
// `timeoutMs` — presumably the summary input carries the raw flag string;
// confirm against the caller.
function buildCreateSummaryLines(input) {
	return [
		`VM Created: ${input.vmId}`,
		"",
		" Status: running",
		// A falsy pid (null/0) is rendered as "unknown".
		` PID: ${input.pid || "unknown"}`,
		` vCPUs: ${input.vcpus}`,
		` Memory: ${input.memMib} MiB`,
		` Runtime: ${input.runtime}`,
		` Disk: ${input.diskSizeGb} GB`,
		...input.project ? [` Project: ${input.project}`] : [],
		"",
		" Network:",
		` TAP: ${input.tapDevice}`,
		` Host: ${input.hostIp}`,
		` Guest: ${input.guestIp}`,
		` MAC: ${input.macAddress}`,
		` Policy: ${input.networkPolicy}`,
		...input.domains.length > 0 ? [` Domains: ${input.domains.join(", ")}`] : [],
		...input.allowedCidrs.length > 0 ? [` Allowed CIDRs: ${input.allowedCidrs.join(", ")}`] : [],
		...input.deniedCidrs.length > 0 ? [` Denied CIDRs: ${input.deniedCidrs.join(", ")}`] : [],
		...input.ports.length > 0 ? [` Ports: ${input.ports.join(", ")}`] : [],
		"",
		` Kernel: ${input.kernelPath}`,
		` Rootfs: ${input.rootfsPath}`,
		...input.snapshotId ? [` Snapshot: ${input.snapshotId}`] : [],
		...input.timeout ? [` Timeout: ${input.timeout}`] : [],
		"",
		` Socket: ${input.socketPath}`,
		` Chroot: ${input.chrootDir}`,
		` State: ${input.stateFilePath}`
	];
}
|
|
113
|
+
// Architectures seccompiler-bin can target; also guards against injecting
// arbitrary strings into the compiler invocation.
const VALID_ARCHES = ["x86_64", "aarch64"];
// Upper bound (1 MiB) on the JSON filter size, as a sanity guard.
const MAX_FILTER_SIZE = 1048576;
/**
 * Compile a Firecracker seccomp JSON filter to BPF using seccompiler-bin.
 *
 * Throws if the target arch is unsupported, the filter exceeds
 * MAX_FILTER_SIZE, or seccompiler-bin is missing or fails — there is no
 * fallback here; ensureSeccompFilter catches the failure and disables
 * seccomp instead (Firecracker only accepts compiled BPF, not raw JSON).
 */
function compileSeccompFilter(jsonPath, outputPath, arch) {
	const targetArch = arch ?? "x86_64";
	if (!VALID_ARCHES.includes(targetArch)) throw new Error(`unsupported seccomp arch: ${targetArch} (allowed: ${VALID_ARCHES.join(", ")})`);
	const stat = statSync(jsonPath);
	if (stat.size > MAX_FILTER_SIZE) throw new Error(`seccomp filter too large: ${stat.size} bytes (max ${MAX_FILTER_SIZE})`);
	mkdirSync(dirname(outputPath), { recursive: true });
	// execFileSync (no shell) with a fixed argv; stdio "pipe" keeps
	// compiler output off the user's terminal.
	execFileSync("seccompiler-bin", [
		"--input-file",
		jsonPath,
		"--target-arch",
		targetArch,
		"--output-file",
		outputPath
	], { stdio: "pipe" });
}
|
|
134
|
+
/**
|
|
135
|
+
* Ensure a seccomp filter is available for Firecracker.
|
|
136
|
+
*
|
|
137
|
+
* 1. If a compiled BPF exists at paths.seccompDir/default.bpf, return it.
|
|
138
|
+
* 2. If the JSON source exists, try to compile it; return BPF path on success.
|
|
139
|
+
* 3. If compilation fails (seccompiler-bin not installed), return null
|
|
140
|
+
* (Firecracker requires compiled BPF, not raw JSON).
|
|
141
|
+
* 4. If no filter source exists at all, return null.
|
|
142
|
+
*/
|
|
143
|
+
function ensureSeccompFilter(paths) {
|
|
144
|
+
const bpfPath = join(paths.seccompDir, "default.bpf");
|
|
145
|
+
if (existsSync(bpfPath)) {
|
|
146
|
+
consola.debug(`seccomp: using compiled BPF filter at ${bpfPath}`);
|
|
147
|
+
return bpfPath;
|
|
148
|
+
}
|
|
149
|
+
const bundledJson = join(dirname(dirname(__dirname)), "seccomp", "default.json");
|
|
150
|
+
const userJson = paths.seccompFilter;
|
|
151
|
+
let sourceJson = null;
|
|
152
|
+
try {
|
|
153
|
+
const mode = statSync(userJson).mode;
|
|
154
|
+
if (mode & 18) consola.warn(`seccomp: filter at ${userJson} is group/world writable (mode ${(mode & 511).toString(8)}); consider restricting permissions`);
|
|
155
|
+
consola.debug(`seccomp: using user filter at ${userJson}`);
|
|
156
|
+
sourceJson = userJson;
|
|
157
|
+
} catch {}
|
|
158
|
+
if (!sourceJson && existsSync(bundledJson)) {
|
|
159
|
+
mkdirSync(paths.seccompDir, { recursive: true });
|
|
160
|
+
copyFileSync(bundledJson, userJson);
|
|
161
|
+
consola.debug(`seccomp: copied bundled filter to ${userJson}`);
|
|
162
|
+
sourceJson = userJson;
|
|
163
|
+
}
|
|
164
|
+
if (!sourceJson) return null;
|
|
165
|
+
try {
|
|
166
|
+
compileSeccompFilter(sourceJson, bpfPath);
|
|
167
|
+
consola.debug(`seccomp: compiled BPF filter at ${bpfPath}`);
|
|
168
|
+
return bpfPath;
|
|
169
|
+
} catch {
|
|
170
|
+
consola.warn("seccomp: BPF compilation failed (seccompiler-bin not available?); seccomp filtering disabled. Install seccompiler-bin for seccomp support.");
|
|
171
|
+
return null;
|
|
172
|
+
}
|
|
173
|
+
}
|
|
174
|
+
// Error returned when the Docker CLI/daemon cannot be reached.
function dockerUnavailableError() {
  const err = new Error("Docker is not available. Install Docker and ensure the daemon is running.");
  return err;
}
|
|
177
|
+
// Guest packages baked into image-based rootfs builds. One list per
// package-manager family probed in generateDockerfile; names differ per
// distro for the same tools (e.g. bind9-utils / bind-utils / bind-tools).
// Debian/Ubuntu (apt-get).
const APT_PACKAGES = [
	"bind9-utils",
	"bzip2",
	"findutils",
	"git",
	"gzip",
	"iputils-ping",
	"libicu-dev",
	"libjpeg-dev",
	"libpng-dev",
	"ncurses-base",
	"libssl-dev",
	"openssh-server",
	"openssl",
	"procps",
	"tar",
	"unzip",
	"debianutils",
	"whois",
	"zstd"
];
// Fedora/RHEL family (dnf, with yum as fallback).
const DNF_PACKAGES = [
	"bind-utils",
	"bzip2",
	"findutils",
	"git",
	"gzip",
	"iputils",
	"libicu",
	"libjpeg",
	"libpng",
	"ncurses-libs",
	"openssh-server",
	"openssl",
	"openssl-libs",
	"procps",
	"tar",
	"unzip",
	"which",
	"whois",
	"zstd"
];
// Alpine (apk); also pulls openrc since Alpine base images ship no init.
const APK_PACKAGES = [
	"bind-tools",
	"bzip2",
	"findutils",
	"git",
	"gzip",
	"iputils",
	"icu-libs",
	"libjpeg-turbo",
	"libpng",
	"ncurses-libs",
	"openrc",
	"openssh",
	"openssl",
	"procps",
	"tar",
	"unzip",
	"whois",
	"zstd"
];
|
|
239
|
+
/**
 * Generate the Dockerfile used to turn `baseImage` into a VM-ready image:
 * installs the baseline package set for whichever package manager the image
 * ships, enables root SSH login, and wires up OpenRC/inittab on Alpine or
 * enables sshd under systemd where present.
 */
function generateDockerfile(baseImage) {
  const aptInstall = `apt-get update && apt-get install -y --no-install-recommends ${APT_PACKAGES.join(" ")} && rm -rf /var/lib/apt/lists/*`;
  const dnfInstall = `dnf install -y ${DNF_PACKAGES.join(" ")} && dnf clean all`;
  const yumInstall = `yum install -y ${DNF_PACKAGES.join(" ")} && yum clean all`;
  const apkInstall = `apk add --no-cache ${APK_PACKAGES.join(" ")}`;
  return `FROM ${baseImage}
RUN if command -v apt-get >/dev/null 2>&1; then ${aptInstall}; \\
elif command -v dnf >/dev/null 2>&1; then ${dnfInstall}; \\
elif command -v yum >/dev/null 2>&1; then ${yumInstall}; \\
elif command -v apk >/dev/null 2>&1; then ${apkInstall}; \\
fi
RUN ssh-keygen -A 2>/dev/null || true; \\
mkdir -p /root/.ssh && chmod 700 /root/.ssh; \\
if [ -f /etc/ssh/sshd_config ]; then \\
sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config; \\
fi; \\
if command -v rc-update >/dev/null 2>&1; then \\
rc-update add devfs sysinit 2>/dev/null || true; \\
rc-update add mdev sysinit 2>/dev/null || true; \\
rc-update add hwdrivers sysinit 2>/dev/null || true; \\
rc-update add modules boot 2>/dev/null || true; \\
rc-update add sysctl boot 2>/dev/null || true; \\
rc-update add hostname boot 2>/dev/null || true; \\
rc-update add bootmisc boot 2>/dev/null || true; \\
rc-update add networking boot 2>/dev/null || true; \\
rc-update add sshd default 2>/dev/null || true; \\
printf '%s\\n' '::sysinit:/sbin/openrc sysinit' '::sysinit:/sbin/openrc boot' '::wait:/sbin/openrc default' '::shutdown:/sbin/openrc shutdown' 'ttyS0::respawn:/sbin/getty 115200 ttyS0' > /etc/inittab; \\
fi; \\
if command -v systemctl >/dev/null 2>&1; then systemctl enable sshd 2>/dev/null || systemctl enable ssh 2>/dev/null || true; fi
`;
}
|
|
266
|
+
// Throw dockerUnavailableError() unless `docker info` succeeds, i.e. the
// CLI exists and the daemon is reachable.
function verifyDocker() {
  let reachable = true;
  try {
    execSync("docker info", { stdio: "pipe" });
  } catch {
    reachable = false;
  }
  if (!reachable) throw dockerUnavailableError();
}
|
|
273
|
+
// Build an ext4 rootfs image from a container image reference.
//
// Pipeline: docker build (Dockerfile from generateDockerfile, fed via a
// bash heredoc) -> docker export of a throwaway container to a tar ->
// blank ext4 sized max(1024 MB, tar size + 512 MB) -> loop-mount and untar
// into it -> write metadata.json alongside. The container and tar are
// removed in the finally block regardless of outcome.
//
// NOTE(review): requires root (mount -o loop) and GNU coreutils
// (`stat -c %s`) — Linux-only; confirm that matches the supported targets.
function buildImageRootfs(imageRef, cacheDir) {
	const ext4Path = join(cacheDir, "rootfs.ext4");
	verifyDocker();
	// Sanitize the image name so it is a valid docker tag component.
	const buildTag = `vmsan-rootfs-${imageRef.name.replace(/[^a-z0-9._-]/gi, "-")}:${imageRef.tag}`;
	const containerName = `vmsan-export-${Date.now()}`;
	const tmpTar = join(cacheDir, "rootfs.tar");
	mkdirSync(cacheDir, { recursive: true });
	try {
		consola.start(`Building image from ${imageRef.full}...`);
		// Quoted heredoc delimiter prevents shell expansion of the Dockerfile body.
		execSync(`docker build -t "${buildTag}" -f - . <<'DOCKERFILE'\n${generateDockerfile(imageRef.full)}\nDOCKERFILE`, {
			stdio: "pipe",
			shell: "/bin/bash"
		});
		consola.start("Exporting filesystem...");
		execSync(`docker create --name "${containerName}" "${buildTag}"`, { stdio: "pipe" });
		execSync(`docker export "${containerName}" -o "${tmpTar}"`, { stdio: "pipe" });
		consola.start("Converting to ext4...");
		const tarSizeOutput = execSync(`stat -c %s "${tmpTar}"`, { encoding: "utf-8" }).trim();
		const tarMb = Number(tarSizeOutput) / 1024 / 1024;
		// Headroom: at least 1 GB total, or tar size plus 512 MB.
		const imageSizeMb = Math.max(1024, Math.ceil(tarMb + 512));
		execSync(`dd if=/dev/zero of="${ext4Path}" bs=1M count=${imageSizeMb} 2>/dev/null`, { stdio: "pipe" });
		execSync(`mkfs.ext4 -q "${ext4Path}"`, { stdio: "pipe" });
		// -m 0: no reserved blocks; all space is usable by the guest.
		execSync(`tune2fs -m 0 "${ext4Path}"`, { stdio: "pipe" });
		const tmpMount = join(cacheDir, "mnt");
		mkdirSync(tmpMount, { recursive: true });
		execSync(`mount -o loop "${ext4Path}" "${tmpMount}"`, { stdio: "pipe" });
		try {
			execSync(`tar -xf "${tmpTar}" -C "${tmpMount}"`, { stdio: "pipe" });
		} finally {
			// Always unmount before removing the mount point.
			execSync(`umount "${tmpMount}"`, { stdio: "pipe" });
			execSync(`rm -rf "${tmpMount}"`, { stdio: "pipe" });
		}
		writeFileSync(join(cacheDir, "metadata.json"), JSON.stringify({
			image: imageRef.full,
			builtAt: (/* @__PURE__ */ new Date()).toISOString()
		}, null, 2));
		consola.success(`Rootfs built from ${imageRef.full} (${imageSizeMb} MB)`);
		return ext4Path;
	} finally {
		// Best-effort cleanup of the export container and temp tar.
		try {
			execSync(`docker rm -f "${containerName}" 2>/dev/null`, { stdio: "pipe" });
		} catch {}
		try {
			execSync(`rm -f "${tmpTar}"`, { stdio: "pipe" });
		} catch {}
	}
}
|
|
320
|
+
// Return the path to the ext4 rootfs for an image reference, reusing the
// per-image cache directory when a build already exists and building
// otherwise.
function resolveImageRootfs(imageRef, registryDir) {
  const cacheDir = join(registryDir, imageRef.cacheKey);
  const cachedExt4 = join(cacheDir, "rootfs.ext4");
  if (!existsSync(cachedExt4)) return buildImageRootfs(imageRef, cacheDir);
  consola.info(`Using cached rootfs for ${imageRef.full}`);
  return cachedExt4;
}
|
|
329
|
+
export { buildInitialVmState as a, buildCreateSummaryLines as i, compileSeccompFilter as n, parseCreateInput as o, ensureSeccompFilter as r, resolveImageRootfs as t };
|