@lamalibre/install-portlama-e2e-mcp 0.1.2 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +2 -2
- package/src/config.js +43 -0
- package/src/index.js +3 -0
- package/src/lib/deps.js +0 -2
- package/src/lib/multipass.js +1 -1
- package/src/lib/state.js +43 -0
- package/src/tools/provision.js +471 -174
- package/src/tools/snapshots.js +94 -13
- package/src/tools/status.js +2 -0
- package/src/tools/tests.js +16 -14
- package/src/tools/vm.js +61 -6
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@lamalibre/install-portlama-e2e-mcp",
-  "version": "0.1.2",
+  "version": "0.2.1",
   "description": "MCP server for Portlama E2E test infrastructure — VM lifecycle, snapshots, test execution",
   "type": "module",
   "license": "SEE LICENSE IN LICENSE.md",
@@ -18,7 +18,7 @@
     "build": "echo 'No build step for e2e-mcp'"
   },
   "dependencies": {
-    "@modelcontextprotocol/sdk": "^1.
+    "@modelcontextprotocol/sdk": "^1.28.0",
    "chalk": "^5.3.0",
    "execa": "^9.6.1",
    "listr2": "^8.0.0",
package/src/config.js
CHANGED

@@ -31,6 +31,21 @@ export const TEST_DOMAIN = 'test.portlama.local';
 /** VM short-name → full multipass name mapping. */
 export const VM_NAME_MAP = { host: VM_HOST, agent: VM_AGENT, visitor: VM_VISITOR };

+/**
+ * Static IPs for deterministic VM networking.
+ * Uses 10.13.37.0/24 — a private subnet completely outside the Multipass DHCP range
+ * (192.168.2.0/24), eliminating any collision risk. Added as secondary addresses
+ * alongside DHCP, so Multipass connectivity and internet access are unaffected.
+ * These survive snapshot restores via a systemd oneshot service, ensuring provisioned
+ * configs (nginx, /etc/hosts, agent enrollment) remain valid.
+ */
+export const VM_STATIC_IPS = {
+  [VM_HOST]: '10.13.37.1',
+  [VM_AGENT]: '10.13.37.2',
+  [VM_VISITOR]: '10.13.37.3',
+};
+export const STATIC_SUBNET = '10.13.37.0/24';
+
 /** VM profiles — resource allocation tiers. */
 export const PROFILES = {
   production: {
@@ -58,3 +73,31 @@ export const CHECKPOINTS = {
   'post-create': 'VMs exist but no setup has run',
   'post-setup': 'All VMs provisioned, onboarding complete, services running',
 };
+
+/** Snapshot tier definitions — layered provisioning stages. */
+export const TIERS = {
+  'node-ready': {
+    level: 1,
+    description: 'Node.js 22.x installed, apt cache warm',
+    appliesTo: ['host', 'agent', 'visitor'],
+    coordinated: false,
+  },
+  'installed': {
+    level: 2,
+    description: 'Portlama installed (create-portlama completed)',
+    appliesTo: ['host'],
+    coordinated: false,
+  },
+  'provisioned': {
+    level: 3,
+    description: 'Onboarding complete, agent enrolled, visitor configured',
+    appliesTo: ['host', 'agent', 'visitor'],
+    coordinated: true,
+  },
+};
+
+/** Ordered tier names from lowest to highest. */
+export const TIER_ORDER = ['node-ready', 'installed', 'provisioned'];
+
+/** Snapshot name prefix for tier snapshots. */
+export const TIER_SNAPSHOT_PREFIX = 'tier-';
package/src/index.js
CHANGED

@@ -17,6 +17,7 @@
 // snapshot_create — snapshot VMs at a checkpoint
 // snapshot_restore — restore VMs to a checkpoint
 // snapshot_list — list available snapshots
+// provision — smart tier-aware provisioning with layered snapshots
 // provision_host — full host provisioning pipeline
 // provision_agent — agent setup with cert transfer
 // provision_visitor — visitor setup
@@ -41,6 +42,7 @@ import {
   snapshotListTool,
 } from './tools/snapshots.js';
 import {
+  provisionTool,
   provisionHostTool,
   provisionAgentTool,
   provisionVisitorTool,
@@ -70,6 +72,7 @@ const tools = [
   snapshotCreateTool,
   snapshotRestoreTool,
   snapshotListTool,
+  provisionTool,
   provisionHostTool,
   provisionAgentTool,
   provisionVisitorTool,
package/src/lib/deps.js
CHANGED

@@ -79,7 +79,6 @@ export const SINGLE_VM_DEPS = {
   11: [3], // input-validation
   12: [3], // user-invitations
   13: [3], // site-lifecycle
-  14: [3], // shell-lifecycle
   15: [3], // plugin-lifecycle
   16: [3], // enrollment-tokens
 };
@@ -99,7 +98,6 @@ export const THREE_VM_DEPS = {
   7: [1], // site-visitor-journey
   8: [1], // invitation-journey
   9: [1], // agent-site-deploy
-  10: [1], // shell-lifecycle
   11: [1], // plugin-lifecycle
   12: [1], // enrollment-lifecycle
 };
package/src/lib/multipass.js
CHANGED
package/src/lib/state.js
CHANGED

@@ -25,6 +25,8 @@ function defaultState() {
     credentials: null,
     lastRun: null,
     runs: [],
+    tiers: {},
+    tierSnapshots: {},
   };
 }

@@ -68,6 +70,47 @@ export function removeVmState(name) {
   saveState(state);
 }

+/** Update the current tier for a VM. */
+export function setVmTier(vmName, tier) {
+  const state = loadState();
+  if (!state.tiers) state.tiers = {};
+  state.tiers[vmName] = tier;
+  saveState(state);
+}
+
+/** Get the current tier for a VM. Returns null if not set. */
+export function getVmTier(vmName) {
+  const state = loadState();
+  return state.tiers?.[vmName] || null;
+}
+
+/** Record that a tier snapshot was created. */
+export function recordTierSnapshot(tierName, vmNames) {
+  const state = loadState();
+  if (!state.tierSnapshots) state.tierSnapshots = {};
+  state.tierSnapshots[tierName] = {
+    vms: Object.fromEntries(vmNames.map((vm) => [vm, true])),
+    createdAt: new Date().toISOString(),
+  };
+  saveState(state);
+}
+
+/** Check if a tier snapshot exists for all required VMs. */
+export function hasTierSnapshot(tierName, requiredVms) {
+  const state = loadState();
+  const snap = state.tierSnapshots?.[tierName];
+  if (!snap) return false;
+  return requiredVms.every((vm) => snap.vms?.[vm]);
+}
+
+/** Clear all tier snapshot records (called when VMs are recreated/deleted). */
+export function clearTierSnapshots() {
+  const state = loadState();
+  state.tierSnapshots = {};
+  state.tiers = {};
+  saveState(state);
+}
+
 /** Record a test run result. */
 export function recordRun(run) {
   const state = loadState();
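
A short usage sketch of the new tier-state helpers follows. It assumes only the exports shown above; the VM names are illustrative placeholders, since the real VM_HOST/VM_AGENT/VM_VISITOR values are defined in config.js:

    import {
      setVmTier,
      getVmTier,
      recordTierSnapshot,
      hasTierSnapshot,
      clearTierSnapshots,
    } from './lib/state.js';

    // Mark two VMs as having reached the 'node-ready' tier.
    setVmTier('e2e-host', 'node-ready');
    setVmTier('e2e-agent', 'node-ready');

    // Record that a 'node-ready' tier snapshot now covers both VMs.
    recordTierSnapshot('node-ready', ['e2e-host', 'e2e-agent']);

    hasTierSnapshot('node-ready', ['e2e-host']);                 // true
    hasTierSnapshot('node-ready', ['e2e-host', 'e2e-visitor']);  // false, visitor not covered
    getVmTier('e2e-agent');                                      // 'node-ready'

    // Wipe both tiers and tierSnapshots when VMs are recreated or deleted.
    clearTierSnapshots();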
package/src/tools/provision.js
CHANGED

@@ -1,5 +1,6 @@
 // ============================================================================
-// Provisioning Tools — provision_host, provision_agent,
+// Provisioning Tools — provision, provision_host, provision_agent,
+// provision_visitor, hot_reload
 // ============================================================================

 import { z } from 'zod';
@@ -11,11 +12,28 @@ import {
   VM_HOST,
   VM_AGENT,
   VM_VISITOR,
+  ALL_VMS,
   REPO_ROOT,
   THREE_VM_DIR,
   TEST_DOMAIN,
+  VM_NAME_MAP,
+  VM_STATIC_IPS,
+  TIERS,
+  TIER_SNAPSHOT_PREFIX,
 } from '../config.js';
-import {
+import {
+  loadState,
+  updateState,
+  setVmState,
+  setVmTier,
+  getVmTier,
+  recordTierSnapshot,
+  hasTierSnapshot,
+} from '../lib/state.js';
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------

 /** Pack a workspace package and return the tarball path. */
 async function packPackage(packageName) {
@@ -27,205 +45,492 @@ async function packPackage(packageName) {
   return `/tmp/${tarballName}`;
 }

-/** Transfer test scripts to a VM. */
+/** Transfer test scripts to a VM (sequential to avoid SSH overload). */
 async function transferTestScripts(vmName) {
   await mp.exec(vmName, 'mkdir -p /tmp/e2e && chmod 777 /tmp/e2e', { sudo: true });

   const files = fs.readdirSync(THREE_VM_DIR).filter((f) => f.endsWith('.sh'));
-
-
-
-      ),
-    );
+  for (const file of files) {
+    await mp.transfer(path.join(THREE_VM_DIR, file), `${vmName}:/tmp/e2e/${file}`);
+  }

-  // Transfer VM-side API helpers in parallel
   const helpers = ['vm-api-helper.sh', 'vm-api-status-helper.sh'];
-
-
-
-
-
-
-
-
-
-    }),
-  );
+  for (const helper of helpers) {
+    const helperPath = path.join(THREE_VM_DIR, helper);
+    try {
+      await mp.transfer(helperPath, `${vmName}:/tmp/${helper}`);
+      await mp.exec(vmName, `chmod +x /tmp/${helper}`, { sudo: true });
+    } catch {
+      // Helper may not exist
+    }
+  }
 }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+// ---------------------------------------------------------------------------
+// Stage functions (internal — called by provisionTool and legacy tools)
+// ---------------------------------------------------------------------------
+
+/** Stage 1: Install Node.js 22.x on a VM via NodeSource. */
+async function stageInstallNode(vmName) {
+  const npmCheck = await mp.exec(vmName, 'npm --version', { allowFailure: true });
+  if (npmCheck.exitCode === 0) {
+    return { skipped: true, message: `npm already available (v${npmCheck.stdout.trim()})` };
+  }
+  await mp.exec(vmName, 'apt-get update', { sudo: true, timeout: 180_000 });
+  await mp.exec(vmName, 'apt-get install -y ca-certificates curl gnupg', {
+    sudo: true,
+    timeout: 180_000,
+  });
+  await mp.exec(
+    vmName,
+    'curl -fsSL https://deb.nodesource.com/setup_22.x | bash -',
+    { sudo: true, timeout: 300_000 },
+  );
+  await mp.exec(vmName, 'apt-get install -y nodejs', {
+    sudo: true,
+    timeout: 180_000,
+  });
+  return { skipped: false, message: 'Node.js 22.x installed via NodeSource' };
+}

-
-
-
-
-
-
+/** Stage 2: Pack, transfer, install, and run create-portlama on host. */
+async function stageInstallPortlama() {
+  const tarball = await packPackage('create-portlama');
+  await mp.transfer(tarball, `${VM_HOST}:/tmp/create-portlama.tgz`);
+  await mp.exec(VM_HOST, 'npm install -g /tmp/create-portlama.tgz', {
+    sudo: true,
+    timeout: 120_000,
+  });
+  await mp.exec(VM_HOST, 'create-portlama --dev --skip-harden --yes', {
+    sudo: true,
+    timeout: 300_000,
+  });

-
-
-
+  // Patch panel.json to use the static IP instead of the DHCP IP that
+  // create-portlama auto-detected. The static IP is what dnsmasq resolves to
+  // and what agents/visitors use for connectivity.
+  const staticIp = VM_STATIC_IPS[VM_HOST];
+  if (staticIp) {
+    await mp.exec(
+      VM_HOST,
+      `sed -i 's/"ip": *"[^"]*"/"ip": "${staticIp}"/' /etc/portlama/panel.json`,
+      { sudo: true, timeout: 10_000 },
+    );
+    // Restart panel server to pick up the new IP
+    await mp.exec(VM_HOST, 'systemctl restart portlama-panel', {
       sudo: true,
-      timeout:
+      timeout: 15_000,
+      allowFailure: true,
     });
-
+  }

-
-
-    sudo: true,
-    timeout: 300_000,
-  });
-  steps.push('Portlama installed');
+  return { message: 'create-portlama installed and executed' };
+}

-
-
-
+/** Stage 3a: Run setup-host.sh (onboarding, certs, user creation). */
+async function stageSetupHost(domain) {
+  await transferTestScripts(VM_HOST);
+  const hostIp = VM_STATIC_IPS[VM_HOST] || await mp.getIp(VM_HOST);

-
-
-
-
-
+  const setupResult = await mp.exec(
+    VM_HOST,
+    `bash /tmp/e2e/setup-host.sh "${hostIp}" "${domain}"`,
+    { sudo: true, timeout: 180_000, allowFailure: true },
+  );

-
-
+  const ok = setupResult.exitCode === 0;
+  let credentials = null;

-
+  if (ok) {
     const credsResult = await mp.exec(
       VM_HOST,
       'cat /tmp/portlama-test-credentials.json',
       { sudo: true, allowFailure: true },
     );
-
-    let credentials = null;
     if (credsResult.exitCode === 0) {
       try {
         credentials = JSON.parse(credsResult.stdout);
-        updateState({ credentials, domain });
-        steps.push('Credentials extracted');
       } catch {
-
+        // Parse failure handled by caller
       }
     }
+  }

-
+  return { ok, credentials, error: ok ? null : setupResult.stderr.slice(-500) };
+}

-
-
-
+/** Stage 3b: Pack agent, transfer tarball + enrollment token, run setup-agent.sh. */
+async function stageSetupAgent(domain, enrollmentToken) {
+  const hostIp = VM_STATIC_IPS[VM_HOST] || await mp.getIp(VM_HOST);
+
+  // Pack and transfer portlama-agent tarball
+  const agentTarball = await packPackage('portlama-agent');
+  await mp.transfer(agentTarball, `${VM_AGENT}:/tmp/portlama-agent.tgz`);
+
+  await transferTestScripts(VM_AGENT);
+
+  // Transfer enrollment token via file (never in process args)
+  const tmpTokenFile = `/tmp/.portlama-enroll-token-${Date.now()}`;
+  try {
+    fs.writeFileSync(tmpTokenFile, enrollmentToken, { mode: 0o600 });
+    await mp.transfer(tmpTokenFile, `${VM_AGENT}:/tmp/.enroll-token`);
+  } finally {
+    try { fs.unlinkSync(tmpTokenFile); } catch { /* may not exist */ }
+  }
+  await mp.exec(VM_AGENT, 'chmod 600 /tmp/.enroll-token', { sudo: true });
+
+  const result = await mp.exec(
+    VM_AGENT,
+    `bash /tmp/e2e/setup-agent.sh "${hostIp}" "${domain}" "$(cat /tmp/.enroll-token)"`,
+    { sudo: true, timeout: 180_000, allowFailure: true },
+  );
+
+  await mp.exec(VM_AGENT, 'rm -f /tmp/.enroll-token', { sudo: true, allowFailure: true });
+
+  return { ok: result.exitCode === 0, error: result.exitCode === 0 ? null : result.stderr.slice(-500) };
+}
+
+/** Stage 3c: Transfer scripts and run setup-visitor.sh. */
+async function stageSetupVisitor(domain) {
+  const hostIp = VM_STATIC_IPS[VM_HOST] || await mp.getIp(VM_HOST);
+  await transferTestScripts(VM_VISITOR);
+
+  const result = await mp.exec(
+    VM_VISITOR,
+    `bash /tmp/e2e/setup-visitor.sh "${hostIp}" "${domain}"`,
+    { sudo: true, timeout: 120_000, allowFailure: true },
+  );
+
+  return { ok: result.exitCode === 0, error: result.exitCode === 0 ? null : result.stderr.slice(-500) };
+}
+
+// ---------------------------------------------------------------------------
+// Tier snapshot helpers
+// ---------------------------------------------------------------------------
+
+/** Create a tier snapshot for the given VMs. Stops, snapshots, restarts. */
+async function createTierSnapshot(tierName, vmNames) {
+  const snapshotName = TIER_SNAPSHOT_PREFIX + tierName;
+
+  // Delete existing tier snapshot if present (overwrite)
+  for (const vm of vmNames) {
+    const existing = await mp.listSnapshots(vm);
+    if (existing.includes(snapshotName)) {
+      await mp.deleteSnapshot(vm, snapshotName);
+    }
+  }
+
+  await Promise.all(vmNames.map((vm) => mp.run(['stop', vm], { allowFailure: true })));
+  await Promise.all(vmNames.map((vm) => mp.snapshot(vm, snapshotName)));
+  await Promise.all(vmNames.map((vm) => mp.run(['start', vm], { timeout: 600_000 })));
+
+  recordTierSnapshot(tierName, vmNames);
+}
+
+/** Restore VMs to a tier snapshot. Stops, restores, restarts. */
+async function restoreTierSnapshot(tierName, vmNames) {
+  const snapshotName = TIER_SNAPSHOT_PREFIX + tierName;
+
+  await Promise.all(vmNames.map((vm) => mp.run(['stop', vm], { allowFailure: true })));
+  await Promise.all(vmNames.map((vm) => mp.restore(vm, snapshotName)));
+  await Promise.all(vmNames.map((vm) => mp.run(['start', vm], { timeout: 600_000 })));
+
+  for (const vm of vmNames) {
+    setVmTier(vm, tierName);
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Smart provisioning tool
+// ---------------------------------------------------------------------------
+
+export const provisionTool = {
+  name: 'provision',
+  description:
+    'Smart provisioning with layered snapshots. Restores from cached tier snapshots ' +
+    'when possible, only runs stages that are needed. Auto-snapshots after each tier ' +
+    'for fast future restores. Tiers: node-ready -> installed -> provisioned.',
+  inputSchema: z.object({
+    targetTier: z
+      .enum(['node-ready', 'installed', 'provisioned'])
+      .default('provisioned')
+      .describe('Target tier to reach'),
+    domain: z
+      .string()
+      .regex(/^[a-zA-Z0-9][a-zA-Z0-9.-]+$/)
+      .default(TEST_DOMAIN)
+      .describe('Test domain'),
+    skipSnapshots: z.coerce
+      .boolean()
+      .default(false)
+      .describe('Skip auto-snapshotting after each tier (faster but no cache)'),
+    forceReprovision: z.coerce
+      .boolean()
+      .default(false)
+      .describe('Ignore existing snapshots and reprovision from scratch'),
+  }),
+  async handler({ targetTier, domain, skipSnapshots, forceReprovision } = {}) {
+    targetTier = targetTier || 'provisioned';
+    domain = domain || TEST_DOMAIN;
+    const steps = [];
+    const targetLevel = TIERS[targetTier].level;
+
+    try {
+      // --- TIER 1: node-ready ---
+      if (targetLevel >= 1) {
+        const tier = 'node-ready';
+        const tierDef = TIERS[tier];
+        // Determine which VMs need Node.js (all that apply for the final target)
+        const targetVms = targetLevel >= 3
+          ? tierDef.appliesTo.map((v) => VM_NAME_MAP[v])
+          : [VM_HOST];
+
+        const canRestore = !forceReprovision && hasTierSnapshot(tier, targetVms);
+        if (canRestore) {
+          await restoreTierSnapshot(tier, targetVms);
+          steps.push(`Restored tier "${tier}" from snapshot (${targetVms.length} VMs)`);
+        } else {
+          // Install Node.js on each VM that needs it
+          for (const vm of targetVms) {
+            const currentTier = getVmTier(vm);
+            const currentLevel = currentTier ? (TIERS[currentTier]?.level || 0) : 0;
+            if (currentLevel >= 1) {
+              steps.push(`${vm}: already at tier "${currentTier}" — skipping Node.js install`);
+            } else {
+              const result = await stageInstallNode(vm);
+              steps.push(`${vm}: ${result.message}`);
+              setVmTier(vm, tier);
+            }
+          }
+
+          if (!skipSnapshots) {
+            await createTierSnapshot(tier, targetVms);
+            steps.push(`Snapshot "${TIER_SNAPSHOT_PREFIX}${tier}" created (${targetVms.length} VMs)`);
+          }
+        }
+      }
+
+      // --- TIER 2: installed (host only) ---
+      if (targetLevel >= 2) {
+        const tier = 'installed';
+        const targetVms = [VM_HOST];
+
+        const canRestore = !forceReprovision && hasTierSnapshot(tier, targetVms);
+        if (canRestore) {
+          await restoreTierSnapshot(tier, targetVms);
+          steps.push(`Restored tier "${tier}" from snapshot`);
+        } else {
+          const currentTier = getVmTier(VM_HOST);
+          const currentLevel = currentTier ? (TIERS[currentTier]?.level || 0) : 0;
+          if (currentLevel >= 2) {
+            steps.push(`Host already at tier "${currentTier}" — skipping Portlama install`);
+          } else {
+            const result = await stageInstallPortlama();
+            steps.push(`Host: ${result.message}`);
+            setVmTier(VM_HOST, tier);
+          }
+
+          if (!skipSnapshots) {
+            await createTierSnapshot(tier, targetVms);
+            steps.push(`Snapshot "${TIER_SNAPSHOT_PREFIX}${tier}" created`);
+          }
+        }
+      }
+
+      // --- TIER 3: provisioned (coordinated) ---
+      if (targetLevel >= 3) {
+        const tier = 'provisioned';
+        const targetVms = ALL_VMS;
+
+        const canRestore = !forceReprovision && hasTierSnapshot(tier, targetVms);
+        if (canRestore) {
+          await restoreTierSnapshot(tier, targetVms);
+          // Restore credentials from state (they survive in state.json)
+          steps.push(`Restored tier "${tier}" from snapshot (all 3 VMs)`);
+        } else {
+          // 3a: Setup host
+          const hostResult = await stageSetupHost(domain);
+          if (!hostResult.ok) {
+            return {
+              content: [{
+                type: 'text',
+                text: JSON.stringify({
+                  ok: false,
+                  steps: [...steps, 'setup-host.sh failed'],
+                  error: hostResult.error,
+                }, null, 2),
+              }],
+            };
+          }
+
+          if (hostResult.credentials) {
+            updateState({ credentials: hostResult.credentials, domain });
+          }
+          setVmState(VM_HOST, { provisioned: true, domain });
+          setVmTier(VM_HOST, tier);
+          steps.push('Host: setup completed, credentials extracted');
+
+          // 3b + 3c: Setup agent and visitor in parallel
+          const enrollmentToken = hostResult.credentials?.enrollmentToken;
+          if (!enrollmentToken) {
+            return {
+              content: [{
+                type: 'text',
+                text: JSON.stringify({
+                  ok: false,
+                  steps: [...steps, 'No enrollment token in credentials'],
+                  error: 'Credentials extraction failed',
+                }, null, 2),
+              }],
+            };
+          }
+
+          const [agentResult, visitorResult] = await Promise.all([
+            stageSetupAgent(domain, enrollmentToken),
+            stageSetupVisitor(domain),
+          ]);
+
+          setVmState(VM_AGENT, { provisioned: agentResult.ok });
+          setVmState(VM_VISITOR, { provisioned: visitorResult.ok });
+
+          if (agentResult.ok) {
+            setVmTier(VM_AGENT, tier);
+            steps.push('Agent: setup completed');
+          } else {
+            steps.push(`Agent: setup failed — ${agentResult.error}`);
+          }
+
+          if (visitorResult.ok) {
+            setVmTier(VM_VISITOR, tier);
+            steps.push('Visitor: setup completed');
+          } else {
+            steps.push(`Visitor: setup failed — ${visitorResult.error}`);
+          }
+
+          const allOk = agentResult.ok && visitorResult.ok;
+          if (allOk && !skipSnapshots) {
+            await createTierSnapshot(tier, targetVms);
+            steps.push(`Snapshot "${TIER_SNAPSHOT_PREFIX}${tier}" created (all 3 VMs)`);
+          }
+
+          if (!allOk) {
+            return {
+              content: [{
+                type: 'text',
+                text: JSON.stringify({ ok: false, targetTier, steps }, null, 2),
+              }],
+            };
+          }
+        }
+      }
+
+      return {
+        content: [{
           type: 'text',
-          text: JSON.stringify(
-
-
-
-
-
-
-
-
-
-
-
+          text: JSON.stringify({ ok: true, targetTier, steps }, null, 2),
+        }],
+      };
+    } catch (err) {
+      return {
+        content: [{
+          type: 'text',
+          text: JSON.stringify({
+            ok: false,
+            targetTier,
+            steps,
+            error: err.message,
+          }, null, 2),
+        }],
+      };
+    }
   },
 };

-
-
+// ---------------------------------------------------------------------------
+// Legacy provisioning tools (delegate to stage functions)
+// ---------------------------------------------------------------------------
+
+export const provisionHostTool = {
+  name: 'provision_host',
   description:
-    '
-    '
+    'Pack create-portlama, transfer to host VM, install, and run setup. ' +
+    'Consider using "provision" instead for tier-aware smart provisioning.',
   inputSchema: z.object({
     domain: z.string().regex(/^[a-zA-Z0-9][a-zA-Z0-9.-]+$/).default(TEST_DOMAIN).describe('Test domain'),
   }),
   async handler({ domain } = {}) {
     domain = domain || TEST_DOMAIN;
-    const state = loadState();
-    const agentP12Password = state.credentials?.agentP12Password;
-    if (!agentP12Password) {
-      return {
-        content: [
-          {
-            type: 'text',
-            text: JSON.stringify({
-              ok: false,
-              error: 'No agent P12 password in state — run provision_host first',
-            }),
-          },
-        ],
-      };
-    }
-
     const steps = [];
-    const hostIp = await mp.getIp(VM_HOST);

-
-
-
-      'cp /etc/portlama/pki/agents/test-agent/client.p12 /tmp/agent-export.p12 && chmod 644 /tmp/agent-export.p12',
-      { sudo: true },
-    );
-    const tmpP12 = `/tmp/portlama-agent-${Date.now()}`;
-    try {
-      await mp.transferFrom(`${VM_HOST}:/tmp/agent-export.p12`, tmpP12);
-      await mp.transfer(tmpP12, `${VM_AGENT}:/tmp/agent.p12`);
-      steps.push('Agent P12 transferred');
-    } finally {
-      // Clean up temp P12 from host machine
-      try { fs.unlinkSync(tmpP12); } catch { /* may not exist */ }
-    }
+    const nodeResult = await stageInstallNode(VM_HOST);
+    steps.push(`Node.js: ${nodeResult.message}`);
+    setVmTier(VM_HOST, 'node-ready');

-
-
+    const installResult = await stageInstallPortlama();
+    steps.push(installResult.message);
+    setVmTier(VM_HOST, 'installed');

-
-
-
-
-
-
-      try { fs.unlinkSync(tmpPassFile); } catch { /* may not exist */ }
+    const setupResult = await stageSetupHost(domain);
+    steps.push(setupResult.ok ? 'setup-host.sh completed' : 'setup-host.sh failed');
+
+    if (setupResult.credentials) {
+      updateState({ credentials: setupResult.credentials, domain });
+      steps.push('Credentials extracted');
     }
-    await mp.exec(VM_AGENT, 'chmod 600 /tmp/.agent-p12-pass', { sudo: true });

-
-
-
-
-
-
+    setVmState(VM_HOST, { provisioned: setupResult.ok, domain });
+    if (setupResult.ok) setVmTier(VM_HOST, 'provisioned');
+
+    return {
+      content: [{
+        type: 'text',
+        text: JSON.stringify({
+          ok: setupResult.ok,
+          steps,
+          ...(setupResult.ok ? {} : { error: setupResult.error }),
+        }, null, 2),
+      }],
+    };
+  },
+};

-
-
+export const provisionAgentTool = {
+  name: 'provision_agent',
+  description:
+    'Transfer agent tarball and run enrollment on the agent VM. ' +
+    'Requires host to be provisioned first (needs enrollment token).',
+  inputSchema: z.object({
+    domain: z.string().regex(/^[a-zA-Z0-9][a-zA-Z0-9.-]+$/).default(TEST_DOMAIN).describe('Test domain'),
+  }),
+  async handler({ domain } = {}) {
+    domain = domain || TEST_DOMAIN;
+    const state = loadState();
+    const enrollmentToken = state.credentials?.enrollmentToken;
+    if (!enrollmentToken) {
+      return {
+        content: [{
+          type: 'text',
+          text: JSON.stringify({
+            ok: false,
+            error: 'No enrollment token in state — run provision_host first',
+          }),
+        }],
+      };
+    }

-    const
-
-
+    const result = await stageSetupAgent(domain, enrollmentToken);
+    setVmState(VM_AGENT, { provisioned: result.ok });
+    if (result.ok) setVmTier(VM_AGENT, 'provisioned');

     return {
-      content: [
-
-
-
-
-
-
-
-        },
-      ],
+      content: [{
+        type: 'text',
+        text: JSON.stringify({
+          ok: result.ok,
+          steps: [result.ok ? 'setup-agent.sh completed' : 'setup-agent.sh failed'],
+          ...(result.ok ? {} : { error: result.error }),
+        }, null, 2),
+      }],
     };
   },
 };
@@ -238,36 +543,28 @@ export const provisionVisitorTool = {
   }),
   async handler({ domain } = {}) {
     domain = domain || TEST_DOMAIN;
-    const steps = [];
-    const hostIp = await mp.getIp(VM_HOST);
-
-    await transferTestScripts(VM_VISITOR);

-    const result = await
-
-
-      { sudo: true, timeout: 120_000, allowFailure: true },
-    );
-
-    const ok = result.exitCode === 0;
-    steps.push(ok ? 'setup-visitor.sh completed' : `setup-visitor.sh failed (exit ${result.exitCode})`);
-    setVmState(VM_VISITOR, { provisioned: ok });
+    const result = await stageSetupVisitor(domain);
+    setVmState(VM_VISITOR, { provisioned: result.ok });
+    if (result.ok) setVmTier(VM_VISITOR, 'provisioned');

     return {
-      content: [
-
-
-
-
-
-
-
-        },
-      ],
+      content: [{
+        type: 'text',
+        text: JSON.stringify({
+          ok: result.ok,
+          steps: [result.ok ? 'setup-visitor.sh completed' : 'setup-visitor.sh failed'],
+          ...(result.ok ? {} : { error: result.error }),
+        }, null, 2),
+      }],
     };
   },
 };

+// ---------------------------------------------------------------------------
+// Hot reload tool (unchanged)
+// ---------------------------------------------------------------------------
+
 export const hotReloadTool = {
   name: 'hot_reload',
   description:
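
For illustration, the new provision tool can be driven directly through its handler (the handler signature and return shape come from the diff above; the Multipass VMs must already exist for this to succeed):

    import { provisionTool } from './tools/provision.js';

    // Reach only the 'installed' tier and skip the automatic tier snapshots.
    const result = await provisionTool.handler({
      targetTier: 'installed',
      skipSnapshots: true,
    });

    // Handlers return MCP-style content: one text item carrying a JSON payload.
    const payload = JSON.parse(result.content[0].text);
    console.log(payload.ok, payload.steps);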
package/src/tools/snapshots.js
CHANGED

@@ -4,31 +4,72 @@

 import { z } from 'zod';
 import * as mp from '../lib/multipass.js';
-import {
+import {
+  ALL_VMS,
+  VM_NAME_MAP,
+  CHECKPOINTS,
+  TIERS,
+  TIER_SNAPSHOT_PREFIX,
+} from '../config.js';
+import {
+  loadState,
+  setVmTier,
+  recordTierSnapshot,
+} from '../lib/state.js';

 export const snapshotCreateTool = {
   name: 'snapshot_create',
   description:
     'Create a named snapshot of one or all VMs. Use checkpoint names like ' +
-    '"post-create" or "post-setup" for standard save-points,
+    '"post-create" or "post-setup" for standard save-points, a tier name for ' +
+    'tier snapshots, or any custom name.',
   inputSchema: z.object({
-    name: z
+    name: z
+      .string()
+      .regex(/^[a-zA-Z0-9][a-zA-Z0-9_-]*$/)
+      .optional()
+      .describe('Snapshot name (e.g. "post-setup", "before-plugin-fix"). Required unless tier is set.'),
+    tier: z
+      .enum(['node-ready', 'installed', 'provisioned'])
+      .optional()
+      .describe('Tier name — auto-generates snapshot name as "tier-<tierName>" and records in state'),
     vms: z
       .array(z.enum(['host', 'agent', 'visitor']))
       .optional()
       .describe('Which VMs to snapshot (default: all three)'),
   }),
-  async handler({ name, vms } = {}) {
+  async handler({ name, tier, vms } = {}) {
+    if (!name && !tier) {
+      return {
+        content: [{
+          type: 'text',
+          text: JSON.stringify({ ok: false, error: 'Either "name" or "tier" must be provided' }, null, 2),
+        }],
+      };
+    }
+
+    const snapshotName = tier ? TIER_SNAPSHOT_PREFIX + tier : name;
     const targets = vms ? vms.map((v) => VM_NAME_MAP[v]) : ALL_VMS;
     const results = [];

+    // Delete existing snapshot if overwriting a tier
+    if (tier) {
+      for (const vm of targets) {
+        const existing = await mp.listSnapshots(vm);
+        if (existing.includes(snapshotName)) {
+          await mp.deleteSnapshot(vm, snapshotName);
+          results.push(`${vm}: deleted existing "${snapshotName}"`);
+        }
+      }
+    }
+
     // Stop all VMs in parallel (required for snapshots)
     await Promise.all(targets.map((vm) => mp.run(['stop', vm], { allowFailure: true })));
     results.push(`Stopped ${targets.length} VMs`);

     // Snapshot all VMs in parallel
-    await Promise.all(targets.map((vm) => mp.snapshot(vm,
-    results.push(`Created snapshot "${
+    await Promise.all(targets.map((vm) => mp.snapshot(vm, snapshotName)));
+    results.push(`Created snapshot "${snapshotName}" on ${targets.length} VMs`);

     // Restart all VMs in parallel
     await Promise.all(
@@ -38,6 +79,12 @@ export const snapshotCreateTool = {
       }),
     );

+    // Record tier snapshot in state
+    if (tier) {
+      recordTierSnapshot(tier, targets);
+      results.push(`Recorded tier "${tier}" in state`);
+    }
+
     return {
       content: [
         {
@@ -53,15 +100,34 @@ export const snapshotRestoreTool = {
   name: 'snapshot_restore',
   description:
     'Restore one or all VMs to a named snapshot. This resets the VM to the ' +
-    'exact state when the snapshot was taken — much faster than reprovisioning.'
+    'exact state when the snapshot was taken — much faster than reprovisioning. ' +
+    'Use "tier" param for tier-aware restores that update VM tier state.',
   inputSchema: z.object({
-    name: z
+    name: z
+      .string()
+      .regex(/^[a-zA-Z0-9][a-zA-Z0-9_-]*$/)
+      .optional()
+      .describe('Snapshot name to restore. Required unless tier is set.'),
+    tier: z
+      .enum(['node-ready', 'installed', 'provisioned'])
+      .optional()
+      .describe('Tier name — uses "tier-<tierName>" as snapshot name and updates VM tier state'),
     vms: z
       .array(z.enum(['host', 'agent', 'visitor']))
       .optional()
       .describe('Which VMs to restore (default: all three)'),
   }),
-  async handler({ name, vms } = {}) {
+  async handler({ name, tier, vms } = {}) {
+    if (!name && !tier) {
+      return {
+        content: [{
+          type: 'text',
+          text: JSON.stringify({ ok: false, error: 'Either "name" or "tier" must be provided' }, null, 2),
+        }],
+      };
+    }
+
+    const snapshotName = tier ? TIER_SNAPSHOT_PREFIX + tier : name;
     const targets = vms ? vms.map((v) => VM_NAME_MAP[v]) : ALL_VMS;
     const results = [];

@@ -71,8 +137,8 @@ export const snapshotRestoreTool = {
     // Restore all VMs in parallel
     await Promise.all(
       targets.map(async (vm) => {
-        await mp.restore(vm,
-        results.push(`${vm}: restored to "${
+        await mp.restore(vm, snapshotName);
+        results.push(`${vm}: restored to "${snapshotName}"`);
       }),
     );

@@ -88,6 +154,14 @@ export const snapshotRestoreTool = {
       }),
     );

+    // Update tier state after restore
+    if (tier) {
+      for (const vm of targets) {
+        setVmTier(vm, tier);
+      }
+      results.push(`Updated tier state to "${tier}" for ${targets.length} VMs`);
+    }
+
     return {
       content: [
         {
@@ -101,9 +175,11 @@ export const snapshotRestoreTool = {

 export const snapshotListTool = {
   name: 'snapshot_list',
-  description: 'List all available snapshots across VMs, plus known checkpoint descriptions.',
+  description: 'List all available snapshots across VMs, plus known checkpoint descriptions and tier state.',
   inputSchema: z.object({}),
   async handler() {
+    const state = loadState();
+
     // Query all VMs in parallel
     const entries = await Promise.all(
       ALL_VMS.map(async (vmName) => [vmName, await mp.listSnapshots(vmName)]),
@@ -115,7 +191,12 @@ export const snapshotListTool = {
         {
           type: 'text',
           text: JSON.stringify(
-            {
+            {
+              snapshots,
+              checkpoints: CHECKPOINTS,
+              tierSnapshots: state.tierSnapshots || {},
+              currentTiers: state.tiers || {},
+            },
             null,
             2,
           ),
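
A hypothetical walk through the tier-aware snapshot path, using the tool handlers as defined above:

    import { snapshotCreateTool, snapshotRestoreTool } from './tools/snapshots.js';

    // Snapshot only the host at the 'installed' tier. The snapshot is named
    // "tier-installed" and the tier is recorded in state.
    await snapshotCreateTool.handler({ tier: 'installed', vms: ['host'] });

    // Roll the host back to that tier later; its tier state is updated to match.
    await snapshotRestoreTool.handler({ tier: 'installed', vms: ['host'] });

    // Plain named snapshots keep working exactly as before.
    await snapshotCreateTool.handler({ name: 'before-plugin-fix' });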
package/src/tools/status.js
CHANGED
package/src/tools/tests.js
CHANGED

@@ -27,6 +27,7 @@ import {
   VM_HOST,
   VM_AGENT,
   VM_VISITOR,
+  VM_STATIC_IPS,
   REPO_ROOT,
   THREE_VM_DIR,
   SINGLE_VM_DIR,
@@ -55,10 +56,12 @@ async function resetAuthelia() {

 /** Build the test environment object with VM IPs and credentials. */
 async function buildTestEnv(state) {
+  // Prefer static IPs from config (deterministic across snapshot restores),
+  // fall back to Multipass query for VMs without static assignments.
   const [hostIp, agentIp, visitorIp] = await Promise.all([
-    mp.getIp(VM_HOST),
-    mp.getIp(VM_AGENT),
-    mp.getIp(VM_VISITOR),
+    VM_STATIC_IPS[VM_HOST] || mp.getIp(VM_HOST),
+    VM_STATIC_IPS[VM_AGENT] || mp.getIp(VM_AGENT),
+    VM_STATIC_IPS[VM_VISITOR] || mp.getIp(VM_VISITOR),
   ]);
   const domain = state.domain || TEST_DOMAIN;

@@ -68,7 +71,7 @@ async function buildTestEnv(state) {
     VISITOR_IP: visitorIp || '',
     TEST_DOMAIN: domain,
     ADMIN_PASSWORD: 'not-used-mTLS-only',
-    AGENT_P12_PASSWORD: state.credentials?.agentP12Password || '',
+    AGENT_P12_PASSWORD: state.credentials?.agentP12Password || 'not-used-enrollment-flow',
     TEST_USER: 'testuser',
     TEST_USER_PASSWORD: 'TestPassword-E2E-123',
     LOG_LEVEL: '1',
@@ -236,16 +239,15 @@ export const testRunAllTool = {
     // Single-VM tests
     if (suite === 'single-vm' || suite === 'both') {
       // Transfer single-VM test scripts to host
-      await mp.exec(VM_HOST, 'mkdir -p /tmp/e2e-single', { sudo: true });
+      await mp.exec(VM_HOST, 'mkdir -p /tmp/e2e-single && chmod 777 /tmp/e2e-single', { sudo: true });
       const files = fs.readdirSync(SINGLE_VM_DIR).filter((f) => f.endsWith('.sh'));
-
-
-
-
-
-
-
-      );
+      // Transfer sequentially to avoid overwhelming the VM's SSH daemon
+      for (const file of files) {
+        await mp.transfer(
+          path.join(SINGLE_VM_DIR, file),
+          `${VM_HOST}:/tmp/e2e-single/${file}`,
+        );
+      }

       for (const [, file] of Object.entries(getSingleVmTests()).sort(
         ([a], [b]) => Number(a) - Number(b),
@@ -442,7 +444,7 @@ export const testPublishTool = {
         AGENT_IP: env.AGENT_IP,
         VISITOR_IP: env.VISITOR_IP,
         TEST_DOMAIN: env.TEST_DOMAIN,
-        AGENT_P12_PASSWORD: state.credentials?.agentP12Password || '',
+        AGENT_P12_PASSWORD: state.credentials?.agentP12Password || 'not-used-enrollment-flow',
       },
       all: true,
     },
package/src/tools/vm.js
CHANGED

@@ -4,8 +4,52 @@

 import { z } from 'zod';
 import * as mp from '../lib/multipass.js';
-import { PROFILES, ALL_VMS, VM_NAME_MAP } from '../config.js';
-import { setVmState, removeVmState, updateState } from '../lib/state.js';
+import { PROFILES, ALL_VMS, VM_NAME_MAP, VM_STATIC_IPS } from '../config.js';
+import { setVmState, removeVmState, updateState, clearTierSnapshots } from '../lib/state.js';
+
+/**
+ * Apply a static IP to a VM post-boot via netplan.
+ * Writes a netplan config, removes the DHCP default, and applies.
+ * Multipass agent stays connected because it communicates via virtio, not IP.
+ */
+async function applyStaticIp(vmName) {
+  const ip = VM_STATIC_IPS[vmName];
+  if (!ip) return null;
+
+  // Add a secondary static IP on 10.13.37.0/24 via `ip addr add`. This subnet is
+  // completely outside the Multipass DHCP range, eliminating collision risk. DHCP
+  // stays untouched — Multipass connectivity and internet access are unaffected.
+  // Inter-VM traffic on 10.13.37.x works at L2 because all VMs share the same bridge.
+  await mp.exec(
+    vmName,
+    `ip addr add ${ip}/24 dev enp0s1 2>/dev/null || true`,
+    { sudo: true, timeout: 10_000 },
+  );
+
+  // Create a systemd service to re-apply on boot (survives snapshot restore)
+  const unitFile = [
+    '[Unit]',
+    'Description=Static IP for E2E testing',
+    'After=network-online.target',
+    'Wants=network-online.target',
+    '',
+    '[Service]',
+    'Type=oneshot',
+    `ExecStart=/sbin/ip addr add ${ip}/24 dev enp0s1`,
+    'RemainAfterExit=yes',
+    '',
+    '[Install]',
+    'WantedBy=multi-user.target',
+  ].join('\\n');
+
+  await mp.exec(
+    vmName,
+    `printf '${unitFile}\\n' > /etc/systemd/system/portlama-static-ip.service && systemctl daemon-reload && systemctl enable portlama-static-ip`,
+    { sudo: true, timeout: 15_000 },
+  );
+
+  return ip;
+}

 export const vmCreateTool = {
   name: 'vm_create',
@@ -41,17 +85,23 @@ export const vmCreateTool = {
       }),
     );

-    // Create VMs in parallel
+    // Create VMs in parallel (DHCP initially)
     await Promise.all(
       targets.map(async (name) => {
         await mp.launch(name, specs);
-        const ip = await mp.getIp(name);
-        setVmState(name, { ip, profile: p, state: 'running' });
-        results.push(`Created ${name} (${ip}) — ${specs.cpus} CPU, ${specs.memory} RAM`);
       }),
     );

+    // Apply static IPs post-boot (sequential — each VM's netplan apply is fast)
+    for (const name of targets) {
+      const staticIp = await applyStaticIp(name);
+      const ip = staticIp || await mp.getIp(name);
+      setVmState(name, { ip, profile: p, state: 'running' });
+      results.push(`Created ${name} (${ip}) — ${specs.cpus} CPU, ${specs.memory} RAM`);
+    }
+
     updateState({ profile: p });
+    clearTierSnapshots();

     return {
       content: [
@@ -108,6 +158,11 @@ export const vmDeleteTool = {
       }),
     );

+    // Invalidate tier snapshots when deleting all VMs
+    if (!vms || targets.length === 3) {
+      clearTierSnapshots();
+    }
+
     return {
       content: [
         {