@lamalibre/install-portlama-e2e-mcp 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,353 @@
1
+ // ============================================================================
2
+ // Provisioning Tools — provision_host, provision_agent, provision_visitor, hot_reload
3
+ // ============================================================================
4
+
5
+ import { z } from 'zod';
6
+ import path from 'node:path';
7
+ import fs from 'node:fs';
8
+ import { execa } from 'execa';
9
+ import * as mp from '../lib/multipass.js';
10
+ import {
11
+ VM_HOST,
12
+ VM_AGENT,
13
+ VM_VISITOR,
14
+ REPO_ROOT,
15
+ THREE_VM_DIR,
16
+ TEST_DOMAIN,
17
+ } from '../config.js';
18
+ import { loadState, updateState, setVmState } from '../lib/state.js';
19
+
20
/** Pack a workspace package with `npm pack` and return the tarball path. */
async function packPackage(packageName) {
  const cwd = path.join(REPO_ROOT, 'packages', packageName);
  const { stdout } = await execa('npm', ['pack', '--pack-destination', '/tmp'], { cwd });
  // `npm pack` may emit notices before the filename; the tarball name is the final line.
  const lines = stdout.trim().split('\n');
  return `/tmp/${lines[lines.length - 1]}`;
}
29
+
30
/** Transfer the e2e test scripts (and optional API helpers) to a VM. */
async function transferTestScripts(vmName) {
  // Shared staging dir; world-writable so non-root multipass transfers can land there.
  await mp.exec(vmName, 'mkdir -p /tmp/e2e && chmod 777 /tmp/e2e', { sudo: true });

  // Push every shell script from the three-VM test directory, in parallel.
  const scripts = fs.readdirSync(THREE_VM_DIR).filter((name) => name.endsWith('.sh'));
  const transfers = scripts.map((name) =>
    mp.transfer(path.join(THREE_VM_DIR, name), `${vmName}:/tmp/e2e/${name}`),
  );
  await Promise.all(transfers);

  // VM-side API helpers are optional extras; failures are deliberately ignored.
  const copyHelper = async (helper) => {
    const helperPath = path.join(THREE_VM_DIR, helper);
    try {
      await mp.transfer(helperPath, `${vmName}:/tmp/${helper}`);
      await mp.exec(vmName, `chmod +x /tmp/${helper}`, { sudo: true });
    } catch {
      // Helper may not exist in this checkout — best-effort only.
    }
  };
  await Promise.all(['vm-api-helper.sh', 'vm-api-status-helper.sh'].map(copyHelper));
}
55
+
56
export const provisionHostTool = {
  name: 'provision_host',
  description:
    'Pack create-portlama, transfer to host VM, install, and run setup. ' +
    'This is the full provisioning pipeline for the host VM.',
  inputSchema: z.object({
    domain: z.string().regex(/^[a-zA-Z0-9][a-zA-Z0-9.-]+$/).default(TEST_DOMAIN).describe('Test domain'),
  }),
  /**
   * Provision the host VM end-to-end: pack the installer tarball, install it
   * on the VM, run it, execute setup-host.sh, and capture the generated test
   * credentials into state.
   * @param {{ domain?: string }} [args] - Test domain (defaults to TEST_DOMAIN).
   * @returns MCP text content containing { ok, steps, error? } as JSON.
   */
  async handler({ domain } = {}) {
    // `??` rather than `||`: only null/undefined fall back to the default,
    // and we avoid mutating the destructured parameter.
    const targetDomain = domain ?? TEST_DOMAIN;
    const steps = [];

    // 1. Pack the installer from the workspace.
    const tarball = await packPackage('create-portlama');
    steps.push(`Packed installer: ${tarball}`);

    // 2. Ensure npm is available on the host VM.
    await mp.exec(VM_HOST, 'apt-get install -y npm', {
      sudo: true,
      timeout: 120_000,
    });
    steps.push('npm installed on host');

    // 3. Transfer the tarball and install it globally.
    await mp.transfer(tarball, `${VM_HOST}:/tmp/create-portlama.tgz`);
    await mp.exec(VM_HOST, 'npm install -g /tmp/create-portlama.tgz', {
      sudo: true,
      timeout: 120_000,
    });
    steps.push('create-portlama installed');

    // 4. Run the installer (dev mode, no hardening, non-interactive).
    await mp.exec(VM_HOST, 'create-portlama --dev --skip-harden --yes', {
      sudo: true,
      timeout: 300_000,
    });
    steps.push('Portlama installed');

    // 5. Transfer test scripts and run the host setup script.
    await transferTestScripts(VM_HOST);
    const hostIp = await mp.getIp(VM_HOST);

    const setupResult = await mp.exec(
      VM_HOST,
      `bash /tmp/e2e/setup-host.sh "${hostIp}" "${targetDomain}"`,
      { sudo: true, timeout: 180_000, allowFailure: true },
    );

    const ok = setupResult.exitCode === 0;
    steps.push(ok ? 'setup-host.sh completed' : `setup-host.sh failed (exit ${setupResult.exitCode})`);

    // 6. Extract the credentials file written by setup-host.sh (best-effort;
    // it may be absent if setup failed early).
    const credsResult = await mp.exec(
      VM_HOST,
      'cat /tmp/portlama-test-credentials.json',
      { sudo: true, allowFailure: true },
    );

    if (credsResult.exitCode === 0) {
      try {
        const credentials = JSON.parse(credsResult.stdout);
        updateState({ credentials, domain: targetDomain });
        steps.push('Credentials extracted');
      } catch {
        steps.push('Warning: could not parse credentials');
      }
    }

    setVmState(VM_HOST, { provisioned: ok, domain: targetDomain });

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify(
            {
              ok,
              steps,
              // Guard stderr: exec results may omit it on some failure modes.
              ...(ok ? {} : { error: (setupResult.stderr ?? '').slice(-500) }),
            },
            null,
            2,
          ),
        },
      ],
    };
  },
};
145
+
146
export const provisionAgentTool = {
  name: 'provision_agent',
  description:
    'Transfer agent certificate and run setup on the agent VM. ' +
    'Requires host to be provisioned first (needs credentials).',
  inputSchema: z.object({
    domain: z.string().regex(/^[a-zA-Z0-9][a-zA-Z0-9.-]+$/).default(TEST_DOMAIN).describe('Test domain'),
  }),
  /**
   * Provision the agent VM: export the agent client P12 from the host,
   * transfer it plus the e2e scripts to the agent, and run setup-agent.sh.
   * Fails fast (structured error) when provision_host has not stored the
   * agent P12 password in state yet.
   * @param {{ domain?: string }} [args]
   * @returns MCP text content containing { ok, steps, error? } as JSON.
   */
  async handler({ domain } = {}) {
    // `??` rather than `||`; also avoids mutating the parameter.
    const targetDomain = domain ?? TEST_DOMAIN;
    const state = loadState();
    const agentP12Password = state.credentials?.agentP12Password;
    if (!agentP12Password) {
      return {
        content: [
          {
            type: 'text',
            text: JSON.stringify({
              ok: false,
              error: 'No agent P12 password in state — run provision_host first',
            }),
          },
        ],
      };
    }

    const steps = [];
    const hostIp = await mp.getIp(VM_HOST);

    // Export the P12 on the host; chmod 644 so multipass can read it out.
    await mp.exec(
      VM_HOST,
      'cp /etc/portlama/pki/agents/test-agent/client.p12 /tmp/agent-export.p12 && chmod 644 /tmp/agent-export.p12',
      { sudo: true },
    );
    const tmpP12 = `/tmp/portlama-agent-${Date.now()}`;
    try {
      await mp.transferFrom(`${VM_HOST}:/tmp/agent-export.p12`, tmpP12);
      await mp.transfer(tmpP12, `${VM_AGENT}:/tmp/agent.p12`);
      steps.push('Agent P12 transferred');
    } finally {
      // Clean up temp P12 from the local machine…
      try { fs.unlinkSync(tmpP12); } catch { /* may not exist */ }
      // …and remove the world-readable export from the host VM (previously
      // this file was left behind indefinitely with mode 644).
      await mp.exec(VM_HOST, 'rm -f /tmp/agent-export.p12', { sudo: true, allowFailure: true });
    }

    // Transfer test scripts to the agent VM.
    await transferTestScripts(VM_AGENT);

    // Transfer the P12 password via a 0600 file rather than a command argument.
    const tmpPassFile = `/tmp/.portlama-p12-pass-${Date.now()}`;
    try {
      fs.writeFileSync(tmpPassFile, agentP12Password, { mode: 0o600 });
      await mp.transfer(tmpPassFile, `${VM_AGENT}:/tmp/.agent-p12-pass`);
    } finally {
      try { fs.unlinkSync(tmpPassFile); } catch { /* may not exist */ }
    }
    await mp.exec(VM_AGENT, 'chmod 600 /tmp/.agent-p12-pass', { sudo: true });

    // Run setup — expands the password file into argv $3.
    // NOTE(review): the $(cat …) substitution still briefly exposes the
    // password in the VM's process listing while setup-agent.sh runs; having
    // the script read the file itself would close that gap — confirm against
    // setup-agent.sh before changing the invocation.
    const result = await mp.exec(
      VM_AGENT,
      `bash /tmp/e2e/setup-agent.sh "${hostIp}" "${targetDomain}" "$(cat /tmp/.agent-p12-pass)"`,
      { sudo: true, timeout: 120_000, allowFailure: true },
    );

    // Clean up the password file inside the VM regardless of setup outcome.
    await mp.exec(VM_AGENT, 'rm -f /tmp/.agent-p12-pass', { sudo: true, allowFailure: true });

    const ok = result.exitCode === 0;
    steps.push(ok ? 'setup-agent.sh completed' : `setup-agent.sh failed (exit ${result.exitCode})`);
    setVmState(VM_AGENT, { provisioned: ok });

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify(
            // Guard stderr: exec results may omit it on some failure modes.
            { ok, steps, ...(ok ? {} : { error: (result.stderr ?? '').slice(-500) }) },
            null,
            2,
          ),
        },
      ],
    };
  },
};
232
+
233
export const provisionVisitorTool = {
  name: 'provision_visitor',
  description: 'Run setup on the visitor VM. Requires host to be provisioned.',
  inputSchema: z.object({
    domain: z.string().regex(/^[a-zA-Z0-9][a-zA-Z0-9.-]+$/).default(TEST_DOMAIN).describe('Test domain'),
  }),
  /** Copy the e2e scripts to the visitor VM and run setup-visitor.sh. */
  async handler({ domain } = {}) {
    const effectiveDomain = domain || TEST_DOMAIN;
    const steps = [];
    const hostIp = await mp.getIp(VM_HOST);

    await transferTestScripts(VM_VISITOR);

    const setupCmd = `bash /tmp/e2e/setup-visitor.sh "${hostIp}" "${effectiveDomain}"`;
    const result = await mp.exec(VM_VISITOR, setupCmd, {
      sudo: true,
      timeout: 120_000,
      allowFailure: true,
    });

    const ok = result.exitCode === 0;
    if (ok) {
      steps.push('setup-visitor.sh completed');
    } else {
      steps.push(`setup-visitor.sh failed (exit ${result.exitCode})`);
    }
    setVmState(VM_VISITOR, { provisioned: ok });

    const payload = { ok, steps };
    if (!ok) {
      payload.error = result.stderr.slice(-500);
    }
    return {
      content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }],
    };
  },
};
270
+
271
export const hotReloadTool = {
  name: 'hot_reload',
  description:
    'Re-pack a specific workspace package, transfer it to the host VM, and restart ' +
    'the relevant service. Much faster than full reprovisioning — use during iteration.',
  inputSchema: z.object({
    package: z
      .enum([
        'panel-server',
        'panel-client',
        'create-portlama',
        'portlama-agent',
      ])
      .describe('Which package to reload'),
  }),
  /** Build, pack, push, install one workspace package on the host; restart its service if any. */
  async handler({ package: pkgName }) {
    const steps = [];
    // Only panel-server runs as a host service; the others need no restart here.
    const SERVICE_BY_PACKAGE = {
      'panel-server': 'portlama-panel',
      'panel-client': null, // static files, no service
      'create-portlama': null, // installer, no service
      'portlama-agent': null, // runs on agent VM, not host
    };

    try {
      // Build then pack the package locally.
      await execa('npm', ['run', 'build', '-w', `packages/${pkgName}`], { cwd: REPO_ROOT });
      steps.push(`Built ${pkgName}`);

      const tarball = await packPackage(pkgName);
      steps.push(`Packed: ${tarball}`);

      // Push the tarball to the host VM and install globally.
      const remotePath = `/tmp/${pkgName}.tgz`;
      await mp.transfer(tarball, `${VM_HOST}:${remotePath}`);
      steps.push('Transferred to host');

      await mp.exec(VM_HOST, `npm install -g ${remotePath}`, {
        sudo: true,
        timeout: 60_000,
      });
      steps.push('Installed on host');

      const service = SERVICE_BY_PACKAGE[pkgName];
      if (service) {
        await mp.exec(VM_HOST, `systemctl restart ${service}`, { sudo: true });
        // Give the unit a moment, then poll its status (best-effort).
        await mp.exec(VM_HOST, `sleep 2 && systemctl is-active ${service}`, {
          sudo: true,
          allowFailure: true,
        });
        steps.push(`Restarted ${service}`);
      }

      const body = JSON.stringify({ ok: true, package: pkgName, steps }, null, 2);
      return { content: [{ type: 'text', text: body }] };
    } catch (err) {
      const body = JSON.stringify(
        { ok: false, package: pkgName, steps, error: err.message },
        null,
        2,
      );
      return { content: [{ type: 'text', text: body }] };
    }
  },
};
@@ -0,0 +1,126 @@
1
+ // ============================================================================
2
+ // Snapshot Tools — snapshot_create, snapshot_restore, snapshot_list
3
+ // ============================================================================
4
+
5
+ import { z } from 'zod';
6
+ import * as mp from '../lib/multipass.js';
7
+ import { ALL_VMS, VM_NAME_MAP, CHECKPOINTS } from '../config.js';
8
+
9
export const snapshotCreateTool = {
  name: 'snapshot_create',
  description:
    'Create a named snapshot of one or all VMs. Use checkpoint names like ' +
    '"post-create" or "post-setup" for standard save-points, or any custom name.',
  inputSchema: z.object({
    name: z.string().regex(/^[a-zA-Z0-9][a-zA-Z0-9_-]*$/).describe('Snapshot name (e.g. "post-setup", "before-plugin-fix")'),
    vms: z
      .array(z.enum(['host', 'agent', 'visitor']))
      .optional()
      .describe('Which VMs to snapshot (default: all three)'),
  }),
  /**
   * Snapshot the selected VMs. Snapshots require stopped VMs, so this stops
   * them, snapshots, and restarts them. Fix: a snapshot failure (e.g. a
   * duplicate snapshot name) previously left every VM stopped and threw out
   * of the handler — the VMs are now always restarted and the error is
   * reported in the result payload instead.
   */
  async handler({ name, vms } = {}) {
    const targets = vms ? vms.map((v) => VM_NAME_MAP[v]) : ALL_VMS;
    const results = [];
    let snapshotError = null;

    // Stop all VMs in parallel (required for snapshots)
    await Promise.all(targets.map((vm) => mp.run(['stop', vm], { allowFailure: true })));
    results.push(`Stopped ${targets.length} VMs`);

    try {
      // Snapshot all VMs in parallel
      await Promise.all(targets.map((vm) => mp.snapshot(vm, name)));
      results.push(`Created snapshot "${name}" on ${targets.length} VMs`);
    } catch (err) {
      // Record the failure but fall through so the VMs get restarted below.
      snapshotError = err;
    }

    // Restart all VMs in parallel, tolerating individual start failures
    // (same graceful handling as snapshot_restore, for consistency).
    await Promise.all(
      targets.map(async (vm) => {
        try {
          await mp.run(['start', vm], { timeout: 600_000 });
          results.push(`${vm}: restarted`);
        } catch (err) {
          results.push(`${vm}: start failed (${err.message}) — may need manual start`);
        }
      }),
    );

    const ok = snapshotError === null;
    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify(
            { ok, snapshots: results, ...(ok ? {} : { error: snapshotError.message }) },
            null,
            2,
          ),
        },
      ],
    };
  },
};
51
+
52
export const snapshotRestoreTool = {
  name: 'snapshot_restore',
  description:
    'Restore one or all VMs to a named snapshot. This resets the VM to the ' +
    'exact state when the snapshot was taken — much faster than reprovisioning.',
  inputSchema: z.object({
    name: z.string().regex(/^[a-zA-Z0-9][a-zA-Z0-9_-]*$/).describe('Snapshot name to restore'),
    vms: z
      .array(z.enum(['host', 'agent', 'visitor']))
      .optional()
      .describe('Which VMs to restore (default: all three)'),
  }),
  /**
   * Restore the selected VMs to snapshot `name`: stop, restore, restart.
   * Fix: a failed restore (e.g. unknown snapshot name) previously rejected
   * the handler and left the VMs stopped — the restart phase now always runs
   * and the restore error is reported in the result payload instead.
   */
  async handler({ name, vms } = {}) {
    const targets = vms ? vms.map((v) => VM_NAME_MAP[v]) : ALL_VMS;
    const results = [];
    let restoreError = null;

    // Stop all VMs in parallel
    await Promise.all(targets.map((vm) => mp.run(['stop', vm], { allowFailure: true })));

    try {
      // Restore all VMs in parallel
      await Promise.all(
        targets.map(async (vm) => {
          await mp.restore(vm, name);
          results.push(`${vm}: restored to "${name}"`);
        }),
      );
    } catch (err) {
      // Record the failure but fall through so the VMs get restarted below.
      restoreError = err;
    }

    // Restart all VMs in parallel (graceful error handling)
    await Promise.all(
      targets.map(async (vm) => {
        try {
          await mp.run(['start', vm], { timeout: 600_000 });
          results.push(`${vm}: restarted`);
        } catch (err) {
          results.push(`${vm}: start failed (${err.message}) — may need manual start`);
        }
      }),
    );

    const ok = restoreError === null;
    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify(
            { ok, restored: results, ...(ok ? {} : { error: restoreError.message }) },
            null,
            2,
          ),
        },
      ],
    };
  },
};
101
+
102
export const snapshotListTool = {
  name: 'snapshot_list',
  description: 'List all available snapshots across VMs, plus known checkpoint descriptions.',
  inputSchema: z.object({}),
  /** Gather per-VM snapshot lists in parallel plus the checkpoint catalogue. */
  async handler() {
    const perVm = await Promise.all(
      ALL_VMS.map(async (vmName) => {
        const snaps = await mp.listSnapshots(vmName);
        return [vmName, snaps];
      }),
    );
    // Keep only VMs that actually have snapshots.
    const nonEmpty = perVm.filter(([, snaps]) => snaps.length > 0);
    const snapshots = Object.fromEntries(nonEmpty);

    const payload = JSON.stringify({ snapshots, checkpoints: CHECKPOINTS }, null, 2);
    return {
      content: [{ type: 'text', text: payload }],
    };
  },
};
@@ -0,0 +1,161 @@
1
+ // ============================================================================
2
+ // Status & Log Inspection Tools — env_status, test_log
3
+ // ============================================================================
4
+
5
+ import { z } from 'zod';
6
+ import * as mp from '../lib/multipass.js';
7
+ import { loadState } from '../lib/state.js';
8
+ import { readTestLog, readSummary, listRuns } from '../lib/logs.js';
9
+ import { ALL_VMS, VM_HOST } from '../config.js';
10
+
11
export const envStatusTool = {
  name: 'env_status',
  description:
    'Full environment health check: are VMs running? Are services up? ' +
    'What profile are they using? Are there snapshots available? ' +
    'What was the last test run result?',
  inputSchema: z.object({}),
  /**
   * Aggregate a full health report: per-VM info, host service states (only
   * when the host VM is running), snapshot availability, and the most recent
   * test-run summary from the logs directory.
   */
  async handler() {
    const state = loadState();

    // Fetch per-VM info and snapshot lists concurrently.
    const [vmInfos, snapshotEntries] = await Promise.all([
      Promise.all(ALL_VMS.map(async (vmName) => [vmName, await mp.info(vmName)])),
      Promise.all(ALL_VMS.map(async (vmName) => [vmName, await mp.listSnapshots(vmName)])),
    ]);

    // Human-readable byte formatters for memory/disk totals.
    const toMiB = (bytes) => `${Math.round(bytes / (1024 * 1024))}M`;
    const toGiB = (bytes) => `${Math.round(bytes / (1024 * 1024 * 1024))}G`;

    // Build the VM status map; missing VMs get a 'not-found' marker.
    const vms = {};
    for (const [vmName, info] of vmInfos) {
      const vmInfo = info?.info?.[vmName];
      if (!vmInfo) {
        vms[vmName] = { state: 'not-found' };
        continue;
      }
      vms[vmName] = {
        state: vmInfo.state,
        ipv4: vmInfo.ipv4?.[0] || null,
        cpus: vmInfo.cpu_count,
        memory: vmInfo.memory?.total ? toMiB(vmInfo.memory.total) : null,
        disk: vmInfo.disk?.total ? toGiB(vmInfo.disk.total) : null,
      };
    }

    // Probe host services in parallel, but only if the host VM is up.
    let services = null;
    if (vms[VM_HOST]?.state === 'Running') {
      const checkService = async (svc) => {
        const result = await mp.exec(VM_HOST, `systemctl is-active ${svc} 2>/dev/null | head -1`, {
          sudo: true,
          allowFailure: true,
        });
        return [svc, result.stdout.trim() || 'unknown'];
      };
      const pairs = await Promise.all(
        ['portlama-panel', 'nginx', 'authelia', 'chisel-server'].map(checkService),
      );
      services = Object.fromEntries(pairs);
    }

    // Only report VMs that actually have snapshots.
    const snapshots = Object.fromEntries(snapshotEntries.filter(([, snaps]) => snaps.length > 0));

    // Most recent run summary, if any runs exist.
    const runs = listRuns();
    const lastRun = runs.length > 0 ? readSummary(runs[0]) : null;

    const report = {
      vms,
      profile: state.profile,
      domain: state.domain,
      services,
      snapshots: Object.keys(snapshots).length > 0 ? snapshots : null,
      lastRun: lastRun
        ? {
            id: lastRun.runId,
            passed: lastRun.passed,
            failed: lastRun.failed,
            durationMs: lastRun.durationMs,
          }
        : null,
      hasCredentials: !!state.credentials,
    };

    return {
      content: [{ type: 'text', text: JSON.stringify(report, null, 2) }],
    };
  },
};
103
+
104
export const testLogTool = {
  name: 'test_log',
  description:
    'Fetch the full raw log output for a specific test from an intermediate run. ' +
    'Use this after test_run shows a failure and you need the complete output to debug.',
  inputSchema: z.object({
    testName: z
      .string()
      .describe(
        'Test name (e.g. "01-onboarding-complete", "11-plugin-lifecycle")',
      ),
    runId: z
      .string()
      .optional()
      .describe('Run ID (default: most recent run)'),
  }),
  /** Return the raw log text for one test, or a structured error payload. */
  async handler({ testName, runId } = {}) {
    // Compact (non-indented) JSON error responses, matching the original shape.
    const asError = (payload) => ({
      content: [{ type: 'text', text: JSON.stringify(payload) }],
    });

    const targetRunId = runId || listRuns()[0];
    if (!targetRunId) {
      return asError({ ok: false, error: 'No test runs found' });
    }

    const log = readTestLog(targetRunId, testName);
    if (!log) {
      return asError({
        ok: false,
        error: `No log found for test "${testName}" in run "${targetRunId}"`,
        availableRuns: listRuns().slice(0, 5),
      });
    }

    return {
      content: [{ type: 'text', text: log }],
    };
  },
};