hackerrun 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +22 -0
- package/.env.example +9 -0
- package/CLAUDE.md +532 -0
- package/README.md +94 -0
- package/dist/index.js +2813 -0
- package/package.json +38 -0
- package/src/commands/app.ts +394 -0
- package/src/commands/builds.ts +314 -0
- package/src/commands/config.ts +129 -0
- package/src/commands/connect.ts +197 -0
- package/src/commands/deploy.ts +227 -0
- package/src/commands/env.ts +174 -0
- package/src/commands/login.ts +120 -0
- package/src/commands/logs.ts +97 -0
- package/src/index.ts +43 -0
- package/src/lib/app-config.ts +95 -0
- package/src/lib/cluster.ts +428 -0
- package/src/lib/config.ts +137 -0
- package/src/lib/platform-auth.ts +20 -0
- package/src/lib/platform-client.ts +637 -0
- package/src/lib/platform.ts +87 -0
- package/src/lib/ssh-cert.ts +264 -0
- package/src/lib/uncloud-runner.ts +342 -0
- package/src/lib/uncloud.ts +149 -0
- package/tsconfig.json +17 -0
- package/tsup.config.ts +17 -0
package/src/lib/app-config.ts
@@ -0,0 +1,95 @@
// Local app configuration file (hackerrun.yaml)
// This file links a local directory to a deployed app

import { existsSync, readFileSync, writeFileSync, unlinkSync } from 'fs';
import { join } from 'path';
import { parse, stringify } from 'yaml';

export interface AppConfig {
  appName: string; // Links this directory to an app
}

const CONFIG_FILENAME = 'hackerrun.yaml';

/**
 * Get the config file path for a directory
 */
export function getConfigPath(directory: string = process.cwd()): string {
  return join(directory, CONFIG_FILENAME);
}

/**
 * Check if a hackerrun.yaml file exists
 */
export function hasAppConfig(directory: string = process.cwd()): boolean {
  return existsSync(getConfigPath(directory));
}

/**
 * Read the hackerrun.yaml config
 */
export function readAppConfig(directory: string = process.cwd()): AppConfig | null {
  const configPath = getConfigPath(directory);
  if (!existsSync(configPath)) {
    return null;
  }

  try {
    const content = readFileSync(configPath, 'utf-8');
    const config = parse(content) as AppConfig;

    if (!config.appName) {
      return null;
    }

    return config;
  } catch (error) {
    console.error(`Failed to parse ${CONFIG_FILENAME}:`, error);
    return null;
  }
}

/**
 * Write the hackerrun.yaml config
 */
export function writeAppConfig(config: AppConfig, directory: string = process.cwd()): void {
  const configPath = getConfigPath(directory);
  const content = stringify(config);
  writeFileSync(configPath, content, 'utf-8');
}

/**
 * Get the app name for the current directory
 * - First checks hackerrun.yaml
 * - Falls back to folder name if no config exists
 */
export function getAppName(directory: string = process.cwd()): string {
  const config = readAppConfig(directory);
  if (config?.appName) {
    return config.appName;
  }

  // Fallback to folder name (for backwards compatibility)
  const folderName = directory.split('/').pop() || 'app';
  return folderName.toLowerCase().replace(/[^a-z0-9_-]/g, '-');
}

/**
 * Link the current directory to an app
 */
export function linkApp(appName: string, directory: string = process.cwd()): void {
  writeAppConfig({ appName }, directory);
}

/**
 * Unlink the current directory from an app
 */
export function unlinkApp(directory: string = process.cwd()): boolean {
  const configPath = getConfigPath(directory);
  if (existsSync(configPath)) {
    // unlinkSync is imported at the top; a CommonJS require() here would
    // fail at runtime in an ESM build
    unlinkSync(configPath);
    return true;
  }
  return false;
}
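
Taken together, these helpers form a small linking API. A minimal usage sketch follows (editor illustration, not part of the package; it only exercises the exported functions above, and 'my-app' is a hypothetical app name):

import { hasAppConfig, linkApp, getAppName, unlinkApp } from './app-config.js';

// Link the current directory to an app, then resolve the name back.
if (!hasAppConfig()) {
  linkApp('my-app'); // writes hackerrun.yaml containing `appName: my-app`
}
console.log(getAppName()); // 'my-app' from hackerrun.yaml; otherwise the sanitized folder name
console.log(unlinkApp());  // removes hackerrun.yaml and returns true if it existed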
package/src/lib/cluster.ts
@@ -0,0 +1,428 @@
import { AppCluster, VMNode, PlatformClient } from './platform-client.js';
import { SSHCertManager } from './ssh-cert.js';
import { execSync } from 'child_process';
import ora from 'ora';
import chalk from 'chalk';

// Platform SSH keys cached for the session
let platformKeysCache: { caPublicKey: string; platformPublicKey: string } | null = null;

export interface ClusterInitOptions {
  appName: string;
  location: string;
  vmSize: string;
  storageSize: number; // GB
  bootImage: string;
}

export class ClusterManager {
  private sshCertManager: SSHCertManager;

  constructor(
    private platformClient: PlatformClient
  ) {
    this.sshCertManager = new SSHCertManager(platformClient);
  }

  /**
   * Get platform SSH keys (cached for the session)
   * Returns CA public key and platform public key for VM creation
   */
  private async getPlatformKeys(): Promise<{ caPublicKey: string; platformPublicKey: string }> {
    if (platformKeysCache) {
      return platformKeysCache;
    }

    try {
      platformKeysCache = await this.platformClient.getPlatformSSHKeys();
      return platformKeysCache;
    } catch (error) {
      throw new Error(`Failed to get platform SSH keys: ${(error as Error).message}`);
    }
  }

  /**
   * Initialize a new cluster for an app (creates first VM and sets up uncloud)
   *
   * Flow:
   * 1. Create VM with platform SSH key
   * 2. Wait for VM to get IPv6 address
   * 3. Call platform API to setup VM (DNS64, WireGuard, SSH CA)
   * 4. Run `uc machine init` to install Docker + uncloud
   * 5. Configure Docker for NAT64
   * 6. Save app state to platform
   *
   * Users access VMs via SSH certificates signed by the platform CA.
   */
  async initializeCluster(options: ClusterInitOptions): Promise<AppCluster> {
    const { appName, location, vmSize, storageSize, bootImage } = options;

    // Generate VM name
    const vmName = `${appName}-vm-${this.generateId()}`;

    let spinner = ora(`Creating VM '${vmName}' in ${location}...`).start();

    try {
      // Get platform SSH keys (for VM creation)
      spinner.text = 'Fetching platform SSH keys...';
      const platformKeys = await this.getPlatformKeys();

      // Get gateway info to get the private subnet ID for NAT64 routing
      const gateway = await this.platformClient.getGateway(location);
      const privateSubnetId = gateway?.subnetId;

      // Create IPv6-only VM via platform API with platform SSH key
      // VM is placed in the same private subnet as the gateway for NAT64 routing
      spinner.text = `Creating VM '${vmName}'...`;
      const vm = await this.platformClient.createVM({
        name: vmName,
        location,
        size: vmSize,
        storage_size: storageSize,
        boot_image: bootImage,
        unix_user: 'root',
        public_key: platformKeys.platformPublicKey,
        enable_ip4: !privateSubnetId, // IPv6-only if we have a subnet for NAT64
        private_subnet_id: privateSubnetId,
      });

      spinner.text = `Waiting for VM to be ready...`;
      spinner.stop();

      console.log(chalk.cyan('\nWaiting for VM to get an IPv6 address...'));

      // Wait for VM to have IPv6 (we're creating IPv6-only VMs)
      const vmWithIp = await this.waitForVM(location, vmName, 600, false); // 10 min timeout

      spinner = ora('Setting up VM...').start();

      // Wait for SSH to be ready
      spinner.text = 'Waiting for SSH to be ready...';
      await this.sleep(30000); // 30 seconds

      // Step 1: Platform sets up VM (DNS64, WireGuard, SSH CA)
      spinner.text = 'Configuring VM (DNS64, NAT64, SSH CA)...';
      await this.platformClient.setupVM(vmWithIp.ip6!, location, appName);

      // Create cluster state and save to backend BEFORE requesting SSH certificate
      // (certificate request requires app to exist in database)
      const cluster: AppCluster = {
        appName,
        location,
        nodes: [{
          name: vmName,
          id: vm.id,
          ipv6: vmWithIp.ip6,
          isPrimary: true,
        }],
        uncloudContext: appName,
        createdAt: new Date().toISOString(),
      };
      spinner.text = 'Registering app...';
      const savedCluster = await this.platformClient.saveApp(cluster);

      // Step 2: Initialize uncloud (installs Docker + uncloud daemon)
      spinner.text = 'Installing Docker and Uncloud...';
      spinner.stop();
      console.log(chalk.cyan('\nInitializing uncloud (this may take a few minutes)...'));
      await this.initializeUncloud(vmWithIp.ip6!, appName);

      spinner = ora('Configuring Docker for NAT64...').start();

      // Step 3: Configure Docker for NAT64
      await this.configureDockerNAT64(vmWithIp.ip6!);

      spinner.succeed(chalk.green(`Cluster initialized successfully`));

      return savedCluster;
    } catch (error) {
      spinner.fail(chalk.red('Failed to initialize cluster'));
      throw error;
    }
  }

  /**
   * Add a node to an existing cluster
   */
  async addNode(appName: string, vmSize: string, bootImage: string): Promise<VMNode> {
    const cluster = await this.platformClient.getApp(appName);
    if (!cluster) {
      throw new Error(`App '${appName}' not found`);
    }

    const primaryNode = await this.platformClient.getPrimaryNode(appName);
    if (!primaryNode || !primaryNode.ipv6) {
      throw new Error(`Primary node not found or has no IPv6 address`);
    }

    // Get gateway info to get the private subnet ID for NAT64 routing
    const gateway = await this.platformClient.getGateway(cluster.location);
    const privateSubnetId = gateway?.subnetId;

    const vmName = `${appName}-vm-${this.generateId()}`;

    let spinner = ora(`Adding node '${vmName}' to cluster...`).start();

    try {
      // Get platform SSH keys (for VM creation)
      spinner.text = 'Fetching platform SSH keys...';
      const platformKeys = await this.getPlatformKeys();

      // Create new IPv6-only VM via platform API with platform SSH key
      spinner.text = `Creating VM '${vmName}'...`;
      const vm = await this.platformClient.createVM({
        name: vmName,
        location: cluster.location,
        size: vmSize,
        boot_image: bootImage,
        unix_user: 'root',
        public_key: platformKeys.platformPublicKey,
        enable_ip4: !privateSubnetId,
        private_subnet_id: privateSubnetId,
      });

      spinner.text = `Waiting for VM to be ready...`;
      spinner.stop();
      console.log(chalk.cyan('\nWaiting for VM to get an IPv6 address...'));
      const vmWithIp = await this.waitForVM(cluster.location, vmName, 600, false);

      spinner = ora('Setting up VM...').start();
      await this.sleep(30000);

      // Step 1: Platform sets up VM (DNS64, WireGuard, SSH CA)
      spinner.text = 'Configuring VM...';
      await this.platformClient.setupVM(vmWithIp.ip6!, cluster.location, appName);

      // Step 2: Get join token from primary and join cluster
      spinner.text = 'Joining uncloud cluster...';
      spinner.stop();
      console.log(chalk.cyan('\nJoining uncloud cluster...'));
      await this.joinUncloudCluster(vmWithIp.ip6!, primaryNode.ipv6, appName);

      spinner = ora('Configuring Docker for NAT64...').start();

      // Step 3: Configure Docker for NAT64
      await this.configureDockerNAT64(vmWithIp.ip6!);

      spinner.succeed(chalk.green(`Node '${vmName}' added successfully`));

      const newNode: VMNode = {
        name: vmName,
        id: vm.id,
        ipv6: vmWithIp.ip6,
        isPrimary: false,
      };

      // Update cluster state on backend
      cluster.nodes.push(newNode);
      await this.platformClient.saveApp(cluster);

      return newNode;
    } catch (error) {
      spinner.fail(chalk.red('Failed to add node'));
      throw error;
    }
  }

  /**
   * Initialize uncloud on the VM using uc machine init
   * Uses --no-dns because we manage our own domain via gateway
   *
   * Before running uc, we get an SSH certificate from the platform
   * and add it to the ssh-agent. This allows uc to authenticate
   * since the VM only accepts platform SSH key or signed certificates.
   */
  private async initializeUncloud(vmIp: string, contextName: string): Promise<void> {
    try {
      // Get SSH certificate and add to agent (required for auth)
      // The VM was created with platform SSH key, so we need a certificate
      await this.sshCertManager.getSession(contextName, vmIp);

      // uc machine init connects to the VM and installs Docker + uncloud daemon
      execSync(`uc machine init -c "${contextName}" --no-dns root@${vmIp}`, {
        stdio: 'inherit',
        timeout: 600000, // 10 min timeout
      });
    } catch (error) {
      throw new Error(`Failed to initialize uncloud: ${(error as Error).message}`);
    }
  }

  /**
   * Join a new node to an existing uncloud cluster
   * Uses --connect ssh:// to avoid local context dependency
   */
  private async joinUncloudCluster(newVmIp: string, primaryVmIp: string, contextName: string): Promise<void> {
    try {
      // Get SSH certificates for both nodes and add to agent
      await this.sshCertManager.getSession(contextName, primaryVmIp);
      await this.sshCertManager.getSession(contextName, newVmIp);

      // Get join token from primary node using --connect ssh://
      const token = execSync(`uc --connect ssh://root@${primaryVmIp} machine token`, {
        encoding: 'utf-8',
        timeout: 30000,
      }).trim();

      if (!token) {
        throw new Error('Failed to get join token from primary node');
      }

      // Join the new node to the cluster using --connect ssh://
      execSync(`uc --connect ssh://root@${primaryVmIp} machine add root@${newVmIp} --token "${token}"`, {
        stdio: 'inherit',
        timeout: 600000, // 10 min timeout
      });
    } catch (error) {
      throw new Error(`Failed to join cluster: ${(error as Error).message}`);
    }
  }

  /**
   * Configure Docker for NAT64 support on IPv6-only VMs
   *
   * Problem: DNS64 returns both A (IPv4) and AAAA (IPv6) records. Applications may try
   * IPv4 first, which times out because there's no route to the IPv4 internet.
   *
   * Solution:
   * 1. Enable IPv6 on Docker networks so containers get IPv6 addresses
   * 2. Block IPv4 forwarding from Docker to internet so IPv4 fails immediately
   * 3. Applications then use IPv6 (NAT64) which works via the gateway
   */
  private async configureDockerNAT64(vmIp: string): Promise<void> {
    const setupScript = `#!/bin/bash
set -e

# Step 1: Add IPv6 support to Docker daemon config
DAEMON_JSON='/etc/docker/daemon.json'
if [ -f "$DAEMON_JSON" ]; then
  # Merge with existing config using jq
  jq '. + {"ipv6": true, "fixed-cidr-v6": "fd00:d0c6:e4::/64"}' "$DAEMON_JSON" > /tmp/daemon.json
  mv /tmp/daemon.json "$DAEMON_JSON"
else
  cat > "$DAEMON_JSON" << 'EOFJSON'
{
  "ipv6": true,
  "fixed-cidr-v6": "fd00:d0c6:e4::/64"
}
EOFJSON
fi

# Step 2: Create systemd service to block IPv4 forwarding from Docker containers
cat > /etc/systemd/system/docker-ipv6-nat64.service << 'EOFSVC'
[Unit]
Description=Block IPv4 forwarding from Docker containers (force NAT64)
After=docker.service
Requires=docker.service

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/sbin/iptables -I FORWARD -i br-+ -o ens3 -j REJECT --reject-with icmp-net-unreachable
ExecStop=/sbin/iptables -D FORWARD -i br-+ -o ens3 -j REJECT --reject-with icmp-net-unreachable

[Install]
WantedBy=multi-user.target
EOFSVC

systemctl daemon-reload
systemctl enable docker-ipv6-nat64

# Step 3: Restart Docker to apply IPv6 config
systemctl restart docker
sleep 3

# Step 4: Recreate uncloud network with IPv6 enabled
CONTAINERS=$(docker ps -aq --filter network=uncloud 2>/dev/null || true)

for container in $CONTAINERS; do
  docker network disconnect uncloud "$container" 2>/dev/null || true
done

docker network rm uncloud 2>/dev/null || true
docker network create --driver bridge \
  --subnet 10.210.0.0/24 --gateway 10.210.0.1 \
  --ipv6 --subnet fd00:a10:210::/64 --gateway fd00:a10:210::1 \
  uncloud

for container in $CONTAINERS; do
  docker network connect uncloud "$container" 2>/dev/null || true
done

# Step 5: Start the iptables service
systemctl start docker-ipv6-nat64

echo "Docker NAT64 configuration complete"
`;

    try {
      // Use SSH to run the script on the VM
      // After platform setup, SSH certificate auth is available
      execSync(`ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@${vmIp} 'bash -s' << 'REMOTESCRIPT'
${setupScript}
REMOTESCRIPT`, {
        stdio: 'inherit',
        timeout: 120000, // 2 min timeout
      });
    } catch (error) {
      throw new Error(`Failed to configure Docker for NAT64: ${(error as Error).message}`);
    }
  }

  /**
   * Wait for VM to have an IP address
   * @param requireIpv4 - If true, wait for IPv4 (for gateway VMs). Otherwise wait for IPv6.
   */
  private async waitForVM(location: string, vmName: string, timeoutSeconds: number, requireIpv4: boolean = false): Promise<any> {
    const startTime = Date.now();
    const timeoutMs = timeoutSeconds * 1000;
    let lastStatus = '';

    while (Date.now() - startTime < timeoutMs) {
      const vm = await this.platformClient.getVM(location, vmName);

      // Log status changes
      if (vm.status && vm.status !== lastStatus) {
        console.log(chalk.dim(`  Status: ${vm.status}`));
        lastStatus = vm.status;
      }

      // Check for the required IP type
      const hasRequiredIp = requireIpv4 ? vm.ip4 : vm.ip6;
      if (hasRequiredIp) {
        const ipDisplay = requireIpv4 ? vm.ip4 : vm.ip6;
        console.log(chalk.green(`  IP assigned: ${ipDisplay}`));
        return vm;
      }

      const elapsed = Math.floor((Date.now() - startTime) / 1000);
      const ipType = requireIpv4 ? 'IPv4' : 'IPv6';
      process.stdout.write(`\r  Waiting for ${ipType}... (${elapsed}s / ${timeoutSeconds}s)`);

      await this.sleep(5000); // Check every 5 seconds
    }

    process.stdout.write('\n');
    throw new Error(
      `Timeout waiting for VM to get IP address after ${timeoutSeconds}s.\n\n` +
      `The VM may still be provisioning. You can:\n` +
      `  1. Check VM status: hackerrun vm list\n` +
      `  2. Delete and retry: hackerrun vm delete ${vmName}\n` +
      `  3. Check Ubicloud console: https://console.ubicloud.com`
    );
  }

  /**
   * Generate a random ID
   */
  private generateId(): string {
    return Math.random().toString(36).substring(2, 9);
  }

  /**
   * Sleep for specified milliseconds
   */
  private sleep(ms: number): Promise<void> {
    return new Promise(resolve => setTimeout(resolve, ms));
  }
}
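
For orientation, here is a hedged sketch of how a command might drive ClusterManager, based only on the methods shown above. The option values are hypothetical, and the PlatformClient constructor arguments are an assumption: its real signature lives in platform-client.ts, which this hunk does not show.

import { PlatformClient } from './platform-client.js';
import { ClusterManager } from './cluster.js';

// Assumed wiring; the real PlatformClient constructor is defined in platform-client.ts.
const client = new PlatformClient(/* apiToken */);
const manager = new ClusterManager(client);

// Provision the first VM, install Docker + uncloud, configure NAT64, register the app.
const cluster = await manager.initializeCluster({
  appName: 'my-app',          // hypothetical
  location: 'eu-central-h1',  // hypothetical location
  vmSize: 'standard-2',       // hypothetical size
  storageSize: 40,            // GB
  bootImage: 'ubuntu-jammy',  // hypothetical image
});

// Later, scale out with a second node that joins the uncloud cluster.
await manager.addNode('my-app', 'standard-2', 'ubuntu-jammy');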
package/src/lib/config.ts
@@ -0,0 +1,137 @@
import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
import { homedir } from 'os';
import { join } from 'path';

export interface Config {
  apiToken: string;
}

export class ConfigManager {
  private configDir: string;
  private configFile: string;

  constructor() {
    // XDG Base Directory Specification
    this.configDir = join(homedir(), '.config', 'hackerrun');
    this.configFile = join(this.configDir, 'config.json');
    this.ensureConfigDir();
  }

  private ensureConfigDir(): void {
    if (!existsSync(this.configDir)) {
      mkdirSync(this.configDir, { recursive: true, mode: 0o700 });
    }
  }

  /**
   * Read config from ~/.config/hackerrun/config.json
   */
  private readConfig(): Partial<Config> {
    if (!existsSync(this.configFile)) {
      return {};
    }

    try {
      const content = readFileSync(this.configFile, 'utf-8');
      return JSON.parse(content);
    } catch (error) {
      console.warn('Failed to read config file');
      return {};
    }
  }

  /**
   * Write config to ~/.config/hackerrun/config.json
   */
  private writeConfig(config: Partial<Config>): void {
    writeFileSync(this.configFile, JSON.stringify(config, null, 2), { mode: 0o600 });
  }

  /**
   * Load and validate config
   */
  load(): Config {
    const config = this.readConfig();

    if (!config.apiToken) {
      throw new Error(
        `Missing API token. Please run:\n\n` +
        `  hackerrun login\n`
      );
    }

    return {
      apiToken: config.apiToken,
    };
  }

  /**
   * Set a config value
   */
  set(key: keyof Config, value: string): void {
    const config = this.readConfig();
    config[key] = value;
    this.writeConfig(config);
  }

  /**
   * Get a config value
   */
  get(key: keyof Config): string | undefined {
    const config = this.readConfig();
    return config[key];
  }

  /**
   * Get all config values (with sensitive data masked)
   */
  getAll(maskSensitive: boolean = true): Partial<Config> {
    const config = this.readConfig();

    if (maskSensitive && config.apiToken) {
      config.apiToken = this.maskToken(config.apiToken);
    }

    return config;
  }

  /**
   * Mask sensitive token for display
   */
  private maskToken(token: string): string {
    if (token.length <= 8) return '***';
    return token.substring(0, 8) + '...' + token.substring(token.length - 4);
  }

  /**
   * Delete a config value
   */
  unset(key: keyof Config): void {
    const config = this.readConfig();
    delete config[key];
    this.writeConfig(config);
  }

  /**
   * Check if config exists and is valid
   */
  exists(): boolean {
    const config = this.readConfig();
    return !!config.apiToken;
  }

  /**
   * Get config file path
   */
  getConfigPath(): string {
    return this.configFile;
  }
}

/**
 * Legacy function for backward compatibility
 */
export function loadConfig(): Config {
  const configManager = new ConfigManager();
  return configManager.load();
}
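
The round-trip behavior of ConfigManager in one short sketch (editor illustration; the token value is made up). Note that getAll() masks by default while get() returns the raw value:

import { ConfigManager } from './config.js';

const cfg = new ConfigManager();          // creates ~/.config/hackerrun (mode 0700) if missing
cfg.set('apiToken', 'hr_example_token');  // hypothetical token, written with file mode 0600
console.log(cfg.getAll());                // { apiToken: 'hr_examp...oken' } (masked for display)
console.log(cfg.get('apiToken'));         // 'hr_example_token' (unmasked)
cfg.unset('apiToken');
console.log(cfg.exists());                // false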
package/src/lib/platform-auth.ts
@@ -0,0 +1,20 @@
// Platform authentication utilities
import chalk from 'chalk';
import { ConfigManager } from './config.js';

/**
 * Get platform auth token from config
 */
export function getPlatformToken(): string {
  const configManager = new ConfigManager();

  try {
    const config = configManager.load();
    return config.apiToken;
  } catch (error) {
    console.error(chalk.red('\n Not logged in'));
    console.log(chalk.cyan('\nPlease login first:\n'));
    console.log(`  ${chalk.bold('hackerrun login')}\n`);
    process.exit(1);
  }
}
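
Finally, a sketch of the intended call site for getPlatformToken (assumed; the command modules under package/src/commands are listed above but their bodies are not shown in this diff):

import { getPlatformToken } from './platform-auth.js';

// Exits the process with a login hint when no token is configured;
// otherwise returns the stored API token for authenticated platform calls.
const token = getPlatformToken();
console.log(`Using token ${token.substring(0, 8)}...`);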