kova-node-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +138 -0
- package/bin/cli.js +2 -0
- package/dist/__tests__/auto-bidder.test.js +267 -0
- package/dist/__tests__/container-manager.test.js +189 -0
- package/dist/__tests__/deployment-executor.test.js +332 -0
- package/dist/__tests__/heartbeat.test.js +191 -0
- package/dist/__tests__/lease-handler.test.js +268 -0
- package/dist/__tests__/resource-limits.test.js +164 -0
- package/dist/api/server.js +607 -0
- package/dist/cli.js +47 -0
- package/dist/commands/deploy.js +568 -0
- package/dist/commands/earnings.js +70 -0
- package/dist/commands/start.js +358 -0
- package/dist/commands/status.js +50 -0
- package/dist/commands/stop.js +101 -0
- package/dist/lib/client.js +87 -0
- package/dist/lib/config.js +107 -0
- package/dist/lib/docker.js +415 -0
- package/dist/lib/logger.js +12 -0
- package/dist/lib/message-signer.js +93 -0
- package/dist/lib/monitor.js +105 -0
- package/dist/lib/p2p.js +186 -0
- package/dist/lib/resource-limits.js +84 -0
- package/dist/lib/state.js +113 -0
- package/dist/lib/types.js +2 -0
- package/dist/lib/usage-meter.js +63 -0
- package/dist/services/auto-bidder.js +332 -0
- package/dist/services/container-manager.js +282 -0
- package/dist/services/deployment-executor.js +1562 -0
- package/dist/services/heartbeat.js +110 -0
- package/dist/services/job-handler.js +241 -0
- package/dist/services/lease-handler.js +382 -0
- package/package.json +51 -0
package/dist/lib/config.js
@@ -0,0 +1,107 @@
import convict from 'convict';
import { readFileSync, existsSync } from 'fs';
import { join } from 'path';
import { homedir } from 'os';
// config with sensible defaults
const schema = {
    node: {
        id: {
            doc: 'unique node identifier',
            format: String,
            default: '',
            env: 'KOVA_NODE_ID'
        },
        name: {
            doc: 'friendly name for this node',
            format: String,
            default: 'kova-node',
            env: 'KOVA_NODE_NAME'
        }
    },
    network: {
        port: {
            doc: 'p2p port to listen on',
            format: 'port',
            default: 4001,
            env: 'KOVA_P2P_PORT'
        },
        bootstrapNodes: {
            doc: 'initial nodes to connect to',
            format: Array,
            default: [],
            env: 'KOVA_BOOTSTRAP_NODES'
        }
    },
    orchestratorUrl: {
        doc: 'orchestrator API URL for registration',
        format: String,
        default: 'https://orchestrator.kovanetwork.com',
        env: 'KOVA_ORCHESTRATOR_URL'
    },
    resources: {
        maxCpu: {
            doc: 'max cpu cores to offer',
            format: Number,
            default: 0, // 0 = auto detect
            env: 'KOVA_MAX_CPU'
        },
        maxMemory: {
            doc: 'max memory in gb to offer',
            format: Number,
            default: 0, // 0 = auto detect
            env: 'KOVA_MAX_MEMORY'
        },
        maxDisk: {
            doc: 'max disk space in gb to offer',
            format: Number,
            default: 100,
            env: 'KOVA_MAX_DISK'
        }
    },
    earnings: {
        wallet: {
            doc: 'wallet address for payments',
            format: String,
            default: '',
            env: 'KOVA_WALLET_ADDRESS'
        }
    }
};
export class NodeConfig {
    static instance;
    static async load(configPath) {
        this.instance = convict(schema);
        // try to load config from file
        const paths = [
            configPath,
            join(process.cwd(), 'kova.config.json'),
            join(homedir(), '.kova', 'config.json')
        ].filter(p => p);
        for (const path of paths) {
            if (path && existsSync(path)) {
                try {
                    const configFile = readFileSync(path, 'utf8');
                    this.instance.load(JSON.parse(configFile));
                    break;
                }
                catch (err) {
                    // whatever just use defaults
                }
            }
        }
        // validate it
        this.instance.validate({ allowed: 'strict' });
        return this.instance.getProperties();
    }
    static get(path) {
        if (!this.instance) {
            throw new Error('config not loaded yet');
        }
        if (path) {
            // @ts-ignore - convict types are too complex
            const result = this.instance.get(path);
            return result;
        }
        return this.instance.getProperties();
    }
}
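Not part of the package diff: a minimal usage sketch of the config loader above. Settings are resolved from schema defaults, then an optional config file (`kova.config.json` in the working directory or `~/.kova/config.json`), then the `KOVA_*` environment variables declared in the schema. `NodeConfig.load`, `NodeConfig.get`, and the key paths come from the file itself; `KOVA_CONFIG_PATH` is only an illustrative way to pass an explicit path and is not defined by the package.

// sketch only - KOVA_CONFIG_PATH is hypothetical; schema paths are from config.js above
import { NodeConfig } from './lib/config.js';

const settings = await NodeConfig.load(process.env.KOVA_CONFIG_PATH);
console.log(settings.node.name);             // 'kova-node' unless overridden by file or KOVA_NODE_NAME
console.log(NodeConfig.get('network.port')); // 4001, or KOVA_P2P_PORT if set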
package/dist/lib/docker.js
@@ -0,0 +1,415 @@
import Docker from 'dockerode';
import { logger } from './logger.js';
export class DockerManager {
    docker;
    persistentVolumes = new Map(); // deploymentId -> volumeNames
    constructor() {
        // use socket if on linux, named pipe on windows
        this.docker = new Docker({
            socketPath: process.platform === 'win32'
                ? '//./pipe/docker_engine'
                : '/var/run/docker.sock'
        });
    }
    async checkDocker() {
        try {
            await this.docker.ping();
            const info = await this.docker.info();
            logger.debug({
                containers: info.Containers,
                images: info.Images,
                version: info.ServerVersion
            }, 'docker is running');
            return true;
        }
        catch (err) {
            logger.error({ err }, 'docker not available');
            return false;
        }
    }
    async pullImage(image) {
        // check if we already have it
        try {
            await this.docker.getImage(image).inspect();
            logger.debug({ image }, 'image already exists');
            return;
        }
        catch (err) {
            // need to pull
        }
        logger.info({ image }, 'pulling docker image');
        const stream = await this.docker.pull(image);
        // wait for pull to complete
        return new Promise((resolve, reject) => {
            this.docker.modem.followProgress(stream, (err, output) => {
                if (err) {
                    logger.error({ err, image }, 'failed to pull image');
                    reject(err);
                }
                else {
                    logger.info({ image }, 'image pulled successfully');
                    resolve(output);
                }
            });
        });
    }
    async createContainer(spec) {
        // Create isolated network for this container (prevents inter-container communication)
        const networkName = `kova-net-${spec.jobId}`;
        let network;
        try {
            network = await this.docker.createNetwork({
                Name: networkName,
                Driver: 'bridge',
                Internal: false, // needs internet access
                EnableIPv6: false,
                Labels: {
                    'kova.job.id': spec.jobId,
                    'kova.isolated': 'true'
                }
            });
            logger.debug({ networkName }, 'created isolated network for job');
        }
        catch (err) {
            logger.error({ err, jobId: spec.jobId }, 'failed to create isolated network');
        }
        // check if there's a startup script in env vars
        let containerCmd = spec.cmd;
        let envVars = spec.env || [];
        // handle both array and object formats for env
        if (typeof spec.env === 'object' && !Array.isArray(spec.env)) {
            // convert object to array of "KEY=VALUE" strings
            envVars = Object.entries(spec.env).map(([k, v]) => `${k}=${v}`);
            // check for startup script
            if (spec.env.KOVA_STARTUP_SCRIPT) {
                const script = Buffer.from(spec.env.KOVA_STARTUP_SCRIPT, 'base64').toString('utf-8');
                containerCmd = ['/bin/sh', '-c', script];
            }
        }
        else if (Array.isArray(spec.env)) {
            const startupScript = spec.env.find((e) => e.startsWith('KOVA_STARTUP_SCRIPT='));
            if (startupScript) {
                const scriptB64 = startupScript.split('=')[1];
                const script = Buffer.from(scriptB64, 'base64').toString('utf-8');
                containerCmd = ['/bin/sh', '-c', script];
            }
        }
        // handle volumes - persistent vs ephemeral
        const binds = [];
        const volumeNames = [];
        if (spec.volumes && Array.isArray(spec.volumes)) {
            for (const vol of spec.volumes) {
                if (vol.persistent) {
                    // create or reuse persistent docker volume
                    const volumeName = `kova-pv-${spec.jobId}-${vol.name}`;
                    try {
                        // check if volume exists
                        await this.docker.getVolume(volumeName).inspect();
                        logger.debug({ volumeName }, 'reusing existing persistent volume');
                    }
                    catch (err) {
                        // create new volume
                        await this.docker.createVolume({
                            Name: volumeName,
                            Labels: {
                                'kova.job.id': spec.jobId,
                                'kova.volume.name': vol.name,
                                'kova.persistent': 'true'
                            }
                        });
                        logger.info({ volumeName, mountPath: vol.mountPath }, 'created persistent volume');
                    }
                    binds.push(`${volumeName}:${vol.mountPath}:rw`);
                    volumeNames.push(volumeName);
                }
                else {
                    // ephemeral volume - use tmpfs or empty volume
                    const volumeName = `kova-vol-${spec.jobId}-${vol.name}`;
                    await this.docker.createVolume({
                        Name: volumeName,
                        Labels: {
                            'kova.job.id': spec.jobId,
                            'kova.volume.name': vol.name,
                            'kova.persistent': 'false'
                        }
                    });
                    binds.push(`${volumeName}:${vol.mountPath}:rw`);
                    volumeNames.push(volumeName);
                }
            }
        }
        // track persistent volumes for this deployment
        if (volumeNames.length > 0) {
            this.persistentVolumes.set(spec.jobId, volumeNames);
        }
        // determine tmpfs mounts - only use for /app if no custom volumes mounted there
        const tmpfsMounts = {};
        const hasAppMount = spec.volumes?.some((v) => v.mountPath === '/app');
        if (!hasAppMount) {
            tmpfsMounts['/app'] = 'rw,size=100m,mode=1777';
        }
        // build host config with optional gpu support
        const hostConfig = {
            Memory: spec.memory * 1024 * 1024, // mb to bytes
            NanoCpus: spec.cpus * 1000000000, // cpu cores to nanocpus
            ReadonlyRootfs: false, // allow writing for interactive use
            // mount tmpfs for /app only if no custom volumes there
            Tmpfs: Object.keys(tmpfsMounts).length > 0 ? tmpfsMounts : undefined,
            // mount persistent and ephemeral volumes
            Binds: binds.length > 0 ? binds : undefined,
            CapDrop: ['ALL'],
            CapAdd: [], // NO capabilities
            SecurityOpt: ['no-new-privileges'],
            // disk quota if supported
            StorageOpt: spec.disk ? { size: `${spec.disk}G` } : undefined,
            // NETWORK ISOLATION: Each container gets its own network
            NetworkMode: network ? networkName : 'none',
            // auto remove after exit
            AutoRemove: false,
            // Prevent container from accessing host services
            ExtraHosts: [],
            Dns: ['8.8.8.8', '1.1.1.1']
        };
        // add gpu support if requested
        if (spec.gpu && spec.gpu > 0) {
            hostConfig.DeviceRequests = [{
                    Driver: '',
                    Count: spec.gpu,
                    DeviceIDs: [],
                    Capabilities: [['gpu']],
                    Options: {}
                }];
            logger.info({ jobId: spec.jobId, gpuCount: spec.gpu }, 'enabling gpu access for container');
        }
        const container = await this.docker.createContainer({
            Image: spec.image || 'alpine:latest',
            name: `kova-${spec.jobId}`,
            // execute startup script or default command
            Cmd: containerCmd || ['/bin/sh', '-c', 'echo "no command provided" && sleep 60'],
            Tty: false,
            OpenStdin: false,
            HostConfig: hostConfig,
            Env: envVars,
            WorkingDir: '/app',
            // labels for tracking
            Labels: {
                'kova.job.id': spec.jobId,
                'kova.job.user': spec.userId || 'unknown',
                'kova.version': '0.0.1',
                'kova.has.persistent.volumes': volumeNames.some(v => v.includes('-pv-')) ? 'true' : 'false',
                'kova.gpu.count': spec.gpu ? String(spec.gpu) : '0'
            }
        });
        await container.start();
        return container;
    }
    async getContainerStats(containerId) {
        const container = this.docker.getContainer(containerId);
        // check if container exists first
        try {
            await container.inspect();
        }
        catch (err) {
            throw new Error('container not found or removed');
        }
        const stream = await container.stats({ stream: false });
        // calc actual usage with safe access
        const memUsage = stream.memory_stats?.usage ? stream.memory_stats.usage / (1024 * 1024) : 0; // mb
        let cpuPercent = 0;
        if (stream.cpu_stats?.cpu_usage && stream.precpu_stats?.cpu_usage) {
            const cpuDelta = stream.cpu_stats.cpu_usage.total_usage -
                stream.precpu_stats.cpu_usage.total_usage;
            const systemDelta = stream.cpu_stats.system_cpu_usage -
                stream.precpu_stats.system_cpu_usage;
            if (systemDelta > 0) {
                cpuPercent = (cpuDelta / systemDelta) *
                    (stream.cpu_stats.online_cpus || 1) * 100;
            }
        }
        return {
            memory: Math.round(memUsage),
            cpu: Math.round(cpuPercent * 100) / 100,
            network: {
                rx: stream.networks?.eth0?.rx_bytes || 0,
                tx: stream.networks?.eth0?.tx_bytes || 0
            }
        };
    }
    async cleanupContainer(containerId, deleteVolumes = false) {
        try {
            const container = this.docker.getContainer(containerId);
            // get container info to find its network
            const info = await container.inspect();
            const jobId = info.Config.Labels?.['kova.job.id'];
            const hasPersistentVolumes = info.Config.Labels?.['kova.has.persistent.volumes'] === 'true';
            // check if its still running
            if (info.State.Running) {
                await container.stop({ t: 10 }); // 10 sec grace
            }
            await container.remove();
            logger.debug({ containerId }, 'container cleaned up');
            // Clean up isolated network
            if (jobId) {
                const networkName = `kova-net-${jobId}`;
                try {
                    const network = this.docker.getNetwork(networkName);
                    await network.remove();
                    logger.debug({ networkName }, 'isolated network cleaned up');
                }
                catch (err) {
                    // network might not exist or already removed
                    if (err.statusCode !== 404) {
                        logger.debug({ err, networkName }, 'network cleanup failed');
                    }
                }
                // cleanup volumes - only delete persistent volumes if explicitly requested
                const volumeNames = this.persistentVolumes.get(jobId) || [];
                for (const volumeName of volumeNames) {
                    const isPersistent = volumeName.includes('-pv-');
                    if (!isPersistent || deleteVolumes) {
                        try {
                            const volume = this.docker.getVolume(volumeName);
                            await volume.remove();
                            logger.debug({ volumeName, isPersistent }, 'volume cleaned up');
                        }
                        catch (err) {
                            if (err.statusCode !== 404) {
                                logger.debug({ err, volumeName }, 'volume cleanup failed');
                            }
                        }
                    }
                    else {
                        logger.debug({ volumeName }, 'keeping persistent volume for reuse');
                    }
                }
                this.persistentVolumes.delete(jobId);
            }
        }
        catch (err) {
            // probably already gone
            if (err.statusCode !== 404) {
                logger.debug({ err, containerId }, 'cleanup failed');
            }
        }
    }
    // delete all volumes for a deployment (called when deployment is closed)
    async deleteDeploymentVolumes(jobId) {
        const volumes = await this.docker.listVolumes({
            filters: {
                label: [`kova.job.id=${jobId}`]
            }
        });
        for (const vol of volumes.Volumes || []) {
            try {
                const volume = this.docker.getVolume(vol.Name);
                await volume.remove();
                logger.info({ volumeName: vol.Name }, 'deleted deployment volume');
            }
            catch (err) {
                if (err.statusCode !== 404) {
                    logger.warn({ err, volumeName: vol.Name }, 'failed to delete volume');
                }
            }
        }
        this.persistentVolumes.delete(jobId);
    }
    // list persistent volumes for a deployment
    async listDeploymentVolumes(jobId) {
        const volumes = await this.docker.listVolumes({
            filters: {
                label: [`kova.job.id=${jobId}`]
            }
        });
        return (volumes.Volumes || []).map(vol => ({
            name: vol.Labels?.['kova.volume.name'] || vol.Name,
            size: 0, // docker doesn't track volume size easily
            persistent: vol.Labels?.['kova.persistent'] === 'true'
        }));
    }
    async listKovaContainers() {
        const containers = await this.docker.listContainers({
            all: true,
            filters: {
                label: ['kova.job.id']
            }
        });
        return containers.map(c => ({
            id: c.Id,
            jobId: c.Labels['kova.job.id'],
            state: c.State,
            status: c.Status,
            created: new Date(c.Created * 1000)
        }));
    }
    async execCommand(containerId, command) {
        const container = this.docker.getContainer(containerId);
        const exec = await container.exec({
            Cmd: ['/bin/sh', '-c', command],
            AttachStdout: true,
            AttachStderr: true
        });
        return new Promise((resolve, reject) => {
            exec.start({ hijack: true, stdin: false }, (err, stream) => {
                if (err) {
                    reject(err);
                    return;
                }
                let stdout = '';
                let stderr = '';
                const { Transform } = require('stream');
                const demuxStream = new Transform({
                    transform(chunk, encoding, callback) {
                        // docker multiplexes stdout/stderr
                        // first byte: 1=stdout, 2=stderr
                        const type = chunk[0];
                        const data = chunk.slice(8).toString();
                        if (type === 1) {
                            stdout += data;
                        }
                        else if (type === 2) {
                            stderr += data;
                        }
                        callback();
                    }
                });
                stream.pipe(demuxStream);
                stream.on('end', async () => {
                    const result = await exec.inspect();
                    resolve({
                        stdout: stdout.trim(),
                        stderr: stderr.trim(),
                        exitCode: result.ExitCode || 0
                    });
                });
                stream.on('error', reject);
            });
        });
    }
    async getContainerLogs(containerId, tail = 100) {
        const container = this.docker.getContainer(containerId);
        const logs = await container.logs({
            stdout: true,
            stderr: true,
            tail,
            timestamps: true
        });
        return logs.toString();
    }
    async streamContainerLogs(containerId, onData) {
        const container = this.docker.getContainer(containerId);
        const stream = await container.logs({
            stdout: true,
            stderr: true,
            follow: true,
            timestamps: true
        });
        stream.on('data', (chunk) => {
            onData(chunk.toString());
        });
        return () => {
            if (stream && typeof stream.destroy === 'function') {
                stream.destroy();
            }
        };
    }
}
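Not part of the package diff: a sketch of the kind of spec object `createContainer` consumes, inferred from the property accesses in the file above (`jobId`, `userId`, `image`, `cmd`, `env`, `cpus`, `memory`, `disk`, `gpu`, `volumes`). Field values and names outside that list are illustrative assumptions, not documented API.

// illustrative spec only - shape inferred from DockerManager.createContainer above
const spec = {
    jobId: 'job-123',                 // used in container, network, and volume names
    userId: 'user-abc',
    image: 'node:20-alpine',
    cmd: ['node', 'server.js'],       // replaced by a shell if KOVA_STARTUP_SCRIPT is set in env
    env: { PORT: '8080' },            // object or ['KEY=VALUE'] array are both accepted
    cpus: 2,                          // cores, converted to NanoCpus
    memory: 512,                      // MB, converted to bytes
    disk: 10,                         // GB, applied via StorageOpt where supported
    gpu: 0,                           // >0 adds a DeviceRequests entry for GPU access
    volumes: [{ name: 'data', mountPath: '/data', persistent: true }]
};
// const container = await new DockerManager().createContainer(spec);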
package/dist/lib/logger.js
@@ -0,0 +1,12 @@
import pino from 'pino';
// just a logger nothing fancy
export const logger = pino({
    level: process.env.LOG_LEVEL || 'info',
    transport: process.env.NODE_ENV !== 'production' ? {
        target: 'pino-pretty',
        options: {
            colorize: true,
            ignore: 'pid,hostname'
        }
    } : undefined
});
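Not part of the package diff: a minimal sketch of how the shared logger above can be used. Only `logger` and the `LOG_LEVEL` environment variable come from the file; the `module` binding and messages are illustrative, and `child()` is standard pino usage rather than anything the package defines.

// sketch only - run with LOG_LEVEL=debug to see the debug line
import { logger } from './lib/logger.js';

const log = logger.child({ module: 'example' });
log.info({ port: 4001 }, 'node started');
log.debug('only visible when LOG_LEVEL=debug');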
package/dist/lib/message-signer.js
@@ -0,0 +1,93 @@
import crypto from 'crypto';
import { logger } from './logger.js';
export class MessageSigner {
    privateKey;
    publicKey;
    publicKeyPem;
    usedNonces = new Set();
    nonceCleanupInterval;
    constructor(keyPair) {
        if (keyPair) {
            this.privateKey = crypto.createPrivateKey(keyPair.privateKey);
            this.publicKey = crypto.createPublicKey(keyPair.publicKey);
            this.publicKeyPem = keyPair.publicKey;
        }
        else {
            const { privateKey, publicKey } = crypto.generateKeyPairSync('ed25519');
            this.privateKey = privateKey;
            this.publicKey = publicKey;
            this.publicKeyPem = this.publicKey.export({ type: 'spki', format: 'pem' }).toString();
        }
        // cleanup old nonces every 5 min to prevent replay attacks
        this.nonceCleanupInterval = setInterval(() => {
            this.usedNonces.clear();
        }, 5 * 60 * 1000);
    }
    sign(payload) {
        const timestamp = Date.now();
        const nonce = crypto.randomBytes(16).toString('hex');
        const message = {
            payload,
            timestamp,
            nonce
        };
        const messageString = JSON.stringify(message);
        const signature = crypto.sign(null, Buffer.from(messageString), this.privateKey);
        return {
            payload,
            signature: signature.toString('base64'),
            publicKey: this.publicKeyPem,
            timestamp,
            nonce
        };
    }
    verify(signedMessage, maxAge = 60000) {
        try {
            // reject old or future messages
            const age = Date.now() - signedMessage.timestamp;
            if (age > maxAge || age < 0) {
                logger.warn({ age, maxAge }, 'message timestamp out of range');
                return false;
            }
            // prevent replay attacks
            if (this.usedNonces.has(signedMessage.nonce)) {
                logger.warn({ nonce: signedMessage.nonce }, 'nonce already used');
                return false;
            }
            const message = {
                payload: signedMessage.payload,
                timestamp: signedMessage.timestamp,
                nonce: signedMessage.nonce
            };
            const messageString = JSON.stringify(message);
            const signature = Buffer.from(signedMessage.signature, 'base64');
            const publicKey = crypto.createPublicKey(signedMessage.publicKey);
            const isValid = crypto.verify(null, Buffer.from(messageString), publicKey, signature);
            if (isValid) {
                this.usedNonces.add(signedMessage.nonce);
            }
            return isValid;
        }
        catch (err) {
            logger.error({ err }, 'failed to verify signature');
            return false;
        }
    }
    getPublicKey() {
        return this.publicKeyPem;
    }
    getPrivateKey() {
        return this.privateKey.export({ type: 'pkcs8', format: 'pem' }).toString();
    }
    getFingerprint() {
        const hash = crypto.createHash('sha256');
        hash.update(this.publicKeyPem);
        return hash.digest('hex').substring(0, 16);
    }
    destroy() {
        if (this.nonceCleanupInterval) {
            clearInterval(this.nonceCleanupInterval);
        }
        this.usedNonces.clear();
    }
}
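Not part of the package diff: a minimal sign/verify round trip using the MessageSigner above. It shows the ed25519 signature check plus the nonce-based replay rejection; the payload contents and the two instance names are illustrative only.

// sketch only - both signers generate fresh ed25519 key pairs
import { MessageSigner } from './lib/message-signer.js';

const sender = new MessageSigner();
const receiver = new MessageSigner();

const signed = sender.sign({ type: 'bid', price: 42 });
receiver.verify(signed); // true: fresh timestamp, unused nonce, valid signature
receiver.verify(signed); // false: nonce already seen, replay rejected

sender.destroy();   // clears the nonce-cleanup interval
receiver.destroy();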