kova-node-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +138 -0
- package/bin/cli.js +2 -0
- package/dist/__tests__/auto-bidder.test.js +267 -0
- package/dist/__tests__/container-manager.test.js +189 -0
- package/dist/__tests__/deployment-executor.test.js +332 -0
- package/dist/__tests__/heartbeat.test.js +191 -0
- package/dist/__tests__/lease-handler.test.js +268 -0
- package/dist/__tests__/resource-limits.test.js +164 -0
- package/dist/api/server.js +607 -0
- package/dist/cli.js +47 -0
- package/dist/commands/deploy.js +568 -0
- package/dist/commands/earnings.js +70 -0
- package/dist/commands/start.js +358 -0
- package/dist/commands/status.js +50 -0
- package/dist/commands/stop.js +101 -0
- package/dist/lib/client.js +87 -0
- package/dist/lib/config.js +107 -0
- package/dist/lib/docker.js +415 -0
- package/dist/lib/logger.js +12 -0
- package/dist/lib/message-signer.js +93 -0
- package/dist/lib/monitor.js +105 -0
- package/dist/lib/p2p.js +186 -0
- package/dist/lib/resource-limits.js +84 -0
- package/dist/lib/state.js +113 -0
- package/dist/lib/types.js +2 -0
- package/dist/lib/usage-meter.js +63 -0
- package/dist/services/auto-bidder.js +332 -0
- package/dist/services/container-manager.js +282 -0
- package/dist/services/deployment-executor.js +1562 -0
- package/dist/services/heartbeat.js +110 -0
- package/dist/services/job-handler.js +241 -0
- package/dist/services/lease-handler.js +382 -0
- package/package.json +51 -0
package/dist/services/lease-handler.js
ADDED
@@ -0,0 +1,382 @@
// lease handler - monitors for won bids and executes deployments
// fetches manifests, starts containers, streams logs
import { logger } from '../lib/logger.js';
import { stateManager } from '../lib/state.js';
import Docker from 'dockerode';
export class LeaseHandler {
    config;
    executor;
    p2pNode;
    docker;
    pollingInterval = null;
    healthCheckInterval = null;
    activeLeases = new Set();
    filesVersions = new Map();
    restartAttempts = new Map();
    // log batching - buffer logs per deployment and flush periodically
    logBuffer = new Map();
    logFlushInterval = null;
    static LOG_FLUSH_MS = 2000; // flush every 2 seconds
    static LOG_BATCH_MAX = 50; // flush if buffer hits this size
    constructor(config, executor, p2pNode) {
        this.config = config;
        this.executor = executor;
        this.p2pNode = p2pNode;
        this.docker = new Docker();
        this.executor.on('log', (logData) => {
            this.bufferLog(logData);
        });
        // setup p2p event listeners for real-time notifications
        if (this.p2pNode) {
            this.setupP2PListeners();
        }
    }
    setupP2PListeners() {
        if (!this.p2pNode)
            return;
        // handle manifest delivery via p2p
        this.p2pNode.on('deployment-manifest', async (data) => {
            if (data.nodeId !== this.config.nodeId)
                return;
            logger.info({ deploymentId: data.deploymentId }, 'received manifest via p2p');
            try {
                await this.executor.executeDeployment({
                    deploymentId: data.deploymentId,
                    leaseId: data.leaseId || '',
                    manifest: data.manifest
                });
                this.activeLeases.add(data.deploymentId);
                this.filesVersions.set(data.deploymentId, 0);
                stateManager.addDeployment(data.deploymentId);
                logger.info({ deploymentId: data.deploymentId }, 'deployment started from p2p manifest');
            }
            catch (err) {
                logger.error({ err, deploymentId: data.deploymentId }, 'failed to execute deployment from p2p');
            }
        });
        // handle deployment closure notification
        this.p2pNode.on('deployment-close', async (data) => {
            if (data.nodeId !== this.config.nodeId)
                return;
            logger.info({ deploymentId: data.deploymentId }, 'received closure notification via p2p');
            try {
                // use closeDeployment to permanently delete all resources including volumes
                await this.executor.closeDeployment(data.deploymentId);
                this.activeLeases.delete(data.deploymentId);
                this.filesVersions.delete(data.deploymentId);
                stateManager.removeDeployment(data.deploymentId);
                logger.info({ deploymentId: data.deploymentId }, 'deployment closed permanently via p2p notification');
            }
            catch (err) {
                logger.error({ err, deploymentId: data.deploymentId }, 'failed to close deployment from p2p');
            }
        });
        // handle insufficient funds pause notification
        this.p2pNode.on('deployment-paused', async (data) => {
            if (data.nodeId !== this.config.nodeId)
                return;
            logger.warn({
                deploymentId: data.deploymentId,
                reason: data.reason
            }, 'received pause notification - insufficient funds');
            // container will keep running but user is warned
            // orchestrator will close deployment if funds not added
        });
    }
    // buffer a log line for batched sending
    bufferLog(logData) {
        const { deploymentId, serviceName, logLine, stream } = logData;
        let buffer = this.logBuffer.get(deploymentId);
        if (!buffer) {
            buffer = [];
            this.logBuffer.set(deploymentId, buffer);
        }
        buffer.push({ serviceName, logLine, stream });
        // flush immediately if buffer is full
        if (buffer.length >= LeaseHandler.LOG_BATCH_MAX) {
            this.flushLogs(deploymentId).catch(err => {
                logger.warn({ err: err.message, deploymentId }, 'failed to flush logs');
            });
        }
    }
    // flush buffered logs for a deployment (or all if no id)
    async flushLogs(deploymentId) {
        const ids = deploymentId ? [deploymentId] : Array.from(this.logBuffer.keys());
        for (const id of ids) {
            const buffer = this.logBuffer.get(id);
            if (!buffer || buffer.length === 0)
                continue;
            // take the buffer and clear it
            const logs = buffer.splice(0, buffer.length);
            try {
                await this.sendLogBatchToOrchestrator(id, logs);
            }
            catch (err) {
                logger.debug({ err, deploymentId: id, count: logs.length }, 'failed to send log batch');
            }
        }
    }
    // start monitoring for new leases
    start(intervalMs = 10000) {
        if (this.pollingInterval) {
            logger.warn('lease handler already running');
            return;
        }
        logger.info({ intervalMs }, 'starting lease handler');
        this.pollingInterval = setInterval(async () => {
            try {
                await this.pollLeases();
            }
            catch (err) {
                logger.error({ err }, 'lease polling failed');
            }
        }, intervalMs);
        // start container health check (every 30 seconds)
        this.healthCheckInterval = setInterval(async () => {
            try {
                await this.checkAndRestartContainers();
            }
            catch (err) {
                logger.error({ err }, 'container health check failed');
            }
        }, 30000);
        // start log flush timer
        this.logFlushInterval = setInterval(async () => {
            try {
                await this.flushLogs();
            }
            catch (err) {
                logger.debug({ err }, 'log flush failed');
            }
        }, LeaseHandler.LOG_FLUSH_MS);
        // run immediately
        this.pollLeases();
    }
    // check container health and restart stopped containers
    async checkAndRestartContainers() {
        for (const deploymentId of this.activeLeases) {
            try {
                // find containers for this deployment
                const containers = await this.docker.listContainers({
                    all: true,
                    filters: { label: [`kova.deployment=${deploymentId}`] }
                });
                for (const containerInfo of containers) {
                    const containerName = containerInfo.Names[0]?.replace('/', '') || '';
                    const isRunning = containerInfo.State === 'running';
                    if (!isRunning) {
                        const attempts = this.restartAttempts.get(deploymentId) || 0;
                        // limit restart attempts to prevent infinite loops
                        if (attempts >= 5) {
                            logger.error({
                                deploymentId,
                                containerName,
                                attempts
                            }, 'max restart attempts reached - container needs manual intervention');
                            continue;
                        }
                        logger.warn({
                            deploymentId,
                            containerName,
                            state: containerInfo.State,
                            status: containerInfo.Status
                        }, 'container stopped - attempting restart');
                        try {
                            const container = this.docker.getContainer(containerInfo.Id);
                            await container.start();
                            // reset attempts on successful restart
                            this.restartAttempts.set(deploymentId, 0);
                            logger.info({
                                deploymentId,
                                containerName
                            }, 'container restarted successfully');
                        }
                        catch (err) {
                            this.restartAttempts.set(deploymentId, attempts + 1);
                            // if network error, try to recreate the container
                            if (err.message?.includes('network') || err.message?.includes('not found')) {
                                logger.warn({
                                    deploymentId,
                                    containerName,
                                    error: err.message
                                }, 'network error on restart - container may need recreation');
                            }
                            else {
                                logger.error({
                                    err,
                                    deploymentId,
                                    containerName
                                }, 'failed to restart container');
                            }
                        }
                    }
                    else {
                        // container is running, reset restart attempts
                        this.restartAttempts.delete(deploymentId);
                    }
                }
            }
            catch (err) {
                logger.debug({ err, deploymentId }, 'health check error for deployment');
            }
        }
    }
    stop() {
        if (this.pollingInterval) {
            clearInterval(this.pollingInterval);
            this.pollingInterval = null;
        }
        if (this.healthCheckInterval) {
            clearInterval(this.healthCheckInterval);
            this.healthCheckInterval = null;
        }
        if (this.logFlushInterval) {
            clearInterval(this.logFlushInterval);
            this.logFlushInterval = null;
        }
        // flush remaining logs before shutdown
        this.flushLogs().catch(err => {
            logger.warn({ err: err.message }, 'failed to flush remaining logs on shutdown');
        });
        logger.info('lease handler stopped');
    }
    // poll for active leases assigned to this node
    async pollLeases() {
        try {
            const response = await fetch(`${this.config.orchestratorUrl}/api/v1/provider/leases`, {
                headers: {
                    'Authorization': `Bearer ${await this.getToken()}`
                }
            });
            if (!response.ok) {
                logger.debug('failed to fetch leases');
                return;
            }
            const data = await response.json();
            const leases = data.leases || [];
            // filter for this node
            const myLeases = leases.filter(l => l.nodeId === this.config.nodeId);
            const activeDeploymentIds = new Set(myLeases.map(l => l.deploymentId));
            // close deployments whose leases are no longer active
            // check both tracked leases and discovered deployments (handles restart case)
            const runningDeployments = new Set([
                ...this.activeLeases,
                ...this.executor.getRunningDeployments()
            ]);
            for (const deploymentId of runningDeployments) {
                if (!activeDeploymentIds.has(deploymentId)) {
                    logger.info({ deploymentId }, 'lease no longer active - closing deployment');
                    try {
                        await this.executor.closeDeployment(deploymentId);
                        this.activeLeases.delete(deploymentId);
                        this.filesVersions.delete(deploymentId);
                        stateManager.removeDeployment(deploymentId);
                        logger.info({ deploymentId }, 'deployment closed after lease ended');
                    }
                    catch (err) {
                        logger.error({ err, deploymentId }, 'failed to close deployment after lease ended');
                    }
                }
            }
            for (const lease of myLeases) {
                const deploymentRunning = this.executor.getDeployment(lease.deploymentId);
                // check if deployment is already running (either we started it or discovered it)
                if (deploymentRunning) {
                    // track this as active
                    this.activeLeases.add(lease.deploymentId);
                    // check if files_version changed (files updated)
                    const lastKnownVersion = this.filesVersions.get(lease.deploymentId);
                    const currentVersion = lease.filesVersion || 0;
                    // if we don't have a tracked version yet (discovered deployment), initialize it
                    if (lastKnownVersion === undefined) {
                        this.filesVersions.set(lease.deploymentId, currentVersion);
                        logger.info({ deploymentId: lease.deploymentId, filesVersion: currentVersion }, 'initialized files_version for existing deployment');
                        continue;
                    }
                    // check for updates
                    if (currentVersion > lastKnownVersion) {
                        logger.info({
                            deploymentId: lease.deploymentId,
                            oldVersion: lastKnownVersion,
                            newVersion: currentVersion
                        }, 'files updated - syncing changes');
                        try {
                            // get service names from manifest
                            const services = lease.manifest?.services || {};
                            const serviceNames = Object.keys(services);
                            // update files for each service (or use first service if only one)
                            const serviceName = serviceNames[0] || 'web';
                            await this.executor.updateDeploymentFiles(lease.deploymentId, serviceName);
                            // update tracked version
                            this.filesVersions.set(lease.deploymentId, currentVersion);
                            logger.info({ deploymentId: lease.deploymentId, serviceName }, 'files synced successfully');
                        }
                        catch (err) {
                            logger.error({ err, deploymentId: lease.deploymentId }, 'failed to sync files');
                        }
                    }
                    continue;
                }
                logger.info({ leaseId: lease.id, deploymentId: lease.deploymentId }, 'new lease assigned');
                // execute deployment (will download files at current version)
                await this.executeDeployment(lease);
                // track files_version only AFTER successful deployment
                // this ensures we don't miss updates that happened before we started
                this.filesVersions.set(lease.deploymentId, lease.filesVersion || 0);
            }
        }
        catch (err) {
            logger.debug({ err }, 'lease polling error');
        }
    }
    // execute deployment from lease
    async executeDeployment(lease) {
        try {
            this.activeLeases.add(lease.deploymentId);
            logger.info({ deploymentId: lease.deploymentId, manifest: lease.manifest }, 'executing deployment');
            await this.executor.executeDeployment({
                deploymentId: lease.deploymentId,
                leaseId: lease.id,
                manifest: lease.manifest
            });
            stateManager.addDeployment(lease.deploymentId);
            logger.info({ deploymentId: lease.deploymentId }, 'deployment running');
        }
        catch (err) {
            logger.error({ err, deploymentId: lease.deploymentId }, 'deployment execution failed');
            this.activeLeases.delete(lease.deploymentId);
        }
    }
    // send log batch to orchestrator
    async sendLogBatchToOrchestrator(deploymentId, logs) {
        try {
            const response = await fetch(`${this.config.orchestratorUrl}/api/v1/deployments/${deploymentId}/logs/batch`, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'Authorization': `Bearer ${await this.getToken()}`
                },
                body: JSON.stringify({ logs })
            });
            if (!response.ok) {
                logger.debug({ deploymentId, count: logs.length }, 'failed to send log batch');
            }
        }
        catch (err) {
            logger.debug({ err }, 'failed to send log batch to orchestrator');
        }
    }
    // get auth token using provider credentials
    async getToken() {
        // prefer api key (sk_live_ format) for direct auth
        if (this.config.apiKey) {
            return this.config.apiKey;
        }
        // fallback to provider token from environment
        const providerToken = process.env.PROVIDER_TOKEN || '';
        if (providerToken) {
            return providerToken;
        }
        throw new Error('no api key or provider token configured');
    }
}
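For orientation, here is a minimal wiring sketch for the class above (not part of the published package). Only LeaseHandler's own surface - the constructor(config, executor, p2pNode), start(), and stop() - comes from the file; the DeploymentExecutor and P2PNode constructors, module paths, and the KOVA_API_KEY variable are assumptions inferred from the other files listed in this diff (dist/services/deployment-executor.js, dist/lib/p2p.js, dist/lib/config.js).

// hypothetical wiring sketch - only LeaseHandler's API is taken from the file above;
// the executor/p2p constructors and the KOVA_API_KEY env var name are assumptions
import { LeaseHandler } from './services/lease-handler.js';
import { DeploymentExecutor } from './services/deployment-executor.js'; // assumed export name
import { P2PNode } from './lib/p2p.js';                                 // assumed export name

const config = {
    nodeId: 'node-123',                                  // compared against lease.nodeId / p2p data.nodeId
    orchestratorUrl: 'https://orchestrator.example.com', // base URL used by pollLeases() and log batching
    apiKey: process.env.KOVA_API_KEY                     // preferred by getToken(); PROVIDER_TOKEN is the fallback
};

const executor = new DeploymentExecutor(config); // must emit 'log' events and expose executeDeployment/closeDeployment
const p2p = new P2PNode(config);                 // must emit 'deployment-manifest', 'deployment-close', 'deployment-paused'

const handler = new LeaseHandler(config, executor, p2p);
handler.start(10000); // poll leases every 10s; health checks run every 30s, log flushes every 2s

process.on('SIGINT', () => {
    handler.stop();   // clears timers and flushes any buffered logs
    process.exit(0);
});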
package/package.json
ADDED
@@ -0,0 +1,51 @@
{
  "name": "kova-node-cli",
  "version": "0.1.0",
  "type": "module",
  "description": "kova network provider node - earn by sharing compute",
  "main": "dist/cli.js",
  "bin": {
    "kova-node": "./bin/cli.js"
  },
  "files": [
    "dist",
    "bin"
  ],
  "scripts": {
    "build": "tsc",
    "dev": "tsx watch src/cli.ts",
    "start": "node dist/cli.js",
    "test": "jest --config jest.config.cjs --forceExit",
    "test:coverage": "jest --config jest.config.cjs --forceExit --coverage"
  },
  "dependencies": {
    "@chainsafe/libp2p-gossipsub": "^13.0.0",
    "@chainsafe/libp2p-noise": "^15.0.0",
    "@chainsafe/libp2p-yamux": "^6.0.0",
    "@libp2p/bootstrap": "^10.0.0",
    "@libp2p/identify": "^4.0.5",
    "@libp2p/kad-dht": "^12.0.0",
    "@libp2p/tcp": "^9.0.0",
    "@libp2p/websockets": "^8.0.0",
    "commander": "^11.1.0",
    "convict": "^6.2.0",
    "decimal.js": "^10.4.0",
    "dockerode": "^4.0.0",
    "dotenv": "^16.3.0",
    "hyperswarm": "^4.14.2",
    "libp2p": "^1.2.0",
    "pino": "^8.16.0",
    "pino-pretty": "^10.2.0",
    "systeminformation": "^5.21.0"
  },
  "devDependencies": {
    "@types/convict": "^6.1.6",
    "@types/dockerode": "^3.3.23",
    "@types/jest": "^30.0.0",
    "@types/node": "^20.10.0",
    "jest": "^30.2.0",
    "ts-jest": "^29.4.6",
    "tsx": "^4.6.0",
    "typescript": "^5.3.0"
  }
}
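The "bin" map points the kova-node command at ./bin/cli.js, which the file list above shows as only two lines; since "type" is "module", a launcher that small is most likely a shebang plus an import of the compiled CLI. The real file is not included in this diff, so the following is only a guess at its shape:

#!/usr/bin/env node
import '../dist/cli.js'; // hypothetical two-line shim; the actual bin/cli.js is not shown in this diff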