@fenwave/agent 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,696 @@
+ import { docker } from './containers.js';
+ import { formatCreatedTime } from '../helper-functions.js';
+ import { exec } from 'child_process';
+ import util from 'util';
+ import fs from 'fs';
+ import path from 'path';
+
+ const execPromise = util.promisify(exec);
+ const fsPromises = fs.promises;
+
+ // Cache for docker system df result to avoid concurrent operations
+ let dfCache = null;
+ let dfCacheTime = 0;
+ const CACHE_TTL = 5000; // 5 seconds cache
+
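+ // Route an incoming volume action from the WebSocket client to its handler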
+ async function handleVolumeAction(ws, action, payload) {
+   switch (action) {
+     case 'fetchVolumes':
+       return await handleFetchVolumes(ws, payload);
+     case 'createVolume':
+       return await handleCreateVolume(ws, payload);
+     case 'deleteVolume':
+       return await handleDeleteVolume(ws, payload);
+     case 'inspectVolume':
+       return await handleInspectVolume(ws, payload);
+     case 'getContainersUsingVolume':
+       return await handleGetContainersUsingVolume(ws, payload);
+     case 'backupVolume':
+       return await handleBackupVolume(ws, payload);
+     case 'listVolumeBackups':
+       return await handleListVolumeBackups(ws, payload);
+     case 'restoreVolumeFromBackup':
+       return await handleRestoreVolumeFromBackup(ws, payload);
+     case 'deleteVolumeBackup':
+       return await handleDeleteVolumeBackup(ws, payload);
+     case 'getBackupDownloadUrl':
+       return await handleGetBackupDownloadUrl(ws, payload);
+     default:
+       throw new Error(`Unknown volume action: ${action}`);
+   }
+ }
+
+ // Get containers using a volume
+ async function getContainersUsingVolume(volumeName) {
+   try {
+     const containers = await docker.listContainers({ all: true });
+     const usedBy = [];
+
+     for (const container of containers) {
+       const containerInfo = await docker.getContainer(container.Id).inspect();
+
+       if (containerInfo.Mounts) {
+         for (const mount of containerInfo.Mounts) {
+           if (
+             mount.Name === volumeName ||
+             (mount.Source && mount.Source.endsWith(`/${volumeName}/_data`))
+           ) {
+             usedBy.push(containerInfo.Name.replace(/^\//, ''));
+             break;
+           }
+         }
+       }
+     }
+
+     return usedBy;
+   } catch (error) {
+     console.error('Error getting containers using volume:', error);
+     return [];
+   }
+ }
+
+ // Get all volume sizes (cached to avoid concurrent docker system df operations)
+ async function getAllVolumesSizes() {
+   try {
+     // Check cache
+     const now = Date.now();
+     if (dfCache && (now - dfCacheTime) < CACHE_TTL) {
+       return dfCache;
+     }
+
+     // Run `docker system df -v` only once
+     const { stdout } = await execPromise('docker system df -v');
+
+     // Split into lines
+     const lines = stdout.split('\n');
+
+     // Parse sizes for all volumes
+     const sizes = {};
+     for (const line of lines) {
+       // Skip header and empty lines
+       if (!line.trim() || line.includes('VOLUME NAME')) continue;
+
+       // Normalize whitespace and split
+       const parts = line.trim().split(/\s+/);
+       if (parts.length >= 3) {
+         const volumeName = parts[0];
+         const size = parts[2];
+         sizes[volumeName] = size;
+       }
+     }
+
+     // Update cache
+     dfCache = sizes;
+     dfCacheTime = now;
+
+     return sizes;
+   } catch (error) {
+     console.error('Error fetching volume sizes:', error.message);
+     return {};
+   }
+ }
+
+ // Get volume size for a specific volume
+ async function getVolumeSize(volumeName, allSizes = null) {
+   try {
+     // If we already have all sizes, use them
+     if (allSizes && allSizes[volumeName]) {
+       return allSizes[volumeName];
+     }
+
+     // Otherwise get all sizes (this will use cache if available)
+     const sizes = await getAllVolumesSizes();
+     return sizes[volumeName] || 'N/A';
+   } catch (error) {
+     console.error('Error fetching volume size:', error.message);
+     return 'N/A';
+   }
+ }
+
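+ // List all volumes with size and usage info and send them to the client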
+ async function handleFetchVolumes(ws, payload = {}) {
+   try {
+     const { Volumes } = await docker.listVolumes();
+
+     // Get all volume sizes ONCE to avoid concurrent docker system df operations
+     const allSizes = await getAllVolumesSizes();
+
+     const volumePromises = Volumes.map(async (volume) => {
+       const usedBy = await getContainersUsingVolume(volume.Name);
+       const size = allSizes[volume.Name] || 'N/A';
+
+       return {
+         id: volume.Name,
+         name: volume.Name,
+         driver: volume.Driver,
+         mountpoint: volume.Mountpoint,
+         created: formatCreatedTime(
+           new Date(volume.CreatedAt || Date.now()).getTime() / 1000
+         ),
+         size,
+         used: usedBy.length > 0,
+         usedBy,
+       };
+     });
+
+     const formattedVolumes = await Promise.all(volumePromises);
+
+     // Sort volumes alphabetically by name for consistent ordering
+     formattedVolumes.sort((a, b) => a.name.localeCompare(b.name));
+
+     ws.send(
+       JSON.stringify({
+         type: 'volumes',
+         volumes: formattedVolumes,
+         requestId: payload.requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error fetching volumes:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: 'Failed to fetch volumes: ' + error.message,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
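+ // Create a new volume and send the result to the client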
+ async function handleCreateVolume(ws, payload) {
+   try {
+     const { name, driver, requestId } = payload;
+
+     const volume = await docker.createVolume({
+       Name: name,
+       Driver: driver,
+     });
+
+     ws.send(
+       JSON.stringify({
+         type: 'volumeCreated',
+         volume: {
+           id: volume.Name,
+           name: volume.Name,
+           driver: volume.Driver,
+           mountpoint: volume.Mountpoint,
+           created: 'Just now',
+           size: '0 B',
+           used: false,
+           usedBy: [],
+         },
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error creating volume:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: 'Failed to create volume: ' + error.message,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
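+ // Delete a volume, refusing if any container still uses it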
+ async function handleDeleteVolume(ws, payload) {
+   try {
+     const { id, requestId } = payload;
+
+     // Check if volume is in use
+     const usedBy = await getContainersUsingVolume(id);
+     if (usedBy.length > 0) {
+       ws.send(
+         JSON.stringify({
+           type: 'error',
+           error: `Volume is in use by containers: ${usedBy.join(', ')}`,
+           requestId,
+         })
+       );
+       return;
+     }
+
+     // Delete the volume
+     const volume = docker.getVolume(id);
+     await volume.remove();
+
+     ws.send(
+       JSON.stringify({
+         type: 'volumeDeleted',
+         id,
+         success: true,
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error deleting volume:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: 'Failed to delete volume: ' + error.message,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
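+ // Inspect a volume and add computed size/usage fields to the result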
+ async function handleInspectVolume(ws, payload) {
+   try {
+     const { name, requestId } = payload;
+     const volume = docker.getVolume(name);
+     const inspectionData = await volume.inspect();
+
+     // Get the volume size and containers using it
+     const [size, usedBy] = await Promise.all([
+       getVolumeSize(name),
+       getContainersUsingVolume(name),
+     ]);
+
+     // Add the additional computed fields to the inspection data
+     const fullInspectionData = {
+       ...inspectionData,
+       size,
+       used: usedBy.length > 0,
+       usedBy,
+       // Also include size in UsageData format
+       UsageData: {
+         Size: size,
+         RefCount: usedBy.length,
+       },
+     };
+
+     ws.send(
+       JSON.stringify({
+         type: 'volumeInspected',
+         data: fullInspectionData,
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error inspecting volume:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: 'Failed to inspect volume: ' + error.message,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
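+ // Send the list of containers that use the given volume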
+ async function handleGetContainersUsingVolume(ws, payload) {
+   try {
+     const { name, requestId } = payload;
+
+     const containers = await getContainersUsingVolume(name);
+
+     ws.send(
+       JSON.stringify({
+         type: 'containersUsingVolume',
+         containers,
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error getting containers using volume:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: 'Failed to get containers using volume: ' + error.message,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
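+ // Back up a volume to a tar archive using a temporary alpine container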
+ async function handleBackupVolume(ws, payload = {}) {
+   try {
+     const { volumeName, requestId } = payload;
+
+     if (!volumeName) {
+       throw new Error('Missing volume name.');
+     }
+
+     // Create proper backup directory (relative to the parent directory)
+     const backupDir = path.join(process.cwd(), '..', 'volume-backups');
+
+     try {
+       await fsPromises.mkdir(backupDir, { recursive: true });
+     } catch (mkdirError) {
+       console.log('Backup directory already exists or created');
+     }
+
+     // Check if alpine image exists first, pull if needed
+     try {
+       await docker.getImage('alpine:latest').inspect();
+       console.log('Alpine image found, proceeding with backup');
+     } catch (inspectError) {
+       // Pull the image and wait for it to complete
+       await new Promise((resolve, reject) => {
+         docker.pull('alpine:latest', (err, stream) => {
+           if (err) {
+             reject(err);
+             return;
+           }
+
+           docker.modem.followProgress(stream, (err, res) => {
+             if (err) {
+               reject(err);
+             } else {
+               resolve(res);
+             }
+           });
+         });
+       });
+
+       // Additional wait to ensure image is fully available
+       await new Promise((resolve) => setTimeout(resolve, 2000));
+     }
+
+     const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
+     const backupFileName = `${volumeName}-backup-${timestamp}.tar`;
+     const backupFilePath = path.join(backupDir, backupFileName);
+
+     // Create a temporary container to access the volume and create backup
+     const container = await docker.createContainer({
+       Image: 'alpine:latest',
+       Cmd: ['tar', 'cf', `/backup/${backupFileName}`, '-C', '/data', '.'],
+       HostConfig: {
+         Binds: [
+           `${volumeName}:/data:ro`, // Mount volume as read-only
+           `${backupDir}:/backup`, // Mount backup directory
+         ],
+       },
+       WorkingDir: '/data',
+     });
+
+     await container.start();
+     const result = await container.wait();
+     await container.remove();
+
+     // Check if backup was created successfully
+     if (result.StatusCode !== 0) {
+       throw new Error(
+         `Backup process failed with status code: ${result.StatusCode}`
+       );
+     }
+
+     // Verify the backup file exists
+     try {
+       const stats = await fsPromises.stat(backupFilePath);
+       console.log(
+         `✅ Backup created successfully: ${backupFilePath} (${formatFileSize(
+           stats.size
+         )})`
+       );
+     } catch (statError) {
+       throw new Error('Backup file was not created successfully');
+     }
+
+     ws.send(
+       JSON.stringify({
+         type: 'backupVolume',
+         message: `Backup of volume "${volumeName}" completed successfully.`,
+         backupPath: backupFilePath,
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error backing up volume:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: `Failed to backup volume: ${error.message}`,
+         requestId: payload?.requestId,
+       })
+     );
+   }
+ }
+
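+ // List backup archives found in the volume-backups directory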
+ async function handleListVolumeBackups(ws, payload = {}) {
+   try {
+     const { requestId } = payload;
+
+     // Use the same backup directory as backup creation (relative to parent directory)
+     const backupDir = path.join(process.cwd(), '..', 'volume-backups');
+     const backups = [];
+
+     try {
+       // Ensure backup directory exists
+       await fsPromises.mkdir(backupDir, { recursive: true });
+
+       const files = await fsPromises.readdir(backupDir);
+       const backupFiles = files.filter(
+         (file) => file.includes('-backup-') && file.endsWith('.tar')
+       );
+
+       for (const file of backupFiles) {
+         const filePath = path.join(backupDir, file);
+         const stats = await fsPromises.stat(filePath);
+
+         // Extract volume name from filename (format: volumeName-backup-timestamp.tar)
+         const volumeName = file.split('-backup-')[0];
+
+         backups.push({
+           id: `backup-${file}`,
+           volumeName: volumeName,
+           path: filePath,
+           size: formatFileSize(stats.size),
+           created: Math.floor(stats.mtime.getTime() / 1000),
+         });
+       }
+
+       // Sort backups by creation time (newest first)
+       backups.sort(
+         (a, b) => new Date(b.created).getTime() - new Date(a.created).getTime()
+       );
+     } catch (dirError) {
+       console.log('No backup directory or files found:', dirError.message);
+     }
+
+     ws.send(
+       JSON.stringify({
+         type: 'volumeBackups',
+         backups,
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error listing volume backups:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: `Failed to list volume backups: ${error.message}`,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
+ // Helper function to format file sizes
+ function formatFileSize(bytes) {
+   if (bytes === 0) return '0 B';
+   const k = 1024;
+   const sizes = ['B', 'KB', 'MB', 'GB'];
+   const i = Math.floor(Math.log(bytes) / Math.log(k));
+   return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
+ }
+
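+ // Restore a volume's contents from a tar backup using a temporary alpine container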
+ async function handleRestoreVolumeFromBackup(ws, payload = {}) {
+   try {
+     const { backupPath, volumeName, requestId } = payload;
+
+     if (!backupPath || !volumeName) {
+       throw new Error('Missing backup path or volume name');
+     }
+
+     // Verify backup file exists
+     try {
+       await fsPromises.access(backupPath);
+     } catch (accessError) {
+       throw new Error('Backup file not found');
+     }
+
+     // Get backup directory and filename
+     const backupDir = path.dirname(backupPath);
+     const backupFileName = path.basename(backupPath);
+
+     console.log(
+       `Restoring volume "${volumeName}" from backup: ${backupFileName}`
+     );
+
+     // Check if alpine image exists first, pull if needed
+     try {
+       await docker.getImage('alpine:latest').inspect();
+       console.log('Alpine image found, proceeding with restore');
+     } catch (inspectError) {
+       // Pull the image and wait for it to complete
+       await new Promise((resolve, reject) => {
+         docker.pull('alpine:latest', (err, stream) => {
+           if (err) {
+             reject(err);
+             return;
+           }
+
+           docker.modem.followProgress(stream, (err, res) => {
+             if (err) {
+               reject(err);
+             } else {
+               resolve(res);
+             }
+           });
+         });
+       });
+
+       // Additional wait to ensure image is fully available
+       await new Promise((resolve) => setTimeout(resolve, 2000));
+     }
+
+     // Create a temporary container to restore the volume
+     const container = await docker.createContainer({
+       Image: 'alpine:latest',
+       Cmd: ['sh', '-c', `cd /data && tar xf /backup/${backupFileName}`],
+       HostConfig: {
+         Binds: [
+           `${volumeName}:/data`, // Mount target volume
+           `${backupDir}:/backup:ro`, // Mount backup directory as read-only
+         ],
+       },
+       WorkingDir: '/data',
+     });
+
+     await container.start();
+     const result = await container.wait();
+     await container.remove();
+
+     // Check if restore was successful
+     if (result.StatusCode !== 0) {
+       throw new Error(
+         `Restore process failed with status code: ${result.StatusCode}`
+       );
+     }
+
+     ws.send(
+       JSON.stringify({
+         type: 'volumeRestored',
+         message: `Volume "${volumeName}" restored successfully from backup`,
+         volumeName,
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error restoring volume from backup:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: `Failed to restore volume from backup: ${error.message}`,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
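+ // Delete a backup archive after validating that the path points into the backup directory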
+ async function handleDeleteVolumeBackup(ws, payload = {}) {
+   try {
+     const { backupPath, requestId } = payload;
+
+     if (!backupPath) {
+       throw new Error('Missing backup path');
+     }
+
+     // Security check: only allow deletion from the backup directory (relative to parent)
+     const backupDir = path.join(process.cwd(), '..', 'volume-backups');
+     const normalizedBackupPath = path.resolve(backupPath);
+     const normalizedBackupDir = path.resolve(backupDir);
+
+     if (
+       !normalizedBackupPath.startsWith(normalizedBackupDir) ||
+       !path.basename(backupPath).includes('-backup-') ||
+       !backupPath.endsWith('.tar')
+     ) {
+       throw new Error('Invalid backup path');
+     }
+
+     // Delete the actual backup file
+     await fsPromises.unlink(backupPath);
+
+     ws.send(
+       JSON.stringify({
+         type: 'backupDeleted',
+         message: `Backup at "${path.basename(
+           backupPath
+         )}" deleted successfully`,
+         success: true,
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error deleting volume backup:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: `Failed to delete volume backup: ${error.message}`,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
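+ // Validate a backup path and return a download URL for the archive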
+ async function handleGetBackupDownloadUrl(ws, payload = {}) {
+   try {
+     const { backupPath, requestId } = payload;
+
+     if (!backupPath) {
+       throw new Error('Missing backup path');
+     }
+
+     // Security check: only allow downloads from the backup directory (relative to parent)
+     const backupDir = path.join(process.cwd(), '..', 'volume-backups');
+     const normalizedBackupPath = path.resolve(backupPath);
+     const normalizedBackupDir = path.resolve(backupDir);
+
+     if (
+       !normalizedBackupPath.startsWith(normalizedBackupDir) ||
+       !path.basename(backupPath).includes('-backup-') ||
+       !backupPath.endsWith('.tar')
+     ) {
+       throw new Error('Invalid backup path');
+     }
+
+     // Check if file exists before providing download URL
+     try {
+       await fsPromises.access(backupPath);
+     } catch (accessError) {
+       throw new Error('Backup file not found');
+     }
+
+     const downloadUrl = `/api/download?path=${encodeURIComponent(backupPath)}`;
+
+     ws.send(
+       JSON.stringify({
+         type: 'backupDownloadUrl',
+         downloadUrl,
+         requestId,
+       })
+     );
+   } catch (error) {
+     console.error('Error getting backup download URL:', error);
+     ws.send(
+       JSON.stringify({
+         type: 'error',
+         error: `Failed to get backup download URL: ${error.message}`,
+         requestId: payload.requestId,
+       })
+     );
+   }
+ }
+
+ export default { handleVolumeAction };
+
+ export {
+   handleVolumeAction,
+ };