@oussema_mili/test-pkg-123 1.1.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. package/LICENSE +29 -0
  2. package/README.md +220 -0
  3. package/auth-callback.html +97 -0
  4. package/auth.js +276 -0
  5. package/cli-commands.js +1921 -0
  6. package/containerManager.js +304 -0
  7. package/daemon/agentRunner.js +491 -0
  8. package/daemon/daemonEntry.js +64 -0
  9. package/daemon/daemonManager.js +266 -0
  10. package/daemon/logManager.js +227 -0
  11. package/dist/styles.css +504 -0
  12. package/docker-actions/apps.js +3913 -0
  13. package/docker-actions/config-transformer.js +380 -0
  14. package/docker-actions/containers.js +355 -0
  15. package/docker-actions/general.js +171 -0
  16. package/docker-actions/images.js +1128 -0
  17. package/docker-actions/logs.js +224 -0
  18. package/docker-actions/metrics.js +270 -0
  19. package/docker-actions/registry.js +1100 -0
  20. package/docker-actions/setup-tasks.js +859 -0
  21. package/docker-actions/terminal.js +247 -0
  22. package/docker-actions/volumes.js +713 -0
  23. package/helper-functions.js +193 -0
  24. package/index.html +83 -0
  25. package/index.js +341 -0
  26. package/package.json +82 -0
  27. package/postcss.config.mjs +5 -0
  28. package/scripts/release.sh +212 -0
  29. package/setup/setupWizard.js +403 -0
  30. package/store/agentSessionStore.js +51 -0
  31. package/store/agentStore.js +113 -0
  32. package/store/configStore.js +171 -0
  33. package/store/daemonStore.js +217 -0
  34. package/store/deviceCredentialStore.js +107 -0
  35. package/store/npmTokenStore.js +65 -0
  36. package/store/registryStore.js +329 -0
  37. package/store/setupState.js +147 -0
  38. package/styles.css +1 -0
  39. package/utils/appLogger.js +223 -0
  40. package/utils/deviceInfo.js +98 -0
  41. package/utils/ecrAuth.js +225 -0
  42. package/utils/encryption.js +112 -0
  43. package/utils/envSetup.js +41 -0
  44. package/utils/errorHandler.js +327 -0
  45. package/utils/portUtils.js +59 -0
  46. package/utils/prerequisites.js +323 -0
  47. package/utils/prompts.js +318 -0
  48. package/utils/ssl-certificates.js +256 -0
  49. package/websocket-server.js +415 -0
@@ -0,0 +1,713 @@
1
+ import { docker } from './containers.js';
2
+ import { formatCreatedTime } from '../helper-functions.js';
3
+ import { exec } from 'child_process';
4
+ import util from 'util';
5
+ import fs from 'fs';
6
+ import path from 'path';
7
+ import os from 'os';
8
+ import { loadConfig } from '../store/configStore.js';
9
+ import { getDaemonState } from '../store/daemonStore.js';
10
+
11
+ const execPromise = util.promisify(exec);
12
+ const fsPromises = fs.promises;
13
+
14
// Resolve the directory where volume backups are stored: ~/volume-backups
function getBackupDir() {
  const home = os.homedir();
  return path.join(home, 'volume-backups');
}
18
+
19
// Resolve the agent's HTTP port: prefer the running daemon's recorded port,
// fall back to the configured websocket port, default to 3001.
function getAgentPort() {
  const daemonPort = getDaemonState()?.port;
  if (daemonPort) {
    return daemonPort;
  }
  return loadConfig().wsPort || 3001;
}
28
+
29
// Module-level cache for parsed `docker system df -v` output, so that
// concurrent requests don't each spawn their own (slow) df operation.
let dfCache = null;     // last parsed { volumeName: humanReadableSize } map
let dfCacheTime = 0;    // epoch ms when dfCache was last refreshed
const CACHE_TTL = 5000; // 5 seconds cache
33
+
34
// Map of action name -> handler. Arrow wrappers defer resolution so the
// table can be declared ahead of the hoisted handler definitions below.
const VOLUME_ACTION_HANDLERS = {
  fetchVolumes: (ws, payload) => handleFetchVolumes(ws, payload),
  createVolume: (ws, payload) => handleCreateVolume(ws, payload),
  deleteVolume: (ws, payload) => handleDeleteVolume(ws, payload),
  inspectVolume: (ws, payload) => handleInspectVolume(ws, payload),
  getContainersUsingVolume: (ws, payload) =>
    handleGetContainersUsingVolume(ws, payload),
  backupVolume: (ws, payload) => handleBackupVolume(ws, payload),
  listVolumeBackups: (ws, payload) => handleListVolumeBackups(ws, payload),
  restoreVolumeFromBackup: (ws, payload) =>
    handleRestoreVolumeFromBackup(ws, payload),
  deleteVolumeBackup: (ws, payload) => handleDeleteVolumeBackup(ws, payload),
  getBackupDownloadUrl: (ws, payload) =>
    handleGetBackupDownloadUrl(ws, payload),
};

// Dispatch an incoming volume action to its handler.
// Throws for unknown actions (Object.hasOwn guards against inherited keys
// such as "toString" being treated as valid actions).
async function handleVolumeAction(ws, action, payload) {
  if (!Object.hasOwn(VOLUME_ACTION_HANDLERS, action)) {
    throw new Error(`Unknown volume action: ${action}`);
  }
  return VOLUME_ACTION_HANDLERS[action](ws, payload);
}
60
+
61
// Return the names of containers (running or stopped) that mount the given
// volume, either by volume name or by its `/<name>/_data` mountpoint source.
// Inspections run in parallel; a single failing inspect is skipped rather
// than aborting the whole scan (the original serialized every inspect and
// returned [] if any one of them threw).
async function getContainersUsingVolume(volumeName) {
  try {
    const containers = await docker.listContainers({ all: true });
    const dataSuffix = `/${volumeName}/_data`;

    const names = await Promise.all(
      containers.map(async (container) => {
        try {
          const containerInfo = await docker
            .getContainer(container.Id)
            .inspect();
          const mounts = containerInfo.Mounts || [];
          const usesVolume = mounts.some(
            (mount) =>
              mount.Name === volumeName ||
              (mount.Source && mount.Source.endsWith(dataSuffix))
          );
          // Docker reports names with a leading slash; strip it.
          return usesVolume ? containerInfo.Name.replace(/^\//, '') : null;
        } catch (inspectError) {
          // Container may have been removed between list and inspect.
          return null;
        }
      })
    );

    return names.filter((name) => name !== null);
  } catch (error) {
    console.error('Error getting containers using volume:', error);
    return [];
  }
}
89
+
90
// Parse `docker system df -v` into a { volumeName: humanSize } map.
// The result is cached for CACHE_TTL ms so concurrent callers share a
// single df run instead of spawning overlapping docker operations.
async function getAllVolumesSizes() {
  try {
    const now = Date.now();
    const cacheIsFresh = dfCache && now - dfCacheTime < CACHE_TTL;
    if (cacheIsFresh) {
      return dfCache;
    }

    // Single df invocation; output is a set of whitespace-aligned tables.
    const { stdout } = await execPromise('docker system df -v');

    const sizes = {};
    for (const rawLine of stdout.split('\n')) {
      // Skip blank lines and the volume table's header row.
      if (!rawLine.trim() || rawLine.includes('VOLUME NAME')) {
        continue;
      }
      const columns = rawLine.trim().split(/\s+/);
      if (columns.length >= 3) {
        // Column layout: NAME  LINKS  SIZE ...
        const [name, , size] = columns;
        sizes[name] = size;
      }
    }

    dfCache = sizes;
    dfCacheTime = now;
    return sizes;
  } catch (error) {
    console.error('Error fetching volume sizes:', error.message);
    return {};
  }
}
130
+
131
// Look up a single volume's human-readable size. Accepts an optional
// pre-fetched size map to avoid redundant df runs; otherwise falls back to
// the (cached) getAllVolumesSizes(). Returns 'N/A' when unknown.
async function getVolumeSize(volumeName, allSizes = null) {
  try {
    const knownSize = allSizes?.[volumeName];
    if (knownSize) {
      return knownSize;
    }
    const sizes = await getAllVolumesSizes();
    return sizes[volumeName] || 'N/A';
  } catch (error) {
    console.error('Error fetching volume size:', error.message);
    return 'N/A';
  }
}
147
+
148
// Fetch all Docker volumes with size/usage metadata and send them to the
// client as a `volumes` message (or an `error` message on failure).
async function handleFetchVolumes(ws, payload = {}) {
  try {
    // The Docker API returns `Volumes: null` (not []) when no volumes
    // exist, which would crash `.map` below — guard with an empty array.
    const { Volumes } = await docker.listVolumes();
    const volumeList = Volumes ?? [];

    // Get all volume sizes ONCE to avoid concurrent docker system df operations
    const allSizes = await getAllVolumesSizes();

    const formattedVolumes = await Promise.all(
      volumeList.map(async (volume) => {
        const usedBy = await getContainersUsingVolume(volume.Name);
        return {
          id: volume.Name,
          name: volume.Name,
          driver: volume.Driver,
          mountpoint: volume.Mountpoint,
          // formatCreatedTime expects a unix timestamp in seconds.
          created: formatCreatedTime(
            new Date(volume.CreatedAt || Date.now()).getTime() / 1000
          ),
          size: allSizes[volume.Name] || 'N/A',
          used: usedBy.length > 0,
          usedBy,
        };
      })
    );

    // Sort volumes alphabetically by name for consistent ordering
    formattedVolumes.sort((a, b) => a.name.localeCompare(b.name));

    ws.send(
      JSON.stringify({
        type: 'volumes',
        volumes: formattedVolumes,
        requestId: payload.requestId,
      })
    );
  } catch (error) {
    console.error('Error fetching volumes:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: 'Failed to fetch volumes: ' + error.message,
        requestId: payload.requestId,
      })
    );
  }
}
196
+
197
// Create a named volume with the requested driver and reply with a
// `volumeCreated` summary the UI can insert directly into its list.
async function handleCreateVolume(ws, payload) {
  try {
    const { name, driver, requestId } = payload;

    const created = await docker.createVolume({
      Name: name,
      Driver: driver,
    });

    // A freshly created volume is empty and unattached by definition.
    const volumeSummary = {
      id: created.Name,
      name: created.Name,
      driver: created.Driver,
      mountpoint: created.Mountpoint,
      created: 'Just now',
      size: '0 B',
      used: false,
      usedBy: [],
    };

    ws.send(
      JSON.stringify({
        type: 'volumeCreated',
        volume: volumeSummary,
        requestId,
      })
    );
  } catch (error) {
    console.error('Error creating volume:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: 'Failed to create volume: ' + error.message,
        requestId: payload.requestId,
      })
    );
  }
}
233
+
234
// Delete a volume by name/id. Refuses (with an error message) when any
// container still mounts the volume; replies `volumeDeleted` on success.
async function handleDeleteVolume(ws, payload) {
  try {
    const { id, requestId } = payload;

    // Guard: never remove a volume that is still attached to containers.
    const attachedContainers = await getContainersUsingVolume(id);
    if (attachedContainers.length > 0) {
      ws.send(
        JSON.stringify({
          type: 'error',
          error: `Volume is in use by containers: ${attachedContainers.join(', ')}`,
          requestId,
        })
      );
      return;
    }

    await docker.getVolume(id).remove();

    ws.send(
      JSON.stringify({
        type: 'volumeDeleted',
        id,
        success: true,
        requestId,
      })
    );
  } catch (error) {
    console.error('Error deleting volume:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: 'Failed to delete volume: ' + error.message,
        requestId: payload.requestId,
      })
    );
  }
}
274
+
275
// Inspect a volume and augment Docker's raw inspection data with computed
// size/usage fields (both top-level and in Docker's UsageData shape).
async function handleInspectVolume(ws, payload) {
  try {
    const { name, requestId } = payload;

    const inspectionData = await docker.getVolume(name).inspect();

    // Size and attached-container lookup are independent — run in parallel.
    const [size, usedBy] = await Promise.all([
      getVolumeSize(name),
      getContainersUsingVolume(name),
    ]);

    const fullInspectionData = {
      ...inspectionData,
      size,
      used: usedBy.length > 0,
      usedBy,
      // Mirror the size/refcount in Docker's native UsageData format too.
      UsageData: {
        Size: size,
        RefCount: usedBy.length,
      },
    };

    ws.send(
      JSON.stringify({
        type: 'volumeInspected',
        data: fullInspectionData,
        requestId,
      })
    );
  } catch (error) {
    console.error('Error inspecting volume:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: 'Failed to inspect volume: ' + error.message,
        requestId: payload.requestId,
      })
    );
  }
}
318
+
319
// Reply with the list of container names that currently mount the volume.
async function handleGetContainersUsingVolume(ws, payload) {
  try {
    const { name, requestId } = payload;
    const containers = await getContainersUsingVolume(name);

    const response = {
      type: 'containersUsingVolume',
      containers,
      requestId,
    };
    ws.send(JSON.stringify(response));
  } catch (error) {
    console.error('Error getting containers using volume:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: 'Failed to get containers using volume: ' + error.message,
        requestId: payload.requestId,
      })
    );
  }
}
343
+
344
// Create a tar backup of a named volume under ~/volume-backups using a
// throwaway alpine container. Replies `backupVolume` with the backup path.
async function handleBackupVolume(ws, payload = {}) {
  try {
    const { volumeName, requestId } = payload;

    if (!volumeName) {
      throw new Error('Missing volume name.');
    }

    // Restrict to Docker's volume-name charset. This also prevents `..`,
    // `/` or `:` from being injected into the bind-mount spec / filename
    // built below from client-supplied input.
    if (!/^[A-Za-z0-9][A-Za-z0-9_.-]*$/.test(volumeName)) {
      throw new Error('Invalid volume name.');
    }

    // mkdir with recursive:true is a no-op when the directory exists, so
    // no try/catch wrapper is needed; real failures should surface.
    const backupDir = getBackupDir();
    await fsPromises.mkdir(backupDir, { recursive: true });

    // Ensure alpine:latest is available locally, pulling it if missing.
    try {
      await docker.getImage('alpine:latest').inspect();
      console.log('Alpine image found, proceeding with backup');
    } catch (inspectError) {
      await new Promise((resolve, reject) => {
        docker.pull('alpine:latest', (err, stream) => {
          if (err) {
            reject(err);
            return;
          }
          docker.modem.followProgress(stream, (progressErr, res) => {
            if (progressErr) {
              reject(progressErr);
            } else {
              resolve(res);
            }
          });
        });
      });
      // Additional wait to ensure image is fully available
      await new Promise((resolve) => setTimeout(resolve, 2000));
    }

    const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
    const backupFileName = `${volumeName}-backup-${timestamp}.tar`;
    const backupFilePath = path.join(backupDir, backupFileName);

    // Temporary container: volume mounted read-only at /data, backup dir
    // writable at /backup; tar archives /data into the backup file.
    const container = await docker.createContainer({
      Image: 'alpine:latest',
      Cmd: ['tar', 'cf', `/backup/${backupFileName}`, '-C', '/data', '.'],
      HostConfig: {
        Binds: [
          `${volumeName}:/data:ro`, // Mount volume as read-only
          `${backupDir}:/backup`, // Mount backup directory
        ],
      },
      WorkingDir: '/data',
    });

    let result;
    try {
      await container.start();
      result = await container.wait();
    } finally {
      // Always clean up the helper container, even when start/wait throws
      // (the original leaked the container on failure).
      await container.remove({ force: true }).catch(() => {});
    }

    if (result.StatusCode !== 0) {
      throw new Error(
        `Backup process failed with status code: ${result.StatusCode}`
      );
    }

    // Verify the backup file actually landed on disk.
    let stats;
    try {
      stats = await fsPromises.stat(backupFilePath);
    } catch (statError) {
      throw new Error('Backup file was not created successfully');
    }
    console.log(
      `✅ Backup created successfully: ${backupFilePath} (${formatFileSize(
        stats.size
      )})`
    );

    ws.send(
      JSON.stringify({
        type: 'backupVolume',
        message: `Backup of volume "${volumeName}" completed successfully.`,
        backupPath: backupFilePath,
        requestId,
      })
    );
  } catch (error) {
    console.error('Error backing up volume:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: `Failed to backup volume: ${error.message}`,
        requestId: payload?.requestId,
      })
    );
  }
}
447
+
448
// List all backup archives in the backup directory, newest first.
// Each entry carries the source volume name (parsed from the filename),
// absolute path, human-readable size, and creation time (unix seconds).
async function handleListVolumeBackups(ws, payload = {}) {
  try {
    const { requestId } = payload;

    // Use the same backup directory as backup creation.
    const backupDir = getBackupDir();
    const backups = [];

    try {
      // Ensure backup directory exists
      await fsPromises.mkdir(backupDir, { recursive: true });

      const files = await fsPromises.readdir(backupDir);
      const backupFiles = files.filter(
        (file) => file.includes('-backup-') && file.endsWith('.tar')
      );

      for (const file of backupFiles) {
        const filePath = path.join(backupDir, file);

        let stats;
        try {
          stats = await fsPromises.stat(filePath);
        } catch (statError) {
          // File vanished between readdir and stat — skip it instead of
          // failing the whole listing (the original aborted entirely).
          continue;
        }

        backups.push({
          id: `backup-${file}`,
          // Filename format: volumeName-backup-timestamp.tar
          volumeName: file.split('-backup-')[0],
          path: filePath,
          size: formatFileSize(stats.size),
          created: Math.floor(stats.mtime.getTime() / 1000),
        });
      }

      // `created` is a unix timestamp in seconds — compare numerically
      // (the original routed it through new Date(), which treats the
      // number as milliseconds; ordering happened to work but was wrong
      // in intent). Newest first.
      backups.sort((a, b) => b.created - a.created);
    } catch (dirError) {
      console.log('No backup directory or files found:', dirError.message);
    }

    ws.send(
      JSON.stringify({
        type: 'volumeBackups',
        backups,
        requestId,
      })
    );
  } catch (error) {
    console.error('Error listing volume backups:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: `Failed to list volume backups: ${error.message}`,
        requestId: payload.requestId,
      })
    );
  }
}
507
+
508
// Format a byte count as a human-readable string, e.g. 1536 -> "1.5 KB".
// Fixes in this revision: sizes >= 1 TB previously indexed past the units
// array (yielding "1 undefined"), and negative/NaN input produced garbage.
function formatFileSize(bytes) {
  // Treat zero, negative, and non-finite input uniformly as "0 B".
  if (!Number.isFinite(bytes) || bytes <= 0) return '0 B';
  const k = 1024;
  const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
  // Clamp so anything >= 1 TB still renders with the largest known unit.
  const i = Math.min(
    Math.floor(Math.log(bytes) / Math.log(k)),
    sizes.length - 1
  );
  return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
516
+
517
// Restore a volume's contents from a tar backup using a throwaway alpine
// container. Fixes in this revision: adds the backup-directory containment
// check its sibling delete/download handlers already perform (the original
// would extract ANY client-supplied path), validates the volume name, runs
// tar as an argv vector instead of `sh -c <interpolated string>` (which was
// shell-injectable via the filename), and always removes the helper
// container even when start/wait fails.
async function handleRestoreVolumeFromBackup(ws, payload = {}) {
  try {
    const { backupPath, volumeName, requestId } = payload;

    if (!backupPath || !volumeName) {
      throw new Error('Missing backup path or volume name');
    }

    // Same volume-name validation as backup creation: blocks `..`/`:`
    // injection into the bind-mount spec built below.
    if (!/^[A-Za-z0-9][A-Za-z0-9_.-]*$/.test(volumeName)) {
      throw new Error('Invalid volume name');
    }

    // Security check: only allow restoring archives that live strictly
    // inside the managed backup directory and match the naming pattern.
    const normalizedBackupDir = path.resolve(getBackupDir());
    const normalizedBackupPath = path.resolve(backupPath);
    if (
      !normalizedBackupPath.startsWith(normalizedBackupDir + path.sep) ||
      !path.basename(backupPath).includes('-backup-') ||
      !backupPath.endsWith('.tar')
    ) {
      throw new Error('Invalid backup path');
    }

    // Verify backup file exists
    try {
      await fsPromises.access(backupPath);
    } catch (accessError) {
      throw new Error('Backup file not found');
    }

    const backupDir = path.dirname(backupPath);
    const backupFileName = path.basename(backupPath);

    console.log(
      `Restoring volume "${volumeName}" from backup: ${backupFileName}`
    );

    // Ensure alpine:latest is available locally, pulling it if missing.
    try {
      await docker.getImage('alpine:latest').inspect();
      console.log('Alpine image found, proceeding with restore');
    } catch (inspectError) {
      await new Promise((resolve, reject) => {
        docker.pull('alpine:latest', (err, stream) => {
          if (err) {
            reject(err);
            return;
          }
          docker.modem.followProgress(stream, (progressErr, res) => {
            if (progressErr) {
              reject(progressErr);
            } else {
              resolve(res);
            }
          });
        });
      });
      // Additional wait to ensure image is fully available
      await new Promise((resolve) => setTimeout(resolve, 2000));
    }

    // Invoke tar directly (no shell) so the filename is a plain argv
    // element; `-C /data` extracts into the mounted target volume.
    const container = await docker.createContainer({
      Image: 'alpine:latest',
      Cmd: ['tar', 'xf', `/backup/${backupFileName}`, '-C', '/data'],
      HostConfig: {
        Binds: [
          `${volumeName}:/data`, // Mount target volume
          `${backupDir}:/backup:ro`, // Mount backup directory as read-only
        ],
      },
      WorkingDir: '/data',
    });

    let result;
    try {
      await container.start();
      result = await container.wait();
    } finally {
      // Guarantee helper-container cleanup on every path.
      await container.remove({ force: true }).catch(() => {});
    }

    if (result.StatusCode !== 0) {
      throw new Error(
        `Restore process failed with status code: ${result.StatusCode}`
      );
    }

    ws.send(
      JSON.stringify({
        type: 'volumeRestored',
        message: `Volume "${volumeName}" restored successfully from backup`,
        volumeName,
        requestId,
      })
    );
  } catch (error) {
    console.error('Error restoring volume from backup:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: `Failed to restore volume from backup: ${error.message}`,
        requestId: payload.requestId,
      })
    );
  }
}
611
+
612
// Delete a backup archive. Only files strictly inside the managed backup
// directory with the expected `-backup-*.tar` naming may be removed.
// Fix: the original containment check used a bare prefix `startsWith`,
// which a sibling directory like "~/volume-backups-evil/x.tar" would pass;
// appending path.sep closes that bypass.
async function handleDeleteVolumeBackup(ws, payload = {}) {
  try {
    const { backupPath, requestId } = payload;

    if (!backupPath) {
      throw new Error('Missing backup path');
    }

    // Security check: only allow deletion from the backup directory.
    const backupDir = getBackupDir();
    const normalizedBackupPath = path.resolve(backupPath);
    const normalizedBackupDir = path.resolve(backupDir);

    if (
      !normalizedBackupPath.startsWith(normalizedBackupDir + path.sep) ||
      !path.basename(backupPath).includes('-backup-') ||
      !backupPath.endsWith('.tar')
    ) {
      throw new Error('Invalid backup path');
    }

    // Delete the actual backup file
    await fsPromises.unlink(backupPath);

    ws.send(
      JSON.stringify({
        type: 'backupDeleted',
        message: `Backup at "${path.basename(
          backupPath
        )}" deleted successfully`,
        success: true,
        requestId,
      })
    );
  } catch (error) {
    console.error('Error deleting volume backup:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: `Failed to delete volume backup: ${error.message}`,
        requestId: payload.requestId,
      })
    );
  }
}
657
+
658
// Build a download URL (served by the agent's HTTP server) for a backup
// archive. Fix: same `startsWith` prefix-bypass as the delete handler —
// "~/volume-backups-evil/x.tar" passed the old check; require the resolved
// path to sit strictly inside the backup directory (dir + path.sep).
async function handleGetBackupDownloadUrl(ws, payload = {}) {
  try {
    const { backupPath, requestId } = payload;

    if (!backupPath) {
      throw new Error('Missing backup path');
    }

    // Security check: only expose files from the managed backup directory.
    const backupDir = getBackupDir();
    const normalizedBackupPath = path.resolve(backupPath);
    const normalizedBackupDir = path.resolve(backupDir);

    if (
      !normalizedBackupPath.startsWith(normalizedBackupDir + path.sep) ||
      !path.basename(backupPath).includes('-backup-') ||
      !backupPath.endsWith('.tar')
    ) {
      throw new Error('Invalid backup path');
    }

    // Check if file exists before providing download URL
    try {
      await fsPromises.access(backupPath);
    } catch (accessError) {
      throw new Error('Backup file not found');
    }

    // Point the client at the agent's HTTP download endpoint.
    const agentPort = getAgentPort();
    const downloadUrl = `http://localhost:${agentPort}/api/download?path=${encodeURIComponent(backupPath)}`;

    ws.send(
      JSON.stringify({
        type: 'backupDownloadUrl',
        downloadUrl,
        requestId,
      })
    );
  } catch (error) {
    console.error('Error getting backup download URL:', error);
    ws.send(
      JSON.stringify({
        type: 'error',
        error: `Failed to get backup download URL: ${error.message}`,
        requestId: payload.requestId,
      })
    );
  }
}
708
+
709
// Expose the dispatcher both as a default export (object form) and as a
// named export, so callers may use either import style.
export default { handleVolumeAction };

export {
  handleVolumeAction,
};