@protontech/drive-sdk 0.4.1 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/diagnostic/sdkDiagnostic.js +1 -1
- package/dist/diagnostic/sdkDiagnostic.js.map +1 -1
- package/dist/interface/download.d.ts +4 -4
- package/dist/interface/upload.d.ts +6 -3
- package/dist/internal/apiService/apiService.d.ts +3 -0
- package/dist/internal/apiService/apiService.js +25 -2
- package/dist/internal/apiService/apiService.js.map +1 -1
- package/dist/internal/apiService/apiService.test.js +38 -0
- package/dist/internal/apiService/apiService.test.js.map +1 -1
- package/dist/internal/apiService/driveTypes.d.ts +31 -48
- package/dist/internal/apiService/errors.js +3 -0
- package/dist/internal/apiService/errors.js.map +1 -1
- package/dist/internal/apiService/errors.test.js +15 -7
- package/dist/internal/apiService/errors.test.js.map +1 -1
- package/dist/internal/asyncIteratorMap.d.ts +1 -1
- package/dist/internal/asyncIteratorMap.js +6 -1
- package/dist/internal/asyncIteratorMap.js.map +1 -1
- package/dist/internal/asyncIteratorMap.test.js +9 -0
- package/dist/internal/asyncIteratorMap.test.js.map +1 -1
- package/dist/internal/download/fileDownloader.d.ts +3 -3
- package/dist/internal/download/fileDownloader.js +5 -5
- package/dist/internal/download/fileDownloader.js.map +1 -1
- package/dist/internal/download/fileDownloader.test.js +8 -8
- package/dist/internal/download/fileDownloader.test.js.map +1 -1
- package/dist/internal/nodes/apiService.d.ts +6 -1
- package/dist/internal/nodes/apiService.js +44 -32
- package/dist/internal/nodes/apiService.js.map +1 -1
- package/dist/internal/nodes/apiService.test.js +148 -17
- package/dist/internal/nodes/apiService.test.js.map +1 -1
- package/dist/internal/nodes/debouncer.d.ts +23 -0
- package/dist/internal/nodes/debouncer.js +80 -0
- package/dist/internal/nodes/debouncer.js.map +1 -0
- package/dist/internal/nodes/debouncer.test.d.ts +1 -0
- package/dist/internal/nodes/debouncer.test.js +100 -0
- package/dist/internal/nodes/debouncer.test.js.map +1 -0
- package/dist/internal/nodes/nodesAccess.d.ts +2 -1
- package/dist/internal/nodes/nodesAccess.js +24 -5
- package/dist/internal/nodes/nodesAccess.js.map +1 -1
- package/dist/internal/nodes/nodesAccess.test.js +2 -2
- package/dist/internal/nodes/nodesAccess.test.js.map +1 -1
- package/dist/internal/photos/upload.d.ts +2 -1
- package/dist/internal/photos/upload.js +3 -3
- package/dist/internal/photos/upload.js.map +1 -1
- package/dist/internal/sharingPublic/apiService.d.ts +2 -2
- package/dist/internal/sharingPublic/apiService.js +1 -63
- package/dist/internal/sharingPublic/apiService.js.map +1 -1
- package/dist/internal/sharingPublic/cryptoCache.d.ts +0 -4
- package/dist/internal/sharingPublic/cryptoCache.js +0 -28
- package/dist/internal/sharingPublic/cryptoCache.js.map +1 -1
- package/dist/internal/sharingPublic/cryptoReporter.d.ts +16 -0
- package/dist/internal/sharingPublic/cryptoReporter.js +44 -0
- package/dist/internal/sharingPublic/cryptoReporter.js.map +1 -0
- package/dist/internal/sharingPublic/cryptoService.d.ts +3 -4
- package/dist/internal/sharingPublic/cryptoService.js +5 -43
- package/dist/internal/sharingPublic/cryptoService.js.map +1 -1
- package/dist/internal/sharingPublic/index.d.ts +21 -3
- package/dist/internal/sharingPublic/index.js +43 -12
- package/dist/internal/sharingPublic/index.js.map +1 -1
- package/dist/internal/sharingPublic/interface.d.ts +0 -1
- package/dist/internal/sharingPublic/nodes.d.ts +13 -0
- package/dist/internal/sharingPublic/nodes.js +28 -0
- package/dist/internal/sharingPublic/nodes.js.map +1 -0
- package/dist/internal/sharingPublic/session/session.d.ts +3 -3
- package/dist/internal/sharingPublic/session/url.test.js +3 -3
- package/dist/internal/sharingPublic/shares.d.ts +34 -0
- package/dist/internal/sharingPublic/shares.js +69 -0
- package/dist/internal/sharingPublic/shares.js.map +1 -0
- package/dist/internal/upload/apiService.js +10 -1
- package/dist/internal/upload/apiService.js.map +1 -1
- package/dist/internal/upload/controller.d.ts +8 -2
- package/dist/internal/upload/controller.js.map +1 -1
- package/dist/internal/upload/fileUploader.d.ts +6 -3
- package/dist/internal/upload/fileUploader.js +3 -3
- package/dist/internal/upload/fileUploader.js.map +1 -1
- package/dist/internal/upload/fileUploader.test.js +23 -11
- package/dist/internal/upload/fileUploader.test.js.map +1 -1
- package/dist/internal/upload/streamUploader.d.ts +6 -2
- package/dist/internal/upload/streamUploader.js +8 -4
- package/dist/internal/upload/streamUploader.js.map +1 -1
- package/dist/internal/upload/streamUploader.test.js +10 -6
- package/dist/internal/upload/streamUploader.test.js.map +1 -1
- package/dist/protonDriveClient.d.ts +3 -3
- package/dist/protonDriveClient.js +4 -4
- package/dist/protonDriveClient.js.map +1 -1
- package/dist/protonDrivePublicLinkClient.d.ts +31 -4
- package/dist/protonDrivePublicLinkClient.js +52 -9
- package/dist/protonDrivePublicLinkClient.js.map +1 -1
- package/package.json +1 -1
- package/src/diagnostic/sdkDiagnostic.ts +1 -1
- package/src/interface/download.ts +4 -4
- package/src/interface/upload.ts +3 -3
- package/src/internal/apiService/apiService.test.ts +50 -0
- package/src/internal/apiService/apiService.ts +33 -2
- package/src/internal/apiService/driveTypes.ts +31 -48
- package/src/internal/apiService/errors.test.ts +10 -0
- package/src/internal/apiService/errors.ts +5 -1
- package/src/internal/asyncIteratorMap.test.ts +12 -0
- package/src/internal/asyncIteratorMap.ts +8 -0
- package/src/internal/download/fileDownloader.test.ts +8 -8
- package/src/internal/download/fileDownloader.ts +5 -5
- package/src/internal/nodes/apiService.test.ts +199 -16
- package/src/internal/nodes/apiService.ts +62 -49
- package/src/internal/nodes/debouncer.test.ts +129 -0
- package/src/internal/nodes/debouncer.ts +93 -0
- package/src/internal/nodes/nodesAccess.test.ts +2 -2
- package/src/internal/nodes/nodesAccess.ts +30 -5
- package/src/internal/photos/upload.ts +4 -1
- package/src/internal/sharingPublic/apiService.ts +4 -87
- package/src/internal/sharingPublic/cryptoCache.ts +0 -34
- package/src/internal/sharingPublic/cryptoReporter.ts +73 -0
- package/src/internal/sharingPublic/cryptoService.ts +4 -80
- package/src/internal/sharingPublic/index.ts +68 -6
- package/src/internal/sharingPublic/interface.ts +0 -9
- package/src/internal/sharingPublic/nodes.ts +37 -0
- package/src/internal/sharingPublic/session/apiService.ts +1 -1
- package/src/internal/sharingPublic/session/session.ts +3 -3
- package/src/internal/sharingPublic/session/url.test.ts +3 -3
- package/src/internal/sharingPublic/shares.ts +86 -0
- package/src/internal/upload/apiService.ts +12 -1
- package/src/internal/upload/controller.ts +2 -2
- package/src/internal/upload/fileUploader.test.ts +25 -11
- package/src/internal/upload/fileUploader.ts +4 -3
- package/src/internal/upload/streamUploader.test.ts +15 -3
- package/src/internal/upload/streamUploader.ts +8 -3
- package/src/protonDriveClient.ts +4 -4
- package/src/protonDrivePublicLinkClient.ts +93 -12
- package/dist/internal/sharingPublic/manager.d.ts +0 -19
- package/dist/internal/sharingPublic/manager.js +0 -81
- package/dist/internal/sharingPublic/manager.js.map +0 -1
- package/src/internal/sharingPublic/manager.ts +0 -86
@@ -78,7 +78,7 @@ describe('FileDownloader', () => {
         } as DecryptedRevision;
     });
 
-    describe('
+    describe('downloadToStream', () => {
         let onProgress: (downloadedBytes: number) => void;
         let onFinish: () => void;
 
@@ -89,7 +89,7 @@ describe('FileDownloader', () => {
         const verifySuccess = async (
             fileProgress: number = 6, // 3 blocks of length 1, 2, 3
         ) => {
-            const controller = downloader.
+            const controller = downloader.downloadToStream(stream, onProgress);
             await controller.completion();
 
             expect(apiService.iterateRevisionBlocks).toHaveBeenCalledWith('revisionUid', undefined);
@@ -103,7 +103,7 @@ describe('FileDownloader', () => {
         };
 
         const verifyFailure = async (error: string, downloadedBytes: number | undefined) => {
-            const controller = downloader.
+            const controller = downloader.downloadToStream(stream, onProgress);
 
             await expect(controller.completion()).rejects.toThrow(error);
 
@@ -156,9 +156,9 @@ describe('FileDownloader', () => {
         });
 
         it('should reject two download starts', async () => {
-            downloader.
-            expect(() => downloader.
-            expect(() => downloader.
+            downloader.downloadToStream(stream, onProgress);
+            expect(() => downloader.downloadToStream(stream, onProgress)).toThrow('Download already started');
+            expect(() => downloader.unsafeDownloadToStream(stream, onProgress)).toThrow('Download already started');
         });
 
         it('should start a download and write to the stream', async () => {
@@ -347,7 +347,7 @@ describe('FileDownloader', () => {
         });
     });
 
-    describe('
+    describe('unsafeDownloadToStream', () => {
         let onProgress: (downloadedBytes: number) => void;
         let onFinish: () => void;
 
@@ -381,7 +381,7 @@ describe('FileDownloader', () => {
         });
 
         it('should skip verification steps', async () => {
-            const controller = downloader.
+            const controller = downloader.unsafeDownloadToStream(stream, onProgress);
             await controller.completion();
 
             expect(apiService.iterateRevisionBlocks).toHaveBeenCalledWith('revisionUid', undefined);
@@ -138,24 +138,24 @@ export class FileDownloader {
         }
     }
 
-
+    downloadToStream(stream: WritableStream, onProgress?: (downloadedBytes: number) => void): DownloadController {
         if (this.controller.promise) {
             throw new Error(`Download already started`);
         }
-        this.controller.promise = this.
+        this.controller.promise = this.internalDownloadToStream(stream, onProgress);
         return this.controller;
     }
 
-
+    unsafeDownloadToStream(stream: WritableStream, onProgress?: (downloadedBytes: number) => void): DownloadController {
         if (this.controller.promise) {
             throw new Error(`Download already started`);
         }
         const ignoreIntegrityErrors = true;
-        this.controller.promise = this.
+        this.controller.promise = this.internalDownloadToStream(stream, onProgress, ignoreIntegrityErrors);
         return this.controller;
     }
 
-    private async
+    private async internalDownloadToStream(
         stream: WritableStream,
         onProgress?: (downloadedBytes: number) => void,
         ignoreIntegrityErrors = false,
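For orientation, the renamed download methods read roughly like this from caller code. This is a minimal sketch, not taken from the package: the `downloader` (a FileDownloader) and the target `WritableStream` are assumed to be obtained elsewhere; only the method names, signatures, and the "Download already started" behaviour come from the hunks above.

    // Sketch only: `downloader` and `stream` are assumed to exist already.
    const controller = downloader.downloadToStream(stream, (downloadedBytes) => {
        console.log(`downloaded ${downloadedBytes} bytes`);
    });
    await controller.completion();

    // unsafeDownloadToStream(stream, onProgress) is the variant that skips the
    // verification steps; starting either method twice on the same downloader
    // throws "Download already started".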
@@ -1,7 +1,7 @@
 import { MemberRole, NodeType } from '../../interface';
 import { getMockLogger } from '../../tests/logger';
 import { DriveAPIService, ErrorCode, InvalidRequirementsAPIError } from '../apiService';
-import { NodeAPIService } from './apiService';
+import { NodeAPIService, groupNodeUidsByVolumeAndIteratePerBatch } from './apiService';
 import { NodeOutOfSyncError } from './errors';
 
 function generateAPIFileNode(linkOverrides = {}, overrides = {}) {
@@ -476,6 +476,44 @@ describe('nodeAPIService', () => {
                 { uid: 'volumeId~nodeId2', ok: false, error: 'INSUFFICIENT_SCOPE' },
             ]);
         });
+
+        it('should trash nodes in batches', async () => {
+            // @ts-expect-error Mocking for testing purposes
+            apiMock.post = jest.fn(async (_, { LinkIDs }) =>
+                Promise.resolve({
+                    Responses: LinkIDs.map((linkId: string) => ({
+                        LinkID: linkId,
+                        Response: {
+                            Code: ErrorCode.OK,
+                        },
+                    })),
+                }),
+            );
+
+            const nodeUids = Array.from({ length: 250 }, (_, i) => `volumeId1~nodeId${i}`);
+            const nodeIds = nodeUids.map((uid) => uid.split('~')[1]);
+
+            const results = await Array.fromAsync(api.trashNodes(nodeUids));
+            expect(results).toHaveLength(nodeUids.length);
+            expect(results.every((result) => result.ok)).toBe(true);
+
+            expect(apiMock.post).toHaveBeenCalledTimes(3);
+            expect(apiMock.post).toHaveBeenCalledWith(
+                'drive/v2/volumes/volumeId1/trash_multiple',
+                { LinkIDs: nodeIds.slice(0, 100) },
+                undefined,
+            );
+            expect(apiMock.post).toHaveBeenCalledWith(
+                'drive/v2/volumes/volumeId1/trash_multiple',
+                { LinkIDs: nodeIds.slice(100, 200) },
+                undefined,
+            );
+            expect(apiMock.post).toHaveBeenCalledWith(
+                'drive/v2/volumes/volumeId1/trash_multiple',
+                { LinkIDs: nodeIds.slice(200, 250) },
+                undefined,
+            );
+        });
     });
 
     describe('restoreNodes', () => {
@@ -517,17 +555,28 @@ describe('nodeAPIService', () => {
             ]);
         });
 
-        it('should
-
-
-
-
-
-
+        it('should restore nodes from multiple volumes', async () => {
+            // @ts-expect-error Mocking for testing purposes
+            apiMock.put = jest.fn(async (_, { LinkIDs }) =>
+                Promise.resolve({
+                    Responses: LinkIDs.map((linkId: string) => ({
+                        LinkID: linkId,
+                        Response: {
+                            Code: ErrorCode.OK,
+                        },
+                    })),
+                }),
+            );
+
+            const result = await Array.fromAsync(api.restoreNodes(['volumeId1~nodeId1', 'volumeId2~nodeId2']));
+            expect(result).toEqual([
+                { uid: 'volumeId1~nodeId1', ok: true },
+                { uid: 'volumeId2~nodeId2', ok: true },
+            ]);
         });
     });
 
-    describe('
+    describe('deleteNodes', () => {
         it('should delete nodes', async () => {
             // @ts-expect-error Mocking for testing purposes
             apiMock.post = jest.fn(async () =>
|
|
|
557
606
|
]);
|
|
558
607
|
});
|
|
559
608
|
|
|
560
|
-
it('should
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
609
|
+
it('should delete nodes from multiple volumes', async () => {
|
|
610
|
+
// @ts-expect-error Mocking for testing purposes
|
|
611
|
+
apiMock.post = jest.fn(async (_, { LinkIDs }) =>
|
|
612
|
+
Promise.resolve({
|
|
613
|
+
Responses: LinkIDs.map((linkId: string) => ({
|
|
614
|
+
LinkID: linkId,
|
|
615
|
+
Response: {
|
|
616
|
+
Code: ErrorCode.OK,
|
|
617
|
+
},
|
|
618
|
+
})),
|
|
619
|
+
}),
|
|
620
|
+
);
|
|
621
|
+
|
|
622
|
+
const result = await Array.fromAsync(api.deleteNodes(['volumeId1~nodeId1', 'volumeId2~nodeId2']));
|
|
623
|
+
expect(result).toEqual([
|
|
624
|
+
{ uid: 'volumeId1~nodeId1', ok: true },
|
|
625
|
+
{ uid: 'volumeId2~nodeId2', ok: true },
|
|
626
|
+
]);
|
|
567
627
|
});
|
|
568
628
|
});
|
|
569
629
|
|
|
@@ -600,3 +660,126 @@ describe('nodeAPIService', () => {
         });
     });
 });
+
+describe('groupNodeUidsByVolumeAndIteratePerBatch', () => {
+    it('should handle empty array', () => {
+        const result = Array.from(groupNodeUidsByVolumeAndIteratePerBatch([]));
+        expect(result).toEqual([]);
+    });
+
+    it('should handle single volume with nodes that fit in one batch', () => {
+        const nodeUids = ['volumeId1~nodeId1', 'volumeId1~nodeId2', 'volumeId1~nodeId3'];
+
+        const result = Array.from(groupNodeUidsByVolumeAndIteratePerBatch(nodeUids));
+
+        expect(result).toEqual([
+            {
+                volumeId: 'volumeId1',
+                batchNodeIds: ['nodeId1', 'nodeId2', 'nodeId3'],
+                batchNodeUids: ['volumeId1~nodeId1', 'volumeId1~nodeId2', 'volumeId1~nodeId3'],
+            },
+        ]);
+    });
+
+    it('should handle single volume with nodes that require multiple batches', () => {
+        // Create 250 node UIDs to test batching (API_NODES_BATCH_SIZE = 100)
+        const nodeUids = Array.from({ length: 250 }, (_, i) => `volumeId1~nodeId${i}`);
+
+        const result = Array.from(groupNodeUidsByVolumeAndIteratePerBatch(nodeUids));
+
+        expect(result).toHaveLength(3); // 100 + 100 + 50
+
+        // First batch
+        expect(result[0]).toEqual({
+            volumeId: 'volumeId1',
+            batchNodeIds: Array.from({ length: 100 }, (_, i) => `nodeId${i}`),
+            batchNodeUids: Array.from({ length: 100 }, (_, i) => `volumeId1~nodeId${i}`),
+        });
+
+        // Second batch
+        expect(result[1]).toEqual({
+            volumeId: 'volumeId1',
+            batchNodeIds: Array.from({ length: 100 }, (_, i) => `nodeId${i + 100}`),
+            batchNodeUids: Array.from({ length: 100 }, (_, i) => `volumeId1~nodeId${i + 100}`),
+        });
+
+        // Third batch
+        expect(result[2]).toEqual({
+            volumeId: 'volumeId1',
+            batchNodeIds: Array.from({ length: 50 }, (_, i) => `nodeId${i + 200}`),
+            batchNodeUids: Array.from({ length: 50 }, (_, i) => `volumeId1~nodeId${i + 200}`),
+        });
+    });
+
+    it('should handle multiple volumes with nodes distributed across them', () => {
+        const nodeUids = [
+            'volumeId1~nodeId1',
+            'volumeId2~nodeId2',
+            'volumeId1~nodeId3',
+            'volumeId3~nodeId4',
+            'volumeId2~nodeId5',
+        ];
+
+        const result = Array.from(groupNodeUidsByVolumeAndIteratePerBatch(nodeUids));
+
+        expect(result).toHaveLength(3); // One batch per volume
+
+        // Results should be grouped by volume
+        const volumeId1Batch = result.find((batch) => batch.volumeId === 'volumeId1');
+        const volumeId2Batch = result.find((batch) => batch.volumeId === 'volumeId2');
+        const volumeId3Batch = result.find((batch) => batch.volumeId === 'volumeId3');
+
+        expect(volumeId1Batch).toEqual({
+            volumeId: 'volumeId1',
+            batchNodeIds: ['nodeId1', 'nodeId3'],
+            batchNodeUids: ['volumeId1~nodeId1', 'volumeId1~nodeId3'],
+        });
+
+        expect(volumeId2Batch).toEqual({
+            volumeId: 'volumeId2',
+            batchNodeIds: ['nodeId2', 'nodeId5'],
+            batchNodeUids: ['volumeId2~nodeId2', 'volumeId2~nodeId5'],
+        });
+
+        expect(volumeId3Batch).toEqual({
+            volumeId: 'volumeId3',
+            batchNodeIds: ['nodeId4'],
+            batchNodeUids: ['volumeId3~nodeId4'],
+        });
+    });
+
+    it('should handle multiple volumes where some require multiple batches', () => {
+        // Volume 1: 150 nodes (2 batches)
+        // Volume 2: 50 nodes (1 batch)
+        // Volume 3: 200 nodes (2 batches)
+        const volume1Nodes = Array.from({ length: 150 }, (_, i) => `volumeId1~nodeId${i}`);
+        const volume2Nodes = Array.from({ length: 50 }, (_, i) => `volumeId2~nodeId${i}`);
+        const volume3Nodes = Array.from({ length: 200 }, (_, i) => `volumeId3~nodeId${i}`);
+
+        const nodeUids = [...volume1Nodes, ...volume2Nodes, ...volume3Nodes];
+
+        const result = Array.from(groupNodeUidsByVolumeAndIteratePerBatch(nodeUids));
+
+        expect(result).toHaveLength(5); // 2 + 1 + 2 batches
+
+        // Group results by volume
+        const volume1Batches = result.filter((batch) => batch.volumeId === 'volumeId1');
+        const volume2Batches = result.filter((batch) => batch.volumeId === 'volumeId2');
+        const volume3Batches = result.filter((batch) => batch.volumeId === 'volumeId3');
+
+        expect(volume1Batches).toHaveLength(2);
+        expect(volume2Batches).toHaveLength(1);
+        expect(volume3Batches).toHaveLength(2);
+
+        // Verify volume 1 batches
+        expect(volume1Batches[0].batchNodeIds).toHaveLength(100);
+        expect(volume1Batches[1].batchNodeIds).toHaveLength(50);
+
+        // Verify volume 2 batch
+        expect(volume2Batches[0].batchNodeIds).toHaveLength(50);
+
+        // Verify volume 3 batches
+        expect(volume3Batches[0].batchNodeIds).toHaveLength(100);
+        expect(volume3Batches[1].batchNodeIds).toHaveLength(100);
+    });
+});
@@ -128,7 +128,7 @@ export class NodeAPIService {
 
     async *iterateNodes(
         nodeUids: string[],
-        ownVolumeId: string,
+        ownVolumeId: string | undefined,
         filterOptions?: FilterOptions,
         signal?: AbortSignal,
     ): AsyncGenerator<EncryptedNode> {
@@ -389,55 +389,49 @@ export class NodeAPIService {
         return makeNodeUid(volumeId, response.LinkID);
     }
 
-    // Improvement requested: split into multiple calls for many nodes.
     async *trashNodes(nodeUids: string[], signal?: AbortSignal): AsyncGenerator<NodeResult> {
-        const
-
-
-
-
-
-
-
-            signal,
-        );
+        for (const { volumeId, batchNodeIds, batchNodeUids } of groupNodeUidsByVolumeAndIteratePerBatch(nodeUids)) {
+            const response = await this.apiService.post<PostTrashNodesRequest, PostTrashNodesResponse>(
+                `drive/v2/volumes/${volumeId}/trash_multiple`,
+                {
+                    LinkIDs: batchNodeIds,
+                },
+                signal,
+            );
 
-
-
+            // TODO: remove `as` when backend fixes OpenAPI schema.
+            yield* handleResponseErrors(batchNodeUids, volumeId, response.Responses as LinkResponse[]);
+        }
     }
 
-    // Improvement requested: split into multiple calls for many nodes.
     async *restoreNodes(nodeUids: string[], signal?: AbortSignal): AsyncGenerator<NodeResult> {
-        const
-
-
-
-
-
-
-
-            signal,
-        );
+        for (const { volumeId, batchNodeIds, batchNodeUids } of groupNodeUidsByVolumeAndIteratePerBatch(nodeUids)) {
+            const response = await this.apiService.put<PutRestoreNodesRequest, PutRestoreNodesResponse>(
+                `drive/v2/volumes/${volumeId}/trash/restore_multiple`,
+                {
+                    LinkIDs: batchNodeIds,
+                },
+                signal,
+            );
 
-
-
+            // TODO: remove `as` when backend fixes OpenAPI schema.
+            yield* handleResponseErrors(batchNodeUids, volumeId, response.Responses as LinkResponse[]);
+        }
     }
 
-    // Improvement requested: split into multiple calls for many nodes.
     async *deleteNodes(nodeUids: string[], signal?: AbortSignal): AsyncGenerator<NodeResult> {
-        const
-
-
-
-
-
-
-
-            signal,
-        );
+        for (const { volumeId, batchNodeIds, batchNodeUids } of groupNodeUidsByVolumeAndIteratePerBatch(nodeUids)) {
+            const response = await this.apiService.post<PostDeleteNodesRequest, PostDeleteNodesResponse>(
+                `drive/v2/volumes/${volumeId}/trash/delete_multiple`,
+                {
+                    LinkIDs: batchNodeIds,
+                },
+                signal,
+            );
 
-
-
+            // TODO: remove `as` when backend fixes OpenAPI schema.
+            yield* handleResponseErrors(batchNodeUids, volumeId, response.Responses as LinkResponse[]);
+        }
     }
 
     async createFolder(
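Since trashNodes, restoreNodes, and deleteNodes remain async generators yielding one NodeResult per node, callers can keep consuming results as a stream while the SDK batches requests per volume underneath. A hypothetical consumption sketch (the `api` instance is assumed to exist; the result shape is taken from the tests above):

    // Sketch only: `api` is a NodeAPIService instance obtained elsewhere.
    for await (const result of api.trashNodes(nodeUids)) {
        if (!result.ok) {
            console.warn(`Failed to trash ${result.uid}: ${result.error}`);
        }
    }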
@@ -513,15 +507,6 @@ export class NodeAPIService {
     }
 }
 
-function assertAndGetSingleVolumeId(operationForErrorMessage: string, nodeIds: { volumeId: string }[]): string {
-    const uniqueVolumeIds = new Set(nodeIds.map(({ volumeId }) => volumeId));
-    if (uniqueVolumeIds.size !== 1) {
-        throw new ValidationError(c('Error').t`${operationForErrorMessage} from multiple sections is not allowed`);
-    }
-    const volumeId = nodeIds[0].volumeId;
-    return volumeId;
-}
-
 type LinkResponse = {
     LinkID: string;
     Response: {
@@ -657,6 +642,34 @@ function linkToEncryptedNode(
         throw new Error(`Unknown node type: ${link.Link.Type}`);
     }
 
+export function* groupNodeUidsByVolumeAndIteratePerBatch(
+    nodeUids: string[],
+): Generator<{ volumeId: string; batchNodeIds: string[]; batchNodeUids: string[] }> {
+    const allNodeIds = nodeUids.map((nodeUid: string) => {
+        const { volumeId, nodeId } = splitNodeUid(nodeUid);
+        return { volumeId, nodeIds: { nodeId, nodeUid } };
+    });
+
+    const nodeIdsByVolumeId = new Map<string, { nodeId: string; nodeUid: string }[]>();
+    for (const { volumeId, nodeIds } of allNodeIds) {
+        if (!nodeIdsByVolumeId.has(volumeId)) {
+            nodeIdsByVolumeId.set(volumeId, []);
+        }
+        nodeIdsByVolumeId.get(volumeId)?.push(nodeIds);
+    }
+
+    for (const [volumeId, nodeIds] of nodeIdsByVolumeId.entries()) {
+        for (const nodeIdsBatch of batch(nodeIds, API_NODES_BATCH_SIZE)) {
+            yield {
+                volumeId,
+                batchNodeIds: nodeIdsBatch.map(({ nodeId }) => nodeId),
+                batchNodeUids: nodeIdsBatch.map(({ nodeUid }) => nodeUid),
+            };
+        }
+    }
+}
+
+
 function transformRevisionResponse(
     volumeId: string,
     nodeId: string,
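The new generator relies on a `batch` helper and an `API_NODES_BATCH_SIZE` constant that are not part of the extracted hunks. A minimal sketch of what such a chunking helper could look like, assuming the batch size of 100 implied by the tests above; the SDK's actual helper may differ:

    // Assumed values and shape, not taken from the package source.
    const API_NODES_BATCH_SIZE = 100;

    function* batch<T>(items: T[], size: number): Generator<T[]> {
        // Yield consecutive slices of at most `size` items.
        for (let i = 0; i < items.length; i += size) {
            yield items.slice(i, i + size);
        }
    }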
@@ -0,0 +1,129 @@
+import { NodesDebouncer } from './debouncer';
+import { Logger } from '../../interface';
+
+describe('NodesDebouncer', () => {
+    let debouncer: NodesDebouncer;
+    let mockLogger: jest.Mocked<Logger>;
+
+    beforeEach(() => {
+        mockLogger = {
+            debug: jest.fn(),
+            info: jest.fn(),
+            warn: jest.fn(),
+            error: jest.fn(),
+        };
+        debouncer = new NodesDebouncer(mockLogger);
+
+        jest.useFakeTimers();
+    });
+
+    afterEach(() => {
+        jest.useRealTimers();
+        debouncer.clear();
+    });
+
+    it('should register a node for loading and wait for it to finish', async () => {
+        const nodeUid = 'test-node-1';
+        debouncer.loadingNode(nodeUid);
+
+        // Verify that the node is registered by checking if waitForLoadingNode works
+        const waitPromise = debouncer.waitForLoadingNode(nodeUid);
+        expect(waitPromise).toBeInstanceOf(Promise);
+
+        // Finish loading to clean up
+        debouncer.finishedLoadingNode(nodeUid);
+        await waitPromise;
+    });
+
+    it('should allow multiple nodes to be registered', async () => {
+        const nodeUid1 = 'test-node-1';
+        const nodeUid2 = 'test-node-2';
+
+        debouncer.loadingNode(nodeUid1);
+        debouncer.loadingNode(nodeUid2);
+
+        const wait1 = debouncer.waitForLoadingNode(nodeUid1);
+        const wait2 = debouncer.waitForLoadingNode(nodeUid2);
+
+        expect(wait1).toBeInstanceOf(Promise);
+        expect(wait2).toBeInstanceOf(Promise);
+
+        debouncer.finishedLoadingNode(nodeUid1);
+        debouncer.finishedLoadingNode(nodeUid2);
+        await Promise.all([wait1, wait2]);
+    });
+
+    it('should register multiple nodes at once', async () => {
+        const nodeUid1 = 'test-node-1';
+        const nodeUid2 = 'test-node-2';
+
+        debouncer.loadingNodes([nodeUid1, nodeUid2]);
+
+        const wait1 = debouncer.waitForLoadingNode(nodeUid1);
+        const wait2 = debouncer.waitForLoadingNode(nodeUid2);
+
+        expect(wait1).toBeInstanceOf(Promise);
+        expect(wait2).toBeInstanceOf(Promise);
+
+        debouncer.finishedLoadingNode(nodeUid1);
+        debouncer.finishedLoadingNode(nodeUid2);
+        await Promise.all([wait1, wait2]);
+    });
+
+    it('should warn about registering the same node twice', async () => {
+        const nodeUid = 'test-node-1';
+
+        // Register the same node twice
+        debouncer.loadingNode(nodeUid);
+        debouncer.loadingNode(nodeUid);
+
+        expect(mockLogger.warn).toHaveBeenCalledWith(`debouncer: Loading twice for: ${nodeUid}`);
+    });
+
+    it('should timeout', async () => {
+        const nodeUid = 'test-node-1';
+        debouncer.loadingNode(nodeUid);
+
+        jest.advanceTimersByTime(6000);
+        expect(mockLogger.warn).toHaveBeenCalledWith(`debouncer: Timeout for: ${nodeUid}`);
+        await expect(debouncer.waitForLoadingNode(nodeUid)).resolves.toBeUndefined();
+    });
+
+    describe('finishedLoadingNode', () => {
+        it('should handle non-existent node gracefully', async () => {
+            const nodeUid = 'non-existent-node';
+
+            expect(() => debouncer.finishedLoadingNode(nodeUid)).not.toThrow();
+        });
+
+        it('should remove node from internal map after finishing', async () => {
+            const nodeUid = 'test-node-1';
+            debouncer.loadingNode(nodeUid);
+            debouncer.finishedLoadingNode(nodeUid);
+
+            const waitPromise = debouncer.waitForLoadingNode(nodeUid);
+            await expect(waitPromise).resolves.toBe(undefined);
+        });
+    });
+
+    describe('waitForLoadingNode', () => {
+        it('should return immediately for non-registered node', async () => {
+            const nodeUid = 'non-existent-node';
+
+            const result = await debouncer.waitForLoadingNode(nodeUid);
+            expect(result).toBeUndefined();
+            expect(mockLogger.debug).not.toHaveBeenCalled();
+        });
+
+        it('should wait for registered node and log debug message', async () => {
+            const nodeUid = 'test-node-1';
+            debouncer.loadingNode(nodeUid);
+
+            const waitPromise = debouncer.waitForLoadingNode(nodeUid);
+
+            expect(mockLogger.debug).toHaveBeenCalledWith(`debouncer: Wait for: ${nodeUid}`);
+            debouncer.finishedLoadingNode(nodeUid);
+            await waitPromise;
+        });
+    });
+});
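The NodesDebouncer implementation itself (package/src/internal/nodes/debouncer.ts, +93 lines) is not part of the extracted hunks; only its tests are shown above. The following is a rough sketch consistent with the behaviour those tests exercise (per-node promises, a warning on duplicate registration, a timeout that resolves waiters), assuming a 5-second timeout since the tests only advance timers by 6000 ms. It is not the shipped code.

    // Hypothetical sketch, inferred only from the tests above.
    import { Logger } from '../../interface';

    const LOADING_TIMEOUT_MS = 5000; // assumption: tests only show it firing before 6000 ms

    export class NodesDebouncer {
        private loading = new Map<
            string,
            { promise: Promise<void>; resolve: () => void; timeout: ReturnType<typeof setTimeout> }
        >();

        constructor(private logger: Logger) {}

        loadingNode(nodeUid: string): void {
            if (this.loading.has(nodeUid)) {
                this.logger.warn(`debouncer: Loading twice for: ${nodeUid}`);
                return;
            }
            let resolve!: () => void;
            const promise = new Promise<void>((res) => (resolve = res));
            const timeout = setTimeout(() => {
                this.logger.warn(`debouncer: Timeout for: ${nodeUid}`);
                this.finishedLoadingNode(nodeUid);
            }, LOADING_TIMEOUT_MS);
            this.loading.set(nodeUid, { promise, resolve, timeout });
        }

        loadingNodes(nodeUids: string[]): void {
            nodeUids.forEach((nodeUid) => this.loadingNode(nodeUid));
        }

        finishedLoadingNode(nodeUid: string): void {
            const entry = this.loading.get(nodeUid);
            if (!entry) {
                return; // finishing an unknown node is a no-op
            }
            clearTimeout(entry.timeout);
            entry.resolve();
            this.loading.delete(nodeUid);
        }

        async waitForLoadingNode(nodeUid: string): Promise<void> {
            const entry = this.loading.get(nodeUid);
            if (!entry) {
                return; // nothing in flight for this node
            }
            this.logger.debug(`debouncer: Wait for: ${nodeUid}`);
            await entry.promise;
        }

        clear(): void {
            for (const [nodeUid] of this.loading) {
                this.finishedLoadingNode(nodeUid);
            }
        }
    }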