querysub 0.311.0 → 0.313.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursorrules +1 -1
- package/costsBenefits.txt +4 -1
- package/package.json +3 -2
- package/spec.txt +23 -18
- package/src/-0-hooks/hooks.ts +1 -1
- package/src/-a-archives/archives.ts +16 -3
- package/src/-a-archives/archivesBackBlaze.ts +51 -3
- package/src/-a-archives/archivesLimitedCache.ts +175 -0
- package/src/-a-archives/archivesPrivateFileSystem.ts +299 -0
- package/src/-a-auth/certs.ts +58 -31
- package/src/-b-authorities/cdnAuthority.ts +2 -2
- package/src/-b-authorities/dnsAuthority.ts +3 -2
- package/src/-c-identity/IdentityController.ts +9 -2
- package/src/-d-trust/NetworkTrust2.ts +38 -31
- package/src/-e-certs/EdgeCertController.ts +3 -4
- package/src/-e-certs/certAuthority.ts +1 -2
- package/src/-f-node-discovery/NodeDiscovery.ts +20 -13
- package/src/-g-core-values/NodeCapabilities.ts +6 -1
- package/src/0-path-value-core/NodePathAuthorities.ts +1 -1
- package/src/0-path-value-core/PathValueCommitter.ts +3 -3
- package/src/0-path-value-core/PathValueController.ts +3 -3
- package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +15 -37
- package/src/0-path-value-core/pathValueCore.ts +4 -3
- package/src/3-path-functions/PathFunctionRunner.ts +2 -2
- package/src/4-dom/qreact.tsx +4 -3
- package/src/4-querysub/Querysub.ts +2 -2
- package/src/4-querysub/QuerysubController.ts +2 -2
- package/src/5-diagnostics/GenericFormat.tsx +1 -0
- package/src/5-diagnostics/Table.tsx +3 -0
- package/src/5-diagnostics/diskValueAudit.ts +2 -1
- package/src/5-diagnostics/nodeMetadata.ts +0 -1
- package/src/deployManager/components/MachineDetailPage.tsx +9 -1
- package/src/deployManager/components/ServiceDetailPage.tsx +10 -1
- package/src/diagnostics/NodeViewer.tsx +3 -4
- package/src/diagnostics/logs/FastArchiveAppendable.ts +748 -0
- package/src/diagnostics/logs/FastArchiveController.ts +524 -0
- package/src/diagnostics/logs/FastArchiveViewer.tsx +863 -0
- package/src/diagnostics/logs/LogViewer2.tsx +349 -0
- package/src/diagnostics/logs/TimeRangeSelector.tsx +94 -0
- package/src/diagnostics/logs/diskLogger.ts +135 -305
- package/src/diagnostics/logs/diskShimConsoleLogs.ts +6 -29
- package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +577 -0
- package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +225 -0
- package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +207 -0
- package/src/diagnostics/logs/importLogsEntry.ts +38 -0
- package/src/diagnostics/logs/injectFileLocationToConsole.ts +7 -17
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +0 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +151 -0
- package/src/diagnostics/managementPages.tsx +7 -16
- package/src/diagnostics/misc-pages/ComponentSyncStats.tsx +0 -1
- package/src/diagnostics/periodic.ts +5 -0
- package/src/diagnostics/watchdog.ts +2 -2
- package/src/functional/SocketChannel.ts +67 -0
- package/src/library-components/Input.tsx +1 -1
- package/src/library-components/InputLabel.tsx +5 -2
- package/src/misc.ts +111 -0
- package/src/src.d.ts +34 -1
- package/src/user-implementation/userData.ts +4 -3
- package/test.ts +13 -0
- package/testEntry2.ts +29 -0
- package/src/diagnostics/errorLogs/ErrorLogController.ts +0 -535
- package/src/diagnostics/errorLogs/ErrorLogCore.ts +0 -274
- package/src/diagnostics/errorLogs/LogClassifiers.tsx +0 -308
- package/src/diagnostics/errorLogs/LogFilterUI.tsx +0 -84
- package/src/diagnostics/errorLogs/LogNotify.tsx +0 -101
- package/src/diagnostics/errorLogs/LogTimeSelector.tsx +0 -723
- package/src/diagnostics/errorLogs/LogViewer.tsx +0 -757
- package/src/diagnostics/errorLogs/logFiltering.tsx +0 -149
- package/src/diagnostics/logs/DiskLoggerPage.tsx +0 -613
package/.cursorrules
CHANGED
@@ -43,7 +43,7 @@ Never use em or rem. Only use px or vw/vh/%.
 
 Errors should almost always use a template string to include the expected value, and the actual value, as in: "throw new Error(`Expected X, was ${Y}`);"
 
-
+Don't add font colors unless asked to add styling. Don't add any aethetics beyond hbox/vbox/pad2 unless asked to add styling. Don't add fontSize unless asked to add styling. If you believe styling is possible, just tell the user, I can add styling, but won't do it unless you ask me to.
 
 Don't use switch statements. Use if statements instead.
 
package/costsBenefits.txt
CHANGED
@@ -16,13 +16,16 @@ Concepts
 
 NOTE: We don't actually lock things such as Object.keys(). In practice we will get pretty close, but our handling of it is simply not fully ACID.
 
-NOTE: In regards to FunctionRunners. FunctionRunner rerun any calls which
+NOTE: In regards to FunctionRunners. FunctionRunner rerun any calls which have no result (included if their result was written, then invalidate). The first one to run will get the value, with the others having their ReadLock on the result (from epoch to their LATER writeTime) invalidated.
 - Although, in practice, the writeTimes will be close, as they are based on requested writeTimes, made unique by creatorId. So... if two FunctionRunners run the same call, one will consistently create the accepted value. So... just don't have two FunctionRunners run the same call!
 
 NOTE: "Bad" predictions are possibly by if you know the path some change will write to (ie, the call result path). You can create a prediction that depends on that path being undefined (in some range that includes the eventual remote change, but not the prediction), simply watch that path, and when the change eventually happens your prediction will automatically be invalidated.
 - This is not suitable for trusted nodes, as it means all of our dependent values on that prediction will be only acceptable to us (as our prediction won't really be able to be propagated, as the change might never happen, in which case our prediction has to go away, which is impossible to do if we propagate it), but... it is very useful for clients to quickly predict writes (trusted nodes can just write actual values, not trying to get some remote to make a change).
 - AND, if you only depend on that path there is no jitter as you reject old values, as there is no cascading! This DOES allow values to drift out of sync with the server (if they are CONSTANTLY written to), but... that is another problem, for games, and just one of many issues games have to overcome.
 
+NOTE: The reason that we have our main database servers resolving all the values before they write to disk, is simple. They need to resolve the values, because somebody needs to and if the watchers resolve it it's extremely inefficient. We could have them resolve it but then pass the full stream to another server which then resolves it before writing to disk. But then any cases where this fixes an issue of lost data would mean the disk is now inconsistent with what our watchers have. So therefore, splitting off another server to record the full stream of data and having that be all it does and then resolving the data afterwards isn't any faster, and doesn't make our servers appear more stable.
+- However, splitting off the data stream might be a good idea for the purposes of auditing, instead of speed, or reliability.
+
 
 Summary of benefits/costs
 
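The FunctionRunner tie-break in the note above is easier to see in code. A minimal sketch, with hypothetical names (none of these types are querysub's actual internals): because every requested writeTime is made unique by creatorId, comparing the pair gives a total order, so all nodes pick the same accepted value when two runners race.

// Illustrative only: "writeTimes made unique by creatorId" as a total order.
interface CompetingWrite {
    requestedWriteTime: number; // time the FunctionRunner requested the write
    creatorId: string;          // unique per FunctionRunner
}

// Earlier writeTime wins; creatorId breaks exact ties. Unique creatorIds
// make this a total order, so every observer agrees on the same winner.
function acceptedWrite(a: CompetingWrite, b: CompetingWrite): CompetingWrite {
    if (a.requestedWriteTime !== b.requestedWriteTime) {
        return a.requestedWriteTime < b.requestedWriteTime ? a : b;
    }
    return a.creatorId < b.creatorId ? a : b;
}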
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "querysub",
-  "version": "0.311.0",
+  "version": "0.313.0",
   "main": "index.js",
   "license": "MIT",
   "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -35,7 +35,8 @@
     "servershardedtest": "yarn server --authority ./pathremain.json & yarn server --authority ./patha.json & yarn server --authority ./pathb.json & yarn server --authority ./pathc.json & yarn server --authority ./pathd.json",
     "type": "yarn tsc --noEmit",
     "depend": "yarn --silent depcruise src --include-only \"^src\" --config --output-type dot | dot -T svg > dependency-graph.svg",
-    "test": "yarn typenode ./
+    "test": "yarn typenode ./test.ts",
+    "test3": "yarn typenode ./src/test/test.tsx --local",
     "test2": "yarn typenode ./src/4-dom/qreactTest.tsx --local"
   },
   "bin": {
package/spec.txt
CHANGED
@@ -52,23 +52,24 @@ More corruption resistance files
 - and... we might as well add support for short, int, float, and uint (uint is a good way to store a guid, via storing 8 uint variables).
 
 Schema/binary PathValues accesses
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+PER Path, not per value. Values will likely be lazily deserialized anyways, making them either pointers, or just numbers, stored in unchanging objects, which... should be quite fast.
+0) First our stream/schema wrappers. Which at first just convert to raw path reads/writes, but once we add schemas they can efficiently create/use those
+1) PathValue.path (string[]), need to be converted to an opaque type `Path`
+   - Helpers to manipulate it, and go back to string[], string, etc.
+2) Have Path be able to store itself in schema + variables mode
+   - Where schema is shared across all Path instances
+     { schema: { path: (string|Variable)[] }; variables: (string | number)[]; }
+   - Add functions to access the schema and variable values
+3) Create an efficient `Map<Path, T>`, via using schemas from the paths
+   - By requiring Path as the input we can directly use the schema to narrow down the cases, and then within that we just have to lookup values by variables
+4) Use Path everywhere, replacing PathValue.path, using our efficient lookup to manage it.
+   - I think a lookup should handle all the cases. We should be able to nest them as well?
+5) This SHOULD let us entirely get rid of path joining, which should be SO much faster.
+6) Update serialization to keep schemas, instead of converting them back to paths. This should be more efficient, and a lot smaller.
+   - Values serialization won't change, and we still need to encode the variables, but... it should still be a lot faster.
+7) Try to remove as many Path conversions to string (and also to a lesser degree string[]), as possible, by making them schema aware.
+8) Investigate further optimizations
+   - Replacing variables with numbers, so we our internals Maps can be replaced with arrays / typed arrays
 
 IMPORTANT! Actually... a lot of the following is wrong. We should have this PER atomic value, and perhaps ONLY for paths!
 
@@ -1108,4 +1109,8 @@ Very short expiry times on thread certificates, finding some way to automaticall
 - Our system for the client to update it's cert after the fact can likely be reused to trigger an update of credentials?
 - I assume outstanding connections won't be killed if their certs expire, so... we would actually want to
 add an additional check to close a connection if it's cert isn't updated? Or... we could just call renegotiate, which
-I think serverside TLS connections can do?
+I think serverside TLS connections can do?
+
+Public release?
+Get benchmarks on https://benchant.com/ranking/database-ranking
+- It seems like the throughput can be easily beat. We trade massive data-loss and memory efficiency for speed, so after our path optimizations we should be able to easily beat all others servers (again, because we lose data easily, and store everything in memory...)
package/src/-0-hooks/hooks.ts
CHANGED
@@ -21,7 +21,7 @@ function createHookFunction<Fnc extends (...args: any[]) => void>(debugName: str
     }, 1000);
     function flushQueued() {
         if (!declaration) {
-            throw new Error(`Hook function ${debugName} not declared
+            throw new Error(`Hook function ${debugName} not declared, but the hook was used. Make sure to include the code somewhere after using the hook.`);
         }
         let queued = queuedCalls;
         queuedCalls = [];
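For context on that error: the visible fragment queues calls until the hook body is declared, with a 1000ms timer that flushes (or fails) them. A rough reconstruction of the pattern, under stated assumptions (hypothetical names, simplified from the fragment; not querysub's actual implementation):

// Calls made before the hook body is declared get queued, and are flushed
// once declare() runs; if nothing declares the hook within the grace period,
// that is a usage error, matching the message above.
function createDeferredHook<Args extends unknown[]>(debugName: string) {
    let declaration: ((...args: Args) => void) | undefined;
    let queuedCalls: Args[] = [];
    setTimeout(() => {
        if (!declaration && queuedCalls.length > 0) {
            throw new Error(`Hook function ${debugName} not declared, but the hook was used.`);
        }
    }, 1000);
    function call(...args: Args) {
        if (declaration) { declaration(...args); return; }
        queuedCalls.push(args); // buffered until declaration arrives
    }
    function declare(fnc: (...args: Args) => void) {
        declaration = fnc;
        for (const args of queuedCalls) fnc(...args);
        queuedCalls = [];
    }
    return { call, declare };
}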
package/src/-a-archives/archives.ts
CHANGED
@@ -7,9 +7,7 @@ import { magenta } from "socket-function/src/formatting/logColors";
 import debugbreak from "debugbreak";
 import { isClient } from "../config2";
 import { wrapArchivesWithCache } from "./archiveCache";
-
-// https://www.backblaze.com/apidocs/b2-upload-part
-// https://www.backblaze.com/apidocs/b2-finish-large-file
+import { Args } from "socket-function/src/types";
 
 export interface Archives {
     getDebugName(): string;
@@ -58,6 +56,19 @@ export interface Archives {
     assertPathValid(path: string): Promise<void>;
 
     getBaseArchives?: () => { parentPath: string; archives: Archives; } | undefined;
+
+    /** Get a download URL for a file. Only implemented for some archive types (like Backblaze). */
+    getURL?: (path: string) => Promise<string>;
+
+    // NOTE: This isn't part of getURL, as it is much more efficient to call it once and reuse the authorization. This is only for backblaze as of right now.
+    // NOTE: This also gives authorization for ALL files, so... ONLY use it for internal download links. If we need external links, we should use a public folder with hash links, or... make a new endpoint.
+    getDownloadAuthorization?: (config: {
+        validDurationInSeconds: number;
+    }) => Promise<{
+        bucketId: string;
+        fileNamePrefix: string;
+        authorizationToken: string;
+    }>;
 }
 
 export function nestArchives(path: string, archives: Archives): Archives {
@@ -101,6 +112,8 @@ export function nestArchives(path: string, archives: Archives): Archives {
             }
             return result;
         },
+        getURL: archives.getURL ? (filePath: string) => archives.getURL!(path + stripFilePrefix(filePath)) : undefined,
+        getDownloadAuthorization: archives.getDownloadAuthorization && ((...args: Args<typeof archives.getDownloadAuthorization>) => archives.getDownloadAuthorization!(...args)),
     };
 }
 
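The two NOTEs on getDownloadAuthorization suggest the intended call pattern: authorize once, then mint many internal links. A sketch of such a caller; downloadUrlBase, the bucket name argument, and the Authorization query-parameter URL shape are assumptions about the Backblaze setup, not something this diff defines.

// Sketch: fetch one authorization, then build many internal download links
// from it (the point of the "call it once and reuse" note above).
import type { Archives } from "./archives";

async function makeInternalLinks(
    archives: Archives,
    downloadUrlBase: string, // assumed: the downloadUrl from B2's authorize response
    bucketName: string,
    filePaths: string[],
): Promise<string[]> {
    if (!archives.getDownloadAuthorization) {
        throw new Error(`Expected downloadable archives, was ${archives.getDebugName()}`);
    }
    // One API call, reused for every file below.
    const auth = await archives.getDownloadAuthorization({ validDurationInSeconds: 3600 });
    // Assumed URL shape: B2 accepts the token as an Authorization query parameter.
    return filePaths.map(path =>
        `${downloadUrlBase}/file/${bucketName}/${path}?Authorization=${encodeURIComponent(auth.authorizationToken)}`
    );
}

Per the second NOTE, links built this way authorize ALL files under the prefix, so they are only suitable as internal links.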
package/src/-a-archives/archivesBackBlaze.ts
CHANGED
@@ -342,6 +342,22 @@ const getAPI = lazy(async () => {
         fileId: string;
     }, {}>("b2_cancel_large_file", "POST", "noAccountId");
 
+    const getDownloadAuthorization = createB2Function<{
+        bucketId: string;
+        fileNamePrefix: string;
+        validDurationInSeconds: number;
+        b2ContentDisposition?: string;
+        b2ContentLanguage?: string;
+        b2Expires?: string;
+        b2CacheControl?: string;
+        b2ContentEncoding?: string;
+        b2ContentType?: string;
+    }, {
+        bucketId: string;
+        fileNamePrefix: string;
+        authorizationToken: string;
+    }>("b2_get_download_authorization", "POST", "noAccountId");
+
     async function getDownloadURL(path: string) {
         if (!path.startsWith("/")) {
             path = "/" + path;
@@ -349,6 +365,7 @@ const getAPI = lazy(async () => {
         return auth.downloadUrl + path;
     }
 
+
     return {
         createBucket,
         updateBucket,
@@ -363,6 +380,7 @@
         uploadPart,
         finishLargeFile,
         cancelLargeFile,
+        getDownloadAuthorization,
         getDownloadURL,
     };
 });
@@ -402,14 +420,15 @@ export class ArchivesBackblaze {
             cacheTime = 86400 * 1000;
         }
 
-
+        // ALWAYS set access control, as we can make urls for private buckets with getDownloadAuthorization
+        let desiredCorsRules = [{
             corsRuleName: "allowAll",
             allowedOrigins: ["https"],
             allowedOperations: ["b2_download_file_by_id", "b2_download_file_by_name"],
             allowedHeaders: ["range"],
             exposeHeaders: ["x-bz-content-sha1"],
             maxAgeSeconds: cacheTime / 1000,
-        }]
+        }];
         let bucketInfo: Record<string, unknown> = {};
         if (cacheTime) {
             bucketInfo["cache-control"] = `max-age=${cacheTime / 1000}`;
@@ -858,7 +877,7 @@ export class ArchivesBackblaze {
         return this.move({ ...config, copyInstead: true });
     }
 
-    public async
+    public async getURL(path: string) {
         return await this.apiRetryLogic(async (api) => {
             if (path.startsWith("/")) {
                 path = path.slice(1);
@@ -866,6 +885,29 @@ export class ArchivesBackblaze {
             return await api.getDownloadURL("file/" + this.bucketName + "/" + path);
         });
     }
+
+    public async getDownloadAuthorization(config: {
+        fileNamePrefix?: string;
+        validDurationInSeconds: number;
+        b2ContentDisposition?: string;
+        b2ContentLanguage?: string;
+        b2Expires?: string;
+        b2CacheControl?: string;
+        b2ContentEncoding?: string;
+        b2ContentType?: string;
+    }): Promise<{
+        bucketId: string;
+        fileNamePrefix: string;
+        authorizationToken: string;
+    }> {
+        return await this.apiRetryLogic(async (api) => {
+            return await api.getDownloadAuthorization({
+                bucketId: this.bucketId,
+                fileNamePrefix: config.fileNamePrefix ?? "",
+                ...config,
+            });
+        });
+    }
 }
 
 /*
@@ -880,6 +922,12 @@ Names should be a UTF-8 string up to 1024 bytes with the following exceptions:
 export const getArchivesBackblaze = cache((domain: string) => {
     return new ArchivesBackblaze({ bucketName: domain });
 });
+export const getArchivesBackblazePrivateImmutable = cache((domain: string) => {
+    return new ArchivesBackblaze({
+        bucketName: domain + "-private-immutable",
+        immutable: true
+    });
+});
 export const getArchivesBackblazePublicImmutable = cache((domain: string) => {
     return new ArchivesBackblaze({
         bucketName: domain + "-public-immutable",
package/src/-a-archives/archivesLimitedCache.ts
ADDED
@@ -0,0 +1,175 @@
+import { formatNumber } from "socket-function/src/formatting/format";
+import { Archives } from "./archives";
+import { cache } from "socket-function/src/caching";
+
+interface FileInfo {
+    writeTime: number;
+    accessTime: number;
+    size: number;
+}
+
+class ArchivesLimitedCache {
+    private baseArchives: Archives;
+    private maxFiles: number;
+    private maxSize: number;
+    private cache = new Map<string, FileInfo>();
+    private initialized = false;
+
+    constructor(baseArchives: Archives, config: { maxFiles: number; maxSize: number }) {
+        this.baseArchives = baseArchives;
+        this.maxFiles = config.maxFiles;
+        this.maxSize = config.maxSize;
+        this.initOptionalMethods();
+    }
+
+    public getDebugName(): string {
+        return `limitedCache(${this.maxFiles}files,${Math.round(this.maxSize / (1024 * 1024))}MB)/${this.baseArchives.getDebugName()}`;
+    }
+
+    private async ensureInitialized(): Promise<void> {
+        if (this.initialized) return;
+
+        // Load all files on first access
+        const allFiles = await this.baseArchives.findInfo("", { type: "files" });
+        for (const file of allFiles) {
+            this.cache.set(file.path, {
+                writeTime: file.createTime,
+                accessTime: file.createTime,
+                size: file.size,
+            });
+        }
+        this.initialized = true;
+
+        // Cleanup if we're already over limits
+        await this.applyLimits();
+    }
+
+    private async applyLimits(): Promise<void> {
+        const files = Array.from(this.cache.entries()).map(([path, info]) => ({
+            path,
+            ...info,
+        }));
+
+        const totalSize = files.reduce((sum, file) => sum + file.size, 0);
+        const totalFiles = files.length;
+
+        if (totalFiles <= this.maxFiles && totalSize <= this.maxSize) {
+            return; // No cleanup needed
+        }
+
+        // Sort by access time (oldest first)
+        files.sort((a, b) => a.accessTime - b.accessTime);
+
+        let currentSize = totalSize;
+        let currentFiles = totalFiles;
+
+        // Remove files until we're under both limits
+        for (const file of files) {
+            if (currentFiles <= this.maxFiles && currentSize <= this.maxSize) {
+                break;
+            }
+
+            console.warn(`Deleting file ${file.path} to maintain limits. Currently have ${formatNumber(currentFiles)} files and ${formatNumber(currentSize)}B`);
+            try {
+                await this.baseArchives.del(file.path);
+                this.cache.delete(file.path);
+                currentSize -= file.size;
+                currentFiles--;
+            } catch (error) {
+                // File might already be deleted, continue
+                console.warn(`Failed to delete file ${file.path}:`, error);
+                this.cache.delete(file.path); // Remove from cache anyway
+                currentSize -= file.size;
+                currentFiles--;
+            }
+        }
+    }
+
+    private updateAccessTime(path: string): void {
+        const info = this.cache.get(path);
+        if (info) {
+            info.accessTime = Date.now();
+        }
+    }
+
+    public async get(path: string, config?: { range?: { start: number; end: number; }; retryCount?: number }): Promise<Buffer | undefined> {
+        await this.ensureInitialized();
+        this.updateAccessTime(path);
+        return this.baseArchives.get(path, config);
+    }
+
+    public async set(path: string, data: Buffer): Promise<void> {
+        await this.ensureInitialized();
+        // Limit before, not after, so we don't remove the file we just added
+        await this.applyLimits();
+        await this.baseArchives.set(path, data);
+        const now = Date.now();
+        this.cache.set(path, {
+            writeTime: now,
+            accessTime: now,
+            size: data.length,
+        });
+    }
+
+    public async del(path: string): Promise<void> {
+        await this.ensureInitialized();
+        await this.baseArchives.del(path);
+        this.cache.delete(path);
+    }
+
+    public async getInfo(path: string): Promise<{ writeTime: number; size: number; } | undefined> {
+        await this.ensureInitialized();
+        this.updateAccessTime(path);
+        return this.baseArchives.getInfo(path);
+    }
+
+    public async setLargeFile(config: { path: string; getNextData(): Promise<Buffer | undefined>; }): Promise<void> {
+        throw new Error(`setLargeFile in archivesLimitedCache is not supported`);
+    }
+
+    public async find(prefix: string, config?: { shallow?: boolean; type: "files" | "folders" }): Promise<string[]> {
+        await this.ensureInitialized();
+        return this.baseArchives.find(prefix, config);
+    }
+
+    public async findInfo(prefix: string, config?: { shallow?: boolean; type: "files" | "folders" }): Promise<{ path: string; createTime: number; size: number; }[]> {
+        await this.ensureInitialized();
+        return this.baseArchives.findInfo(prefix, config);
+    }
+
+    public async move(config: { path: string; target: Archives; targetPath: string }): Promise<void> {
+        throw new Error(`Move in archivesLimitedCache is not supported`);
+    }
+
+    public async copy(config: { path: string; target: Archives; targetPath: string }): Promise<void> {
+        throw new Error(`Copy in archivesLimitedCache is not supported`);
+    }
+
+    public enableLogging(): void {
+        this.baseArchives.enableLogging();
+    }
+
+    public async assertPathValid(path: string): Promise<void> {
+        await this.baseArchives.assertPathValid(path);
+    }
+
+    public getBaseArchives?: () => { parentPath: string; archives: Archives; } | undefined = () => {
+        const base = this.baseArchives.getBaseArchives?.();
+        if (base) {
+            return base;
+        }
+        return { parentPath: "", archives: this.baseArchives };
+    };
+
+    public getURL?: (path: string) => Promise<string>;
+    public getDownloadAuthorization?: (config: { validDurationInSeconds: number }) => Promise<{ bucketId: string; fileNamePrefix: string; authorizationToken: string; }>;
+
+    private initOptionalMethods(): void {
+        this.getURL = this.baseArchives.getURL;
+        this.getDownloadAuthorization = this.baseArchives.getDownloadAuthorization;
+    }
+}
+
+export function createArchivesLimitedCache(baseArchives: Archives, config: { maxFiles: number; maxSize: number }): Archives {
+    return new ArchivesLimitedCache(baseArchives, config);
+}
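createArchivesLimitedCache composes like the other archive wrappers. A usage sketch; the domain and limits are made up, and getArchivesBackblaze is the factory from archivesBackBlaze.ts above.

// Wrap a Backblaze archive so it keeps at most 1000 files / 512MB,
// evicting least-recently-accessed files first (see applyLimits above).
import { getArchivesBackblaze } from "./archivesBackBlaze";
import { createArchivesLimitedCache } from "./archivesLimitedCache";

const cached = createArchivesLimitedCache(getArchivesBackblaze("example.com"), {
    maxFiles: 1000,
    maxSize: 512 * 1024 * 1024, // bytes
});

async function example() {
    // Writes run applyLimits first, so the file just written is never the
    // one evicted; reads refresh accessTime, so hot files survive eviction.
    await cached.set("/logs/latest.bin", Buffer.from("data"));
    const buf = await cached.get("/logs/latest.bin");
    console.log(buf?.length);
}

Note that the eviction state lives in memory (rebuilt from findInfo on first access), so access times reset to createTime whenever the process restarts.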