@budarin/psw-plugin-opfs-serve-range 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,192 @@
1
+ import { HEADER_RANGE } from '@budarin/http-constants/headers';
2
+ import { MIME_APPLICATION_OCTET_STREAM } from '@budarin/http-constants/mime-types';
3
+ import { OPFS_META_FOOTER_LENGTH, MAX_META_JSON_BYTES, } from './opfsFormat.js';
4
+ import { getOpfsDir, isOpfsAvailable, shouldProcessFile } from './opfsUtil.js';
5
+ import { parseRangeHeader, build206Response } from './opfsRangeUtil.js';
6
+ export { OPFS_META_FOOTER_LENGTH, OPFS_FOLDER_NAME, KILOBYTE, MEGABYTE, GIGABYTE, } from './opfsFormat.js';
7
+ export { getOpfsDir, clearOpfsCache, configureOpfs, isOpfsAvailable, getMaxCacheFraction, } from './opfsUtil.js';
8
+ export { isBlacklisted, addToBlacklist, getStorageEstimate, getCacheLimit } from './opfsLru.js';
9
+ export { OPFS_MSG_QUOTA_EXCEEDED, OPFS_MSG_WRITE_SKIPPED_SIZE, OPFS_MSG_CACHE_LIMIT_REACHED, OPFS_MSG_EVICTION_COMPLETED, OPFS_MSG_WRITE_FAILED, OPFS_MSG_SKIP_QUOTA_EXCEEDED, } from './opfsMessages.js';
10
+ const HEADER_IF_RANGE = 'If-Range';
11
+ const urlToKeyCache = new Map();
12
+ export async function urlToOpfsKey(url) {
13
+ const cached = urlToKeyCache.get(url);
14
+ if (cached !== undefined) {
15
+ return cached;
16
+ }
17
+ const bytes = new TextEncoder().encode(url);
18
+ const hash = await crypto.subtle.digest('SHA-256', bytes);
19
+ const key = Array.from(new Uint8Array(hash))
20
+ .map((b) => b.toString(16).padStart(2, '0'))
21
+ .join('');
22
+ urlToKeyCache.set(url, key);
23
+ return key;
24
+ }
25
/**
 * Evaluates an If-Range precondition against stored metadata (RFC 9110 §13.1.5).
 *
 * The header value is either an HTTP-date (compared against the stored
 * Last-Modified) or an entity-tag (compared against the stored ETag).
 * Entity-tags MUST be compared with the *strong* comparison function, so a
 * weak validator (`W/"..."`) on either side never matches — previously the
 * `W/` prefix was stripped, letting weak ETags match in violation of the spec.
 *
 * @param {string} ifRangeValue Raw If-Range header value.
 * @param {{ etag?: string, lastModified?: string }} meta Stored validators.
 * @returns {boolean} true when the cached representation may answer with 206.
 */
function ifRangeMatches(ifRangeValue, meta) {
    const value = ifRangeValue.trim();
    if (!value) {
        return false;
    }
    if (meta.lastModified) {
        const ifRangeDate = Date.parse(value);
        // Only treat the value as a date when it actually parses as one;
        // otherwise fall through to the entity-tag comparison.
        if (!Number.isNaN(ifRangeDate)) {
            const storedDate = Date.parse(meta.lastModified);
            return !Number.isNaN(storedDate) && ifRangeDate === storedDate;
        }
    }
    if (meta.etag) {
        const isWeak = (s) => /^\s*W\//i.test(s);
        // RFC 9110: If-Range requires strong comparison — a weak validator
        // on either side can never match.
        if (isWeak(value) || isWeak(meta.etag)) {
            return false;
        }
        const unquote = (s) => s.replace(/^"|"$/g, '').trim();
        return unquote(value) === unquote(meta.etag);
    }
    return false;
}
46
/**
 * Reads the metadata footer from a cached OPFS file.
 *
 * File layout: [body bytes][meta JSON][4-byte LE meta length]. When the
 * footer is absent, implausible, or unparseable the whole file is treated
 * as body and no metadata is returned.
 *
 * @param file Blob/File stored in OPFS.
 * @returns {Promise<{ metadata: object | undefined, bodySize: number }>}
 */
async function getMetadataFromFileFooter(file) {
    const size = file.size;
    // Fallback result: no trusted footer, the entire file is the body.
    const noMeta = { metadata: undefined, bodySize: size };
    if (size < OPFS_META_FOOTER_LENGTH) {
        return noMeta;
    }
    const footer = await file.slice(size - OPFS_META_FOOTER_LENGTH, size).arrayBuffer();
    const metaLen = new DataView(footer).getUint32(0, true);
    const plausible = metaLen > 0 &&
        metaLen <= MAX_META_JSON_BYTES &&
        metaLen <= size - OPFS_META_FOOTER_LENGTH;
    if (!plausible) {
        return noMeta;
    }
    const jsonStart = size - OPFS_META_FOOTER_LENGTH - metaLen;
    try {
        const text = await file.slice(jsonStart, size - OPFS_META_FOOTER_LENGTH).text();
        return { metadata: JSON.parse(text), bodySize: jsonStart };
    }
    catch {
        return noMeta;
    }
}
70
/**
 * Rewrites the metadata footer with a fresh `lastAccessed` timestamp (used
 * by LRU eviction). Runs as a background task; failures are swallowed on
 * purpose — losing an access-time update must never break serving.
 *
 * @param handle   FileSystemFileHandle of the cached file.
 * @param metadata Previously parsed footer metadata.
 * @param bodySize Byte length of the body section (the footer starts here).
 */
async function updateLastAccessedInBackground(handle, metadata, bodySize) {
    try {
        const meta = { ...metadata, lastAccessed: Date.now() };
        const metaBytes = new TextEncoder().encode(JSON.stringify(meta));
        if (metaBytes.length > MAX_META_JSON_BYTES) {
            return;
        }
        const lengthAb = new ArrayBuffer(OPFS_META_FOOTER_LENGTH);
        new DataView(lengthAb).setUint32(0, metaBytes.length, true);
        // keepExistingData is essential: the default createWritable() starts
        // from an EMPTY staging file, so seeking past the body and writing
        // only the footer would zero out the cached body bytes.
        const writable = await handle.createWritable({ keepExistingData: true });
        await writable.seek(bodySize);
        await writable.write(metaBytes);
        await writable.write(lengthAb);
        // Drop any stale bytes left over from a longer previous footer.
        await writable.truncate(bodySize + metaBytes.length + OPFS_META_FOOTER_LENGTH);
        await writable.close();
    }
    catch {
        // Best-effort by design: ignore quota/locking errors.
    }
}
90
/**
 * Plugin factory: serves HTTP Range requests from files cached in OPFS.
 *
 * When a GET request carries a `Range` header and a matching cached file
 * exists, answers with a 206 built from a Blob slice; otherwise returns
 * undefined so other plugins / the network handle the request.
 * Returns `undefined` (no plugin) when OPFS is unavailable.
 *
 * @param options.order plugin ordering weight (default -15).
 * @param options.enableLogging emit debug/error logs via the provided logger.
 * @param options.include / options.exclude URL filters forwarded to shouldProcessFile.
 * @param options.rangeResponseCacheControl Cache-Control for 206 responses
 *        (default: long-lived immutable).
 */
export function opfsServeRange(options = {}) {
    if (!isOpfsAvailable()) {
        return undefined;
    }
    const { order = -15, enableLogging = false, include, exclude, rangeResponseCacheControl = 'max-age=31536000, immutable', } = options;
    return {
        name: 'opfs-serve-range',
        order,
        async fetch(event, logger) {
            const request = event.request;
            // Only GET requests that actually carry a Range header are handled.
            const rangeHeader = request.headers.get(HEADER_RANGE);
            if (!rangeHeader) {
                return;
            }
            if (request.method !== 'GET') {
                return;
            }
            if (!shouldProcessFile(request.url, include, exclude)) {
                if (enableLogging) {
                    logger.debug(`opfsServeRange: skip ${request.url} (filtered by include/exclude)`);
                }
                return;
            }
            const url = request.url;
            // Cached files are named by the SHA-256 hex digest of their URL.
            let key;
            try {
                key = await urlToOpfsKey(url);
            }
            catch (err) {
                if (enableLogging) {
                    logger.error(`opfsServeRange: hash failed for ${url}`, err);
                }
                return;
            }
            const root = await navigator.storage.getDirectory();
            // `false`: do not create the plugin directory just to read from it.
            let dir;
            try {
                dir = await getOpfsDir(root, false);
            }
            catch {
                if (enableLogging) {
                    logger.debug(`opfsServeRange: no plugin dir in OPFS for ${url}`);
                }
                return;
            }
            let fileHandle;
            try {
                fileHandle = await dir.getFileHandle(key);
            }
            catch {
                if (enableLogging) {
                    logger.debug(`opfsServeRange: no file in OPFS for ${url}`);
                }
                return;
            }
            const file = await fileHandle.getFile();
            // Stored layout is [body][metadata JSON][length footer]; metadata,
            // when present, carries the original size/type/validators.
            const { metadata, bodySize } = await getMetadataFromFileFooter(file);
            const size = metadata?.size ?? bodySize;
            const type = metadata?.type ?? MIME_APPLICATION_OCTET_STREAM;
            // If-Range mismatch means the client's validator is stale: pass
            // through so the network can answer with the full representation.
            const ifRangeHeader = request.headers.get(HEADER_IF_RANGE);
            if (ifRangeHeader &&
                metadata &&
                !ifRangeMatches(ifRangeHeader, metadata)) {
                if (enableLogging) {
                    logger.debug(`opfsServeRange: If-Range mismatch for ${url}, passing through`);
                }
                return;
            }
            try {
                const range = parseRangeHeader(rangeHeader, size);
                // range.end is inclusive; Blob.slice's end is exclusive.
                const blob = file.slice(range.start, range.end + 1);
                const response = build206Response(blob, range, size, {
                    type,
                    ...(metadata?.etag && { etag: metadata.etag }),
                    ...(metadata?.lastModified && {
                        lastModified: metadata.lastModified,
                    }),
                    ...(rangeResponseCacheControl && {
                        cacheControl: rangeResponseCacheControl,
                    }),
                });
                // Refresh the LRU timestamp without blocking the response.
                if (metadata && event.waitUntil) {
                    event.waitUntil(updateLastAccessedInBackground(fileHandle, metadata, bodySize));
                }
                if (enableLogging) {
                    logger.debug(`opfsServeRange: 206 for ${url} bytes ${range.start}-${range.end}`);
                }
                return response;
            }
            catch (err) {
                // NOTE(review): parseRangeHeader presumably throws on invalid/
                // unsatisfiable ranges — any error here falls through to other
                // handlers instead of answering incorrectly; confirm whether a
                // 416 would be more appropriate.
                if (enableLogging) {
                    logger.error(`opfsServeRange: error for ${url}`, err);
                }
                return;
            }
        },
    };
}
188
+ export { parseRangeHeader, build206Response, build206ResponseFromStream, createRangeExtractTransform, } from './opfsRangeUtil.js';
189
+ export { writeToOpfs, metadataFromResponse } from './opfsWrite.js';
190
+ export { opfsPrecache } from './opfsPrecache.js';
191
+ export { opfsRangeFromNetworkAndCache } from './opfsRangeFromNetworkAndCache.js';
192
+ export { opfsBackgroundFetch } from './opfsBackgroundFetch.js';
@@ -0,0 +1,8 @@
1
import type { Plugin } from '@budarin/pluggable-serviceworker';
/** Options for the `opfs-background-fetch` plugin. */
export interface OpfsBackgroundFetchOptions {
    /** Plugin ordering weight (default 0). */
    order?: number;
    /** URL patterns to cache; forwarded to shouldProcessFile. */
    include?: string[];
    /** URL patterns to skip; forwarded to shouldProcessFile. */
    exclude?: string[];
    /** Emit debug/warn/error logs through the plugin logger (default false). */
    enableLogging?: boolean;
}
/**
 * Creates a plugin that stores successfully completed Background Fetch
 * records in OPFS. Returns `undefined` when OPFS is unavailable.
 */
export declare function opfsBackgroundFetch(options?: OpfsBackgroundFetchOptions): Plugin | undefined;
@@ -0,0 +1,75 @@
1
+ import { notifyClients } from '@budarin/pluggable-serviceworker/utils';
2
+ import { getOpfsDir, urlToOpfsKey } from './index.js';
3
+ import { isOpfsAvailable, shouldProcessFile } from './opfsUtil.js';
4
+ import { writeToOpfs, metadataFromResponse } from './opfsWrite.js';
5
+ import { isBlacklisted } from './opfsLru.js';
6
+ import { OPFS_MSG_SKIP_QUOTA_EXCEEDED } from './opfsMessages.js';
7
/**
 * Plugin factory: hooks the Background Fetch API lifecycle and writes each
 * successfully downloaded record into OPFS so it can later be served locally.
 * Returns `undefined` (no plugin) when OPFS is unavailable.
 */
export function opfsBackgroundFetch(options = {}) {
    if (!isOpfsAvailable()) {
        return undefined;
    }
    const { order = 0, include, exclude, enableLogging = false } = options;
    return {
        name: 'opfs-background-fetch',
        order,
        // Fired when every request in the background fetch completed.
        async backgroundfetchsuccess(event, logger) {
            const root = await navigator.storage.getDirectory();
            // `true`: create the plugin directory if it does not exist yet.
            const dir = await getOpfsDir(root, true);
            const records = await event.registration.matchAll();
            for (const record of records) {
                const url = record.request.url;
                if (!shouldProcessFile(url, include, exclude)) {
                    if (enableLogging) {
                        logger.debug(`opfsBackgroundFetch: skip ${url} (filtered by include/exclude)`);
                    }
                    continue;
                }
                // URLs that previously blew the quota are skipped; clients are
                // notified so the app can surface the condition.
                if (isBlacklisted(url)) {
                    notifyClients(OPFS_MSG_SKIP_QUOTA_EXCEEDED, { url });
                    if (enableLogging) {
                        logger.debug(`opfsBackgroundFetch: skip ${url} (blacklisted, quota exceeded)`);
                    }
                    continue;
                }
                const response = await record.responseReady;
                if (!response.ok || !response.body) {
                    if (enableLogging) {
                        logger.debug(`opfsBackgroundFetch: skip ${url} (not ok or no body)`);
                    }
                    continue;
                }
                try {
                    const key = await urlToOpfsKey(url);
                    const metadata = metadataFromResponse(response, url);
                    // knownSize lets the writer pre-check quota when the
                    // response advertised its length.
                    await writeToOpfs(dir, key, response.body, metadata, {
                        url,
                        ...(metadata.size > 0 && { knownSize: metadata.size }),
                    });
                    if (enableLogging) {
                        logger.debug(`opfsBackgroundFetch: cached ${url} -> ${key} (${metadata.size} bytes)`);
                    }
                }
                catch (err) {
                    // One failed record must not abort the remaining writes.
                    if (enableLogging) {
                        logger.error(`opfsBackgroundFetch: write failed ${record.request.url}`, err);
                    }
                }
            }
        },
        // Remaining handlers are log-only observers of the fetch lifecycle.
        async backgroundfetchfail(event, logger) {
            if (enableLogging) {
                logger.warn(`opfsBackgroundFetch: background fetch failed, id=${event.registration.id}`);
            }
        },
        async backgroundfetchabort(event, logger) {
            if (enableLogging) {
                logger.debug(`opfsBackgroundFetch: background fetch aborted, id=${event.registration.id}`);
            }
        },
        async backgroundfetchclick(event, logger) {
            if (enableLogging) {
                logger.debug(`opfsBackgroundFetch: user clicked download UI, id=${event.registration.id}`);
            }
        },
    };
}
@@ -0,0 +1,8 @@
1
import type { Plugin } from '@budarin/pluggable-serviceworker';
/** Options for the `opfs-cache-on-fetch` plugin. */
export interface OpfsCacheOnFetchOptions {
    /** Plugin ordering weight (default -10). */
    order?: number;
    /** URL patterns to cache; forwarded to shouldProcessFile. */
    include?: string[];
    /** URL patterns to skip; forwarded to shouldProcessFile. */
    exclude?: string[];
    /** Emit debug/error logs through the plugin logger (default false). */
    enableLogging?: boolean;
}
/**
 * Creates a plugin that transparently caches GET responses into OPFS while
 * serving them, and synthesizes 206 responses for Range requests.
 */
export declare function opfsCacheOnFetch(options?: OpfsCacheOnFetchOptions): Plugin;
@@ -0,0 +1,153 @@
1
+ import { HEADER_RANGE } from '@budarin/http-constants/headers';
2
+ import { getOpfsDir, urlToOpfsKey } from './index.js';
3
+ import { parseRangeHeader, build206Response, build206ResponseFromStream, createRangeExtractTransform, } from './opfsRangeUtil.js';
4
+ import { writeToOpfs, metadataFromResponse } from './opfsWrite.js';
5
+ import { shouldProcessFile } from './opfsUtil.js';
6
// URLs with a full-file background download currently in flight; prevents
// duplicate downloads when several Range requests arrive for the same URL.
const loadingUrls = new Set();
/**
 * Downloads the complete resource at `url` (plain GET, no Range header) and
 * writes it to OPFS so future Range requests can be served locally.
 * Errors are logged (when enabled) and swallowed; the in-flight marker in
 * `loadingUrls` is always cleared.
 *
 * @param {string} url Absolute resource URL.
 * @param logger Plugin logger.
 * @param {boolean} enableLogging Whether to emit debug/error logs.
 */
async function backgroundFullFetchToOpfs(url, logger, enableLogging) {
    try {
        const fullRequest = new Request(url, { method: 'GET' });
        const response = await fetch(fullRequest);
        if (!response.ok || !response.body) {
            if (enableLogging) {
                logger.debug(`opfsCacheOnFetch: background full GET ${url} -> ${response.status}, skip cache`);
            }
            return;
        }
        // Only cache a complete 200 representation (e.g. not a 206).
        if (response.status !== 200) {
            if (enableLogging) {
                logger.debug(`opfsCacheOnFetch: background full GET ${url} -> ${response.status}, skip cache`);
            }
            return;
        }
        const metadata = metadataFromResponse(response);
        const key = await urlToOpfsKey(url);
        const root = await navigator.storage.getDirectory();
        const dir = await getOpfsDir(root, true);
        await writeToOpfs(dir, key, response.body, metadata);
        if (enableLogging) {
            logger.debug(`opfsCacheOnFetch: background cached ${url} -> ${key} (${metadata.size} bytes)`);
        }
    }
    catch (err) {
        if (enableLogging) {
            logger.error(`opfsCacheOnFetch: background full GET failed ${url}`, err);
        }
    }
    finally {
        // Always release the in-flight marker, success or failure.
        loadingUrls.delete(url);
    }
}
41
/**
 * Plugin factory: transparently populates the OPFS cache from network
 * responses while serving them.
 *
 * - Plain GET: fetches from the network, tees the body so one branch streams
 *   to the client while the other is written to OPFS.
 * - GET with Range: fetches the ranged request; if the origin answers 206,
 *   passes it through and kicks off a full background download; if the
 *   origin answers 200 (ignores Range), extracts the requested slice and
 *   returns a synthesized 206 while caching the full body.
 */
export function opfsCacheOnFetch(options = {}) {
    const { order = -10, include, exclude, enableLogging = false, } = options;
    return {
        name: 'opfs-cache-on-fetch',
        order,
        async fetch(event, logger) {
            const request = event.request;
            if (request.method !== 'GET') {
                return;
            }
            if (!shouldProcessFile(request.url, include, exclude)) {
                return;
            }
            const url = request.url;
            const rangeHeader = request.headers.get(HEADER_RANGE);
            if (!rangeHeader) {
                // ---- Plain GET: stream to client and cache simultaneously ----
                try {
                    const response = await fetch(request);
                    if (!response.ok || !response.body) {
                        return response;
                    }
                    // Only full 200 representations are cacheable.
                    if (response.status !== 200) {
                        return response;
                    }
                    const metadata = metadataFromResponse(response);
                    const key = await urlToOpfsKey(url);
                    const root = await navigator.storage.getDirectory();
                    const dir = await getOpfsDir(root, true);
                    // tee(): one branch for the client, one for the cache writer.
                    const [branch1, branch2] = response.body.tee();
                    // Fire-and-forget write; failures are logged, not surfaced.
                    writeToOpfs(dir, key, branch2, metadata).catch((err) => {
                        if (enableLogging) {
                            logger.error(`opfsCacheOnFetch: write failed ${url}`, err);
                        }
                    });
                    if (enableLogging) {
                        logger.debug(`opfsCacheOnFetch: caching full GET ${url} (${metadata.size} bytes)`);
                    }
                    return new Response(branch1, {
                        status: response.status,
                        statusText: response.statusText,
                        headers: response.headers,
                    });
                }
                catch {
                    // Network failure: let other plugins / default handling run.
                    return;
                }
            }
            // ---- GET with a Range header ----
            try {
                const response = await fetch(request);
                if (!response.body) {
                    return response;
                }
                if (response.status === 206) {
                    // Origin honored the Range: pass it through, and start one
                    // full background download so later ranges hit OPFS.
                    // NOTE(review): fire-and-forget without event.waitUntil —
                    // the worker may be terminated mid-download; confirm.
                    if (!loadingUrls.has(url)) {
                        loadingUrls.add(url);
                        backgroundFullFetchToOpfs(url, logger, enableLogging);
                    }
                    return new Response(response.body, {
                        status: response.status,
                        statusText: response.statusText,
                        headers: response.headers,
                    });
                }
                if (response.status === 416) {
                    // Range not satisfiable: propagate as-is.
                    return response;
                }
                if (response.status === 200) {
                    // Origin ignored the Range and sent the full body.
                    const contentLength = response.headers.get('Content-Length');
                    const fullSize = contentLength
                        ? parseInt(contentLength, 10)
                        : 0;
                    const type = response.headers.get('Content-Type') ??
                        'application/octet-stream';
                    const etag = response.headers.get('ETag') ?? undefined;
                    const lastModified = response.headers.get('Last-Modified') ?? undefined;
                    if (fullSize > 0 &&
                        Number.isInteger(fullSize)) {
                        // Known size: stream-extract the requested slice while
                        // the other tee branch caches the full body.
                        const range = parseRangeHeader(rangeHeader, fullSize);
                        const metadata = metadataFromResponse(response);
                        const key = await urlToOpfsKey(url);
                        const root = await navigator.storage.getDirectory();
                        const dir = await getOpfsDir(root, true);
                        const [branch1, branch2] = response.body.tee();
                        writeToOpfs(dir, key, branch2, metadata).catch((err) => {
                            if (enableLogging) {
                                logger.error(`opfsCacheOnFetch: write failed ${url}`, err);
                            }
                        });
                        const rangeStream = branch1.pipeThrough(createRangeExtractTransform(range));
                        return build206ResponseFromStream(rangeStream, range, fullSize, {
                            type,
                            ...(etag && { etag }),
                            ...(lastModified && { lastModified }),
                        });
                    }
                    // Unknown size: buffer the whole body (not cached here) and
                    // slice the requested range out of the Blob.
                    const blob = await response.blob();
                    const size = blob.size;
                    const range = parseRangeHeader(rangeHeader, size);
                    const rangeBlob = blob.slice(range.start, range.end + 1);
                    return build206Response(rangeBlob, range, size, {
                        type,
                        ...(etag && { etag }),
                        ...(lastModified && { lastModified }),
                    });
                }
                return response;
            }
            catch {
                return;
            }
        },
    };
}
@@ -0,0 +1,14 @@
1
/** Name of the OPFS directory that holds all cached range-request files. */
export declare const OPFS_FOLDER_NAME = "range-requests-cache";
/** Byte length of the little-endian uint32 footer storing the metadata JSON length. */
export declare const OPFS_META_FOOTER_LENGTH = 4;
/** Upper bound for the serialized metadata JSON; larger footers are treated as corrupt. */
export declare const MAX_META_JSON_BYTES = 2048;
/** Binary size helpers. */
export declare const KILOBYTE = 1024;
export declare const MEGABYTE: number;
export declare const GIGABYTE: number;
/** Metadata persisted in a cached file's footer. */
export interface OpfsMetadata {
    /** Original resource URL. */
    url: string;
    /** Full body size in bytes. */
    size: number;
    /** MIME type of the body, when known. */
    type?: string;
    /** ETag validator from the original response, when present. */
    etag?: string;
    /** Last-Modified validator from the original response, when present. */
    lastModified?: string;
    /** Epoch-milliseconds of the last cache read; used for LRU eviction. */
    lastAccessed?: number;
}
@@ -0,0 +1,6 @@
1
/** Name of the OPFS directory that holds all cached range-request files. */
export const OPFS_FOLDER_NAME = 'range-requests-cache';
/** Byte length of the LE uint32 footer storing the metadata JSON length. */
export const OPFS_META_FOOTER_LENGTH = 4;
/** Upper bound for serialized metadata JSON; larger footers are treated as corrupt. */
export const MAX_META_JSON_BYTES = 2048;
/** Binary size units. */
export const KILOBYTE = 1024;
export const MEGABYTE = KILOBYTE * KILOBYTE;
export const GIGABYTE = KILOBYTE * MEGABYTE;
@@ -0,0 +1,28 @@
1
/** Per-file cache inventory entry used by the LRU eviction logic. */
export interface CacheFileEntry {
    /** OPFS file name (SHA-256 hex digest of the source URL). */
    key: string;
    /** Total stored file size in bytes (body plus metadata footer). */
    size: number;
    /** Epoch-milliseconds of last access; 0 when unknown/unreadable. */
    lastAccessed: number;
}
/** Simplified navigator.storage.estimate() result with missing fields coerced to 0. */
export interface StorageEstimate {
    quota: number;
    usage: number;
}
/** True when `url` was marked un-cacheable (e.g. after a quota failure). */
export declare function isBlacklisted(url: string): boolean;
/** Marks `url` as un-cacheable for the lifetime of this worker. */
export declare function addToBlacklist(url: string): void;
/** Reads the current storage quota/usage snapshot. */
export declare function getStorageEstimate(): Promise<StorageEstimate>;
/** Computes the cache byte budget from the configured quota fraction and free space. */
export declare function getCacheLimit(estimate: StorageEstimate): number;
/** Lists cached files with size and last-access time read from their footers. */
export declare function listCacheFilesWithMeta(dir: FileSystemDirectoryHandle): Promise<CacheFileEntry[]>;
/** Sums the byte sizes of all entries. */
export declare function getTotalCacheSize(entries: CacheFileEntry[]): number;
/** Selects least-recently-used entries to delete until at least `needToFree` bytes are freed. */
export declare function computeEvictionSet(entries: CacheFileEntry[], needToFree: number): string[];
/** Removes the given keys from the cache directory, ignoring individual failures. */
export declare function evictFiles(dir: FileSystemDirectoryHandle, keys: string[]): Promise<void>;
/** Outcome of ensureSpaceForWrite: success (optionally listing evicted keys) or a failure reason. */
export type EnsureSpaceResult = {
    ok: true;
    evictedKeys?: string[];
} | {
    ok: false;
    reason: string;
};
export interface EnsureSpaceOptions {
    /** Invoked with the evicted keys after a successful eviction pass. */
    onEvicted?: (keys: string[]) => void;
}
/** Frees cache space for a new file of `newFileSize` bytes, evicting LRU entries when needed. */
export declare function ensureSpaceForWrite(dir: FileSystemDirectoryHandle, newFileSize: number, options?: EnsureSpaceOptions): Promise<EnsureSpaceResult>;
@@ -0,0 +1,112 @@
1
+ import { OPFS_META_FOOTER_LENGTH, MAX_META_JSON_BYTES, } from './opfsFormat.js';
2
+ import { getMaxCacheFraction } from './opfsUtil.js';
3
// URLs that failed to cache (e.g. exceeded quota); skipped by future writes
// for the lifetime of this worker.
const blacklist = new Set();
/** Returns true when `url` was previously marked as un-cacheable. */
export function isBlacklisted(url) {
    return blacklist.has(url);
}
/** Marks `url` as un-cacheable for the lifetime of this worker. */
export function addToBlacklist(url) {
    blacklist.add(url);
}
10
/**
 * Snapshot of navigator.storage.estimate() with missing fields coerced to 0.
 * @returns {Promise<{ quota: number, usage: number }>}
 */
export async function getStorageEstimate() {
    const { quota, usage } = await navigator.storage.estimate();
    return {
        quota: quota ?? 0,
        usage: usage ?? 0,
    };
}
17
/**
 * Computes the cache byte budget: the configured fraction of the total
 * quota, further capped by the space currently unused. Never negative.
 *
 * @param {{ quota: number, usage: number }} estimate Storage snapshot.
 * @returns {number} Allowed cache size in bytes.
 */
export function getCacheLimit(estimate) {
    const byFraction = Math.floor(estimate.quota * getMaxCacheFraction());
    const available = estimate.quota - estimate.usage;
    return Math.max(0, Math.min(byFraction, available));
}
23
/**
 * Extracts { size, lastAccessed } from a cached file's metadata footer.
 * Layout: [body][meta JSON][4-byte LE length]. Unparseable or implausible
 * footers yield lastAccessed 0, so the entry sorts as least-recently-used.
 */
async function readMetaFromFile(file) {
    const size = file.size;
    const fallback = { size, lastAccessed: 0 };
    if (size < OPFS_META_FOOTER_LENGTH) {
        return fallback;
    }
    const footer = await file.slice(size - OPFS_META_FOOTER_LENGTH, size).arrayBuffer();
    const metaLen = new DataView(footer).getUint32(0, true);
    const plausible = metaLen > 0 &&
        metaLen <= MAX_META_JSON_BYTES &&
        metaLen <= size - OPFS_META_FOOTER_LENGTH;
    if (!plausible) {
        return fallback;
    }
    try {
        const start = size - OPFS_META_FOOTER_LENGTH - metaLen;
        const metadata = JSON.parse(await file.slice(start, size - OPFS_META_FOOTER_LENGTH).text());
        return {
            size,
            lastAccessed: metadata.lastAccessed ?? 0,
        };
    }
    catch {
        return fallback;
    }
}
/**
 * Lists every file in the cache directory with its size and last-access
 * time. Entries that fail to resolve are silently skipped (best effort).
 *
 * @param dir FileSystemDirectoryHandle of the cache directory.
 * @returns {Promise<Array<{ key: string, size: number, lastAccessed: number }>>}
 */
export async function listCacheFilesWithMeta(dir) {
    const entries = [];
    for await (const [name, handle] of dir.entries()) {
        if (handle.kind !== 'file') {
            continue;
        }
        try {
            const { size, lastAccessed } = await readMetaFromFile(await handle.getFile());
            entries.push({ key: name, size, lastAccessed });
        }
        catch {
            // Ignore unreadable entries; eviction proceeds without them.
        }
    }
    return entries;
}
64
/** Sums the byte sizes of all cache entries. */
export function getTotalCacheSize(entries) {
    let total = 0;
    for (const entry of entries) {
        total += entry.size;
    }
    return total;
}
67
/**
 * Picks which cached files to delete to free at least `needToFree` bytes.
 * Entries are considered least-recently-used first; the input array is not
 * mutated. Returns [] when nothing needs to be freed.
 *
 * @param {Array<{ key: string, size: number, lastAccessed: number }>} entries
 * @param {number} needToFree Bytes that must be reclaimed.
 * @returns {string[]} Keys to delete, oldest first.
 */
export function computeEvictionSet(entries, needToFree) {
    if (needToFree <= 0) {
        return [];
    }
    const byAge = entries.slice().sort((a, b) => a.lastAccessed - b.lastAccessed);
    const keys = [];
    let freed = 0;
    for (const entry of byAge) {
        if (freed >= needToFree) {
            break;
        }
        keys.push(entry.key);
        freed += entry.size;
    }
    return keys;
}
83
/**
 * Deletes the given keys from the cache directory one by one, ignoring
 * individual failures (an entry may already be gone or currently locked).
 */
export async function evictFiles(dir, keys) {
    for (const key of keys) {
        try {
            await dir.removeEntry(key);
        }
        catch {
            // Best effort: skip entries that cannot be removed right now.
        }
    }
}
92
/**
 * Makes room for a new cache entry of `newFileSize` bytes, evicting the
 * least-recently-used files when necessary.
 *
 * Space is constrained both by the configured cache limit and by the
 * remaining storage quota. Returns { ok: false } when the file would not
 * fit even if the entire cache were evicted.
 *
 * @param dir FileSystemDirectoryHandle of the cache directory.
 * @param {number} newFileSize Size of the incoming file in bytes.
 * @param {{ onEvicted?: (keys: string[]) => void }} options Eviction callback.
 */
export async function ensureSpaceForWrite(dir, newFileSize, options = {}) {
    const { onEvicted } = options;
    const estimate = await getStorageEstimate();
    const limit = getCacheLimit(estimate);
    const entries = await listCacheFilesWithMeta(dir);
    const totalSize = getTotalCacheSize(entries);
    const overLimit = totalSize + newFileSize - limit;
    const overQuota = estimate.usage + newFileSize - estimate.quota;
    const needToFree = Math.max(0, overLimit, overQuota);
    if (needToFree === 0) {
        return { ok: true };
    }
    if (needToFree > totalSize) {
        // Even deleting every cached file would not free enough space.
        return {
            ok: false,
            reason: 'File does not fit even after full eviction',
        };
    }
    const evictedKeys = computeEvictionSet(entries, needToFree);
    await evictFiles(dir, evictedKeys);
    onEvicted?.(evictedKeys);
    return { ok: true, evictedKeys };
}
@@ -0,0 +1,7 @@
1
// Message type constants broadcast to window clients about OPFS cache
// lifecycle events. NOTE(review): only OPFS_MSG_SKIP_QUOTA_EXCEEDED is
// visibly emitted in this package's plugin code; the others are presumably
// emitted from the write/eviction helpers — confirm against opfsWrite.
export declare const OPFS_MSG_QUOTA_EXCEEDED = "OPFS_QUOTA_EXCEEDED";
export declare const OPFS_MSG_WRITE_SKIPPED_SIZE = "OPFS_WRITE_SKIPPED_SIZE";
export declare const OPFS_MSG_CACHE_LIMIT_REACHED = "OPFS_CACHE_LIMIT_REACHED";
export declare const OPFS_MSG_EVICTION_COMPLETED = "OPFS_EVICTION_COMPLETED";
export declare const OPFS_MSG_WRITE_FAILED = "OPFS_WRITE_FAILED";
/** Broadcast when a blacklisted (quota-exceeded) URL is skipped. */
export declare const OPFS_MSG_SKIP_QUOTA_EXCEEDED = "OPFS_SKIP_QUOTA_EXCEEDED";
/** Union of all OPFS cache message type strings. */
export type OpfsMessageType = typeof OPFS_MSG_QUOTA_EXCEEDED | typeof OPFS_MSG_WRITE_SKIPPED_SIZE | typeof OPFS_MSG_CACHE_LIMIT_REACHED | typeof OPFS_MSG_EVICTION_COMPLETED | typeof OPFS_MSG_WRITE_FAILED | typeof OPFS_MSG_SKIP_QUOTA_EXCEEDED;
@@ -0,0 +1,6 @@
1
// Message type constants broadcast to window clients (via notifyClients) so
// the application can observe cache lifecycle events.
export const OPFS_MSG_QUOTA_EXCEEDED = 'OPFS_QUOTA_EXCEEDED';
export const OPFS_MSG_WRITE_SKIPPED_SIZE = 'OPFS_WRITE_SKIPPED_SIZE';
export const OPFS_MSG_CACHE_LIMIT_REACHED = 'OPFS_CACHE_LIMIT_REACHED';
export const OPFS_MSG_EVICTION_COMPLETED = 'OPFS_EVICTION_COMPLETED';
export const OPFS_MSG_WRITE_FAILED = 'OPFS_WRITE_FAILED';
// Sent when a URL is skipped because it was blacklisted after exceeding quota.
export const OPFS_MSG_SKIP_QUOTA_EXCEEDED = 'OPFS_SKIP_QUOTA_EXCEEDED';
@@ -0,0 +1,7 @@
1
import type { Plugin } from '@budarin/pluggable-serviceworker';
/** Options for the `opfs-precache` plugin. */
export interface OpfsPrecacheOptions {
    /** URLs to download during install: a static list or an async factory. */
    urls: string[] | (() => Promise<string[]>);
    /** Plugin ordering weight (default 0). */
    order?: number;
    /** Emit debug/warn/error logs through the plugin logger (default false). */
    enableLogging?: boolean;
}
/**
 * Creates a plugin that downloads the given URLs into OPFS during the
 * service worker `install` event. Returns `undefined` when OPFS is unavailable.
 */
export declare function opfsPrecache(options: OpfsPrecacheOptions): Plugin | undefined;
@@ -0,0 +1,46 @@
1
+ import { getOpfsDir, urlToOpfsKey } from './index.js';
2
+ import { isOpfsAvailable } from './opfsUtil.js';
3
+ import { writeToOpfs, metadataFromResponse } from './opfsWrite.js';
4
/**
 * Plugin factory: during the service worker `install` event, downloads the
 * configured URLs and stores them in OPFS for later local serving.
 * Returns `undefined` (no plugin) when OPFS is unavailable.
 *
 * @param options.urls static URL list, or an async factory producing one.
 * @param options.order plugin ordering weight (default 0).
 * @param options.enableLogging emit debug/warn/error logs (default false).
 */
export function opfsPrecache(options) {
    if (!isOpfsAvailable()) {
        return undefined;
    }
    const { urls, order = 0, enableLogging = false } = options;
    return {
        name: 'opfs-precache',
        order,
        async install(_event, logger) {
            // Resolve the URL list lazily so callers can compute it at install time.
            const list = typeof urls === 'function' ? await urls() : urls;
            if (list.length === 0) {
                return;
            }
            const root = await navigator.storage.getDirectory();
            const dir = await getOpfsDir(root, true);
            // URLs are fetched sequentially. NOTE(review): presumably to limit
            // install-time memory/network pressure — confirm before parallelizing.
            for (const url of list) {
                try {
                    const response = await fetch(url);
                    if (!response.ok || !response.body) {
                        if (enableLogging) {
                            logger.warn(`opfsPrecache: skip ${url} (status ${response.status} or no body)`);
                        }
                        continue;
                    }
                    const metadata = metadataFromResponse(response, url);
                    const key = await urlToOpfsKey(url);
                    // knownSize lets the writer pre-check quota when the
                    // response advertised its length.
                    await writeToOpfs(dir, key, response.body, metadata, {
                        url,
                        ...(metadata.size > 0 && { knownSize: metadata.size }),
                    });
                    if (enableLogging) {
                        logger.debug(`opfsPrecache: cached ${url} -> ${key} (${metadata.size} bytes)`);
                    }
                }
                catch (err) {
                    // A failed precache URL is logged and skipped; install proceeds.
                    if (enableLogging) {
                        logger.error(`opfsPrecache: failed ${url}`, err);
                    }
                }
            }
        },
    };
}