@esportsplus/web-storage 0.4.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +248 -0
- package/build/drivers/localstorage.d.ts +1 -0
- package/build/drivers/localstorage.js +10 -2
- package/build/drivers/sessionstorage.d.ts +1 -0
- package/build/drivers/sessionstorage.js +10 -2
- package/build/lz.d.ts +3 -0
- package/build/lz.js +134 -0
- package/package.json +1 -1
- package/src/drivers/localstorage.ts +13 -2
- package/src/drivers/sessionstorage.ts +13 -2
- package/src/lz.ts +192 -0
- package/storage/test-audit-web-storage.md +74 -0
- package/tests/drivers/localstorage.ts +86 -0
- package/tests/drivers/sessionstorage.ts +85 -0
- package/tests/index.ts +409 -0
- package/tests/lz.ts +324 -0
- package/storage/feature-research.md +0 -173
package/src/lz.ts
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
// Mutable state threaded through compress(): `buffer`/`bitsInBuffer` form a
// 16-bit MSB-first bit accumulator, `numBits` is the current (growing) code
// width, and `output` collects finished 16-bit words (each stored +1).
type CompressCtx = { bitsInBuffer: number; buffer: number; numBits: number; output: number[] };

// Mutable cursor threaded through decompress(): `pos` indexes the next char
// of `compressed`, `currentValue` holds the 16-bit word being consumed, and
// `bitPos` is the next bit within it (>15 means a refill is needed).
type DecompressCtx = { bitPos: number; compressed: string; currentValue: number; pos: number };
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
function emitLiteral(ctx: CompressCtx, ch: string) {
|
|
6
|
+
let code = ch.charCodeAt(0);
|
|
7
|
+
|
|
8
|
+
if (code < 256) {
|
|
9
|
+
writeBits(ctx, ctx.numBits, 0);
|
|
10
|
+
writeBits(ctx, 8, code);
|
|
11
|
+
}
|
|
12
|
+
else {
|
|
13
|
+
writeBits(ctx, ctx.numBits, 1);
|
|
14
|
+
writeBits(ctx, 16, code);
|
|
15
|
+
}
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
function readBits(ctx: DecompressCtx, n: number): number {
|
|
19
|
+
let result = 0;
|
|
20
|
+
|
|
21
|
+
for (let i = 0; i < n; i++) {
|
|
22
|
+
if (ctx.bitPos > 15) {
|
|
23
|
+
ctx.currentValue = ctx.compressed.charCodeAt(ctx.pos++) - 1;
|
|
24
|
+
ctx.bitPos = 0;
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
result = (result << 1) | ((ctx.currentValue >> (15 - ctx.bitPos)) & 1);
|
|
28
|
+
ctx.bitPos++;
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
return result;
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
function writeBits(ctx: CompressCtx, n: number, value: number) {
|
|
35
|
+
for (let i = n - 1; i >= 0; i--) {
|
|
36
|
+
ctx.buffer = (ctx.buffer << 1) | ((value >> i) & 1);
|
|
37
|
+
ctx.bitsInBuffer++;
|
|
38
|
+
|
|
39
|
+
if (ctx.bitsInBuffer === 16) {
|
|
40
|
+
ctx.output.push(ctx.buffer + 1);
|
|
41
|
+
ctx.buffer = 0;
|
|
42
|
+
ctx.bitsInBuffer = 0;
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
// LZW-style compressor emitting a stream of variable-width codes packed into
// 16-bit chars (each output char code stored +1 so none is 0). Protocol:
// code 0 = 8-bit literal follows, code 1 = 16-bit literal follows,
// code 2 = end-of-stream, codes >= 3 index the shared dictionary. The final
// output char is not part of the bit stream — it records how many bits of
// the last 16-bit word are meaningful (decompress() strips it).
const compress = (input: string): string => {
    if (!input) {
        return '';
    }

    let ctx: CompressCtx = { bitsInBuffer: 0, buffer: 0, numBits: 2, output: [] },
        dictSize = 3,
        dictionary = new Map<string, number>(),
        w = '';

    for (let i = 0, n = input.length; i < n; i++) {
        let c = input[i],
            wc = w + c;

        // Extend the current phrase while the dictionary still matches
        if (dictionary.has(wc)) {
            w = wc;
            continue;
        }

        if (w.length > 0) {
            if (dictionary.has(w)) {
                writeBits(ctx, ctx.numBits, dictionary.get(w)!);
            }
            else {
                // w is a single never-before-seen char here (multi-char w
                // only ever arises from a dictionary hit), so emit a literal
                emitLiteral(ctx, w);
            }

            dictionary.set(wc, dictSize++);

            // Widen the code size once the dictionary outgrows it — must
            // stay in lockstep with decompress()
            if (dictSize > (1 << ctx.numBits)) {
                ctx.numBits++;
            }
        }

        w = c;
    }

    // Flush the final pending phrase
    if (w.length > 0) {
        if (dictionary.has(w)) {
            writeBits(ctx, ctx.numBits, dictionary.get(w)!);
        }
        else {
            emitLiteral(ctx, w);
        }
    }

    // Trailing dict advance: ensures the decompressor's last placeholder growth
    // matches (the decompressor will push a placeholder before reading EOF)
    dictSize++;

    if (dictSize > (1 << ctx.numBits)) {
        ctx.numBits++;
    }

    // End-of-stream marker
    writeBits(ctx, ctx.numBits, 2);

    // Flush any partial word, left-aligned within 16 bits
    if (ctx.bitsInBuffer > 0) {
        ctx.output.push(((ctx.buffer << (16 - ctx.bitsInBuffer)) & 0xFFFF) + 1);
    }

    // Append the meaningful-bit count of the last word (16 when it was full)
    ctx.output.push((ctx.bitsInBuffer === 0 ? 16 : ctx.bitsInBuffer) + 1);

    let chars: string[] = [];

    for (let i = 0, n = ctx.output.length; i < n; i++) {
        chars.push(String.fromCharCode(ctx.output[i]));
    }

    return chars.join('');
};
|
|
118
|
+
|
|
119
|
+
// Inverse of compress(): rebuilds the dictionary in lockstep with the
// compressor and resolves each variable-width code back into a phrase.
const decompress = (compressed: string): string => {
    if (!compressed) {
        return '';
    }

    let ctx: DecompressCtx = { bitPos: 16, compressed: '', currentValue: 0, pos: 0 },
        dictSize = 3,
        dictionary: string[] = [],
        numBits = 2;

    // Strip the trailing char — it records how many bits of the final
    // 16-bit word were meaningful and is not part of the bit stream
    ctx.compressed = compressed.substring(0, compressed.length - 1);

    // The first code must be a literal (the dictionary starts empty)
    let code = readBits(ctx, numBits),
        entry: string;

    if (code === 0) {
        entry = String.fromCharCode(readBits(ctx, 8));
    }
    else if (code === 1) {
        entry = String.fromCharCode(readBits(ctx, 16));
    }
    else {
        // Code 2 (end-of-stream) as the very first code yields an empty result
        return '';
    }

    let result: string[] = [entry],
        w = entry;

    while (true) {
        // Reserve dict slot BEFORE reading (matches compressor's add-before-next-emit timing)
        let slotIdx = dictionary.length;

        dictionary.push('');
        dictSize++;

        // Widen the code size in lockstep with the compressor
        if (dictSize > (1 << numBits)) {
            numBits++;
        }

        code = readBits(ctx, numBits);

        if (code === 2) {
            // End-of-stream: discard the unused placeholder slot
            dictionary.pop();
            break;
        }

        let slotCode = slotIdx + 3;

        if (code === 0) {
            entry = String.fromCharCode(readBits(ctx, 8));
        }
        else if (code === 1) {
            entry = String.fromCharCode(readBits(ctx, 16));
        }
        else if (code === slotCode) {
            // Self-referential case: the code names the slot being filled in
            // this very iteration, so its value is w plus w's own first char
            entry = w + w[0];
        }
        else if (code >= 3 && code < slotCode) {
            entry = dictionary[code - 3];
        }
        else {
            // Anything else means the stream is corrupt or not ours
            throw new Error('LZ: invalid decompression code');
        }

        // Backfill the reserved slot: previous phrase + first char of this one
        dictionary[slotIdx] = w + entry[0];
        result.push(entry);
        w = entry;
    }

    return result.join('');
};
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
export { compress, decompress };
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
# Test Audit: @esportsplus/web-storage
|
|
2
|
+
|
|
3
|
+
## Summary
|
|
4
|
+
- Source modules: 8
|
|
5
|
+
- Tested modules: 8 (100%)
|
|
6
|
+
- Benchmarked modules: 0 (0%)
|
|
7
|
+
- Total tests: 253
|
|
8
|
+
- Total gaps found: 23
|
|
9
|
+
|
|
10
|
+
## Missing Tests (Priority Order)
|
|
11
|
+
|
|
12
|
+
| Module | Export / Path | Type | Risk |
|
|
13
|
+
|--------|-------------|------|------|
|
|
14
|
+
| Local<T> + SessionStorage | All Local<T> methods with SS driver | integration | HIGH — only 1 factory test exists for SS; zero method coverage at Local<T> layer |
|
|
15
|
+
| Local<T> + Memory | TTL methods (ttl, persist, cleanup) | integration | HIGH — TTL logic is driver-agnostic but untested against Memory driver |
|
|
16
|
+
| Local<T> + Memory | get(key, factory) | integration | MED — factory tested for IDB/LS but not Memory |
|
|
17
|
+
| Local<T> + Memory | encryption (with secret) | integration | MED — no encryption round-trip tests for Memory driver |
|
|
18
|
+
| Local<T> + Memory | filter, only, map, length | integration | MED — bulk read operations untested at Local<T> layer for Memory |
|
|
19
|
+
| Local<T> | persist() on non-existent key | edge case | MED — returns false by code inspection, no test |
|
|
20
|
+
| Local<T> | persist() on already-permanent key | edge case | LOW — returns true, no test |
|
|
21
|
+
| Local<T> | cleanup() subscription notifications | integration | MED — validator flagged: cleanup fires notify but no test covers it |
|
|
22
|
+
| Local<T> | get() TTL expiry + subscription | edge case | MED — expired get does fire-and-forget delete but bypasses subscription-aware delete path; behavior unclear |
|
|
23
|
+
| Local<T> | count() with VERSION_KEY present | edge case | MED — should return count-1 when migrations active; no test |
|
|
24
|
+
| Local<T> | keys() with VERSION_KEY present | edge case | MED — should exclude __version__; no test |
|
|
25
|
+
| Local<T> + encryption | get(key, factory) + encryption | integration | LOW — factory + encrypt combo untested |
|
|
26
|
+
| Local<T> + LS | compression + encryption combined | integration | MED — encrypted ciphertext stored via driver which may attempt compression; round-trip untested |
|
|
27
|
+
|
|
28
|
+
## Shallow Tests
|
|
29
|
+
|
|
30
|
+
| Module | Export | Covered | Missing |
|
|
31
|
+
|--------|--------|---------|---------|
|
|
32
|
+
| Local<T>.persist() | IDB, LS | happy path (has TTL, removes it) | non-existent key, already-permanent key, expired key |
|
|
33
|
+
| Local<T>.cleanup() | IDB, LS | removes expired entries | empty store, no expired entries, subscription notifications |
|
|
34
|
+
| Local<T>.clear() | IDB, LS, Memory | clears all + notifies | VERSION_KEY preserved after clear (migration stores re-init) |
|
|
35
|
+
| Local<T>.subscribe() | Memory | set, delete, replace, clear, unsubscribe | cleanup notifications, factory-triggered notifications, TTL expiry notifications |
|
|
36
|
+
| Local<T>.map() | IDB, LS, Memory | iterates entries | TTL filtering + VERSION_KEY filtering combined |
|
|
37
|
+
| LZ compress/decompress | round-trip | all string types | very large strings (>100KB), strings that produce larger output than input (random/high-entropy) |
|
|
38
|
+
| LocalStorageDriver.parse() | error handling | corrupted compressed data | corrupted non-compressed JSON, null byte in stored data |
|
|
39
|
+
|
|
40
|
+
## Missing Benchmarks
|
|
41
|
+
|
|
42
|
+
No benchmark infrastructure exists. For a storage library, benchmarks would be useful for:
|
|
43
|
+
|
|
44
|
+
| Module | Export | Reason |
|
|
45
|
+
|--------|--------|--------|
|
|
46
|
+
| LZ compress/decompress | compress() | Called on every localStorage write ≥100 bytes |
|
|
47
|
+
| LZ compress/decompress | decompress() | Called on every localStorage read of compressed data |
|
|
48
|
+
| LocalStorageDriver | set/get | Hot path for localStorage operations |
|
|
49
|
+
| IndexedDBDriver | set/get/all | Async I/O operations, would reveal contention |
|
|
50
|
+
| Local<T> | set with encryption | Encryption + serialization overhead |
|
|
51
|
+
|
|
52
|
+
## Stale Tests
|
|
53
|
+
|
|
54
|
+
None found. All test references match current exports.
|
|
55
|
+
|
|
56
|
+
## Recommendations
|
|
57
|
+
|
|
58
|
+
### Priority 1: SessionStorage Local<T> integration (HIGH)
|
|
59
|
+
The sessionStorage driver has full driver-level tests (36) but almost zero Local<T> integration tests (just 1 factory test). Add at minimum: set/get, all, delete, clear, count, keys — mirroring the existing Memory driver block. Encryption and TTL should also be tested since the driver shares serialization logic with localStorage but includes compression.
|
|
60
|
+
|
|
61
|
+
### Priority 2: Memory driver feature coverage (HIGH)
|
|
62
|
+
TTL, persist, cleanup, get(key, factory), encryption, and bulk read operations are untested at the Local&lt;T&gt; layer for the Memory driver. Since Memory is the recommended driver for unit testing, these gaps are especially significant — users who test their own code against the Memory driver may exercise paths this library itself never tests.
|
|
63
|
+
|
|
64
|
+
### Priority 3: Cross-feature edge cases (MED)
|
|
65
|
+
- cleanup() + subscription notifications
|
|
66
|
+
- get() TTL expiry + subscription side-effects
|
|
67
|
+
- count()/keys() with VERSION_KEY present (migrations active)
|
|
68
|
+
- Compression + encryption combined round-trip
|
|
69
|
+
- persist() on non-existent and already-permanent keys
|
|
70
|
+
|
|
71
|
+
### Priority 4: LZ compression boundaries (LOW)
|
|
72
|
+
- Very large strings (100KB+)
|
|
73
|
+
- High-entropy strings that don't compress
|
|
74
|
+
- Explicit test that compression never increases size by more than a bounded amount
|
|
@@ -232,6 +232,92 @@ describe('LocalStorageDriver', () => {
|
|
|
232
232
|
});
|
|
233
233
|
|
|
234
234
|
|
|
235
|
+
// Driver-level compression behavior: values whose JSON form reaches the size
// threshold are stored compressed with a \x01 prefix char; smaller values are
// stored as plain JSON (verified via raw localStorage inspection below).
describe('compression', () => {
    type LargeData = { bio: string };

    let largeDriver: LocalStorageDriver<LargeData>,
        largeValue: string;

    beforeEach(() => {
        largeDriver = new LocalStorageDriver<LargeData>('lz', 1);
        largeValue = 'a'.repeat(200);
    });

    it('stores small values without compression prefix', async () => {
        // Uses the outer `driver` (keys under 'test:1:') — payload is tiny
        await driver.set('name', 'alice');

        let raw = localStorage.getItem('test:1:name')!;

        expect(raw.charCodeAt(0)).not.toBe(1);
        expect(raw).toBe('"alice"');
    });

    it('stores large values with \\x01 prefix', async () => {
        await largeDriver.set('bio', largeValue);

        let raw = localStorage.getItem('lz:1:bio')!;

        expect(raw.charCodeAt(0)).toBe(1);
    });

    it('round-trips large values through set/get', async () => {
        await largeDriver.set('bio', largeValue);

        expect(await largeDriver.get('bio')).toBe(largeValue);
    });

    it('round-trips large values through replace/all', async () => {
        await largeDriver.replace([['bio', largeValue]]);

        let all = await largeDriver.all();

        expect(all.bio).toBe(largeValue);
    });

    it('reads existing uncompressed values (backward compat)', async () => {
        // Simulates data written before compression support existed
        localStorage.setItem('lz:1:bio', JSON.stringify(largeValue));

        expect(await largeDriver.get('bio')).toBe(largeValue);
    });

    it('compressed output is smaller than raw JSON', async () => {
        await largeDriver.set('bio', largeValue);

        let compressed = localStorage.getItem('lz:1:bio')!,
            raw = JSON.stringify(largeValue);

        expect(compressed.length).toBeLessThan(raw.length);
    });

    it('handles 100-byte boundary correctly', async () => {
        type BoundaryData = { val: string };

        let boundaryDriver = new LocalStorageDriver<BoundaryData>('bound', 1);

        // JSON.stringify('x'.repeat(96)) = '"' + 'x'*96 + '"' = 98 bytes < 100 => no compress
        await boundaryDriver.set('val', 'x'.repeat(96));

        let rawSmall = localStorage.getItem('bound:1:val')!;

        expect(rawSmall.charCodeAt(0)).not.toBe(1);

        // JSON.stringify('x'.repeat(98)) = '"' + 'x'*98 + '"' = 100 bytes >= 100 => compress
        await boundaryDriver.set('val', 'x'.repeat(98));

        let rawLarge = localStorage.getItem('bound:1:val')!;

        expect(rawLarge.charCodeAt(0)).toBe(1);
    });

    it('parse returns undefined for corrupted compressed data', async () => {
        localStorage.setItem('lz:1:bio', '\x01corrupted-data');

        expect(await largeDriver.get('bio')).toBeUndefined();
    });
});
|
|
319
|
+
|
|
320
|
+
|
|
235
321
|
describe('set / get', () => {
|
|
236
322
|
it('overwrites existing key', async () => {
|
|
237
323
|
await driver.set('name', 'alice');
|
|
@@ -232,6 +232,91 @@ describe('SessionStorageDriver', () => {
|
|
|
232
232
|
});
|
|
233
233
|
|
|
234
234
|
|
|
235
|
+
// Driver-level compression behavior for sessionStorage: mirrors the
// LocalStorageDriver compression suite — large values get a \x01-prefixed
// compressed form, small values stay plain JSON (verified via raw
// sessionStorage inspection below).
describe('compression', () => {
    type LargeData = { bio: string };

    let largeDriver: SessionStorageDriver<LargeData>,
        largeValue: string;

    beforeEach(() => {
        largeDriver = new SessionStorageDriver<LargeData>('lz', 1);
        largeValue = 'a'.repeat(200);
    });

    it('stores small values without compression prefix', async () => {
        // Uses the outer `driver` (keys under 'test:1:') — payload is tiny
        await driver.set('name', 'alice');

        let raw = sessionStorage.getItem('test:1:name')!;

        expect(raw.charCodeAt(0)).not.toBe(1);
        expect(raw).toBe('"alice"');
    });

    it('stores large values with \\x01 prefix', async () => {
        await largeDriver.set('bio', largeValue);

        let raw = sessionStorage.getItem('lz:1:bio')!;

        expect(raw.charCodeAt(0)).toBe(1);
    });

    it('round-trips large values through set/get', async () => {
        await largeDriver.set('bio', largeValue);

        expect(await largeDriver.get('bio')).toBe(largeValue);
    });

    it('round-trips large values through replace/all', async () => {
        await largeDriver.replace([['bio', largeValue]]);

        let all = await largeDriver.all();

        expect(all.bio).toBe(largeValue);
    });

    it('reads existing uncompressed values (backward compat)', async () => {
        // Simulates data written before compression support existed
        sessionStorage.setItem('lz:1:bio', JSON.stringify(largeValue));

        expect(await largeDriver.get('bio')).toBe(largeValue);
    });

    it('compressed output is smaller than raw JSON', async () => {
        await largeDriver.set('bio', largeValue);

        let compressed = sessionStorage.getItem('lz:1:bio')!,
            raw = JSON.stringify(largeValue);

        expect(compressed.length).toBeLessThan(raw.length);
    });

    it('handles 100-byte boundary correctly', async () => {
        type BoundaryData = { val: string };

        let boundaryDriver = new SessionStorageDriver<BoundaryData>('bound', 1);

        // JSON.stringify('x'.repeat(96)) = '"' + 'x'*96 + '"' = 98 bytes < 100 => no compress
        await boundaryDriver.set('val', 'x'.repeat(96));

        let rawSmall = sessionStorage.getItem('bound:1:val')!;

        expect(rawSmall.charCodeAt(0)).not.toBe(1);

        // JSON.stringify('x'.repeat(98)) = '"' + 'x'*98 + '"' = 100 bytes >= 100 => compress
        await boundaryDriver.set('val', 'x'.repeat(98));

        let rawLarge = sessionStorage.getItem('bound:1:val')!;

        expect(rawLarge.charCodeAt(0)).toBe(1);
    });

    it('parse returns undefined for corrupted compressed data', async () => {
        sessionStorage.setItem('lz:1:bio', '\x01corrupted-data');

        expect(await largeDriver.get('bio')).toBeUndefined();
    });
});
|
|
318
|
+
|
|
319
|
+
|
|
235
320
|
describe('set / get', () => {
|
|
236
321
|
it('overwrites existing key', async () => {
|
|
237
322
|
await driver.set('name', 'alice');
|