@sylphx/lens-storage-redis 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +64 -0
- package/dist/index.js +189 -0
- package/package.json +44 -0
- package/src/index.ts +354 -0
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import { OpLogStorage, OpLogStorageConfig } from "@sylphx/lens-server";
|
|
2
|
+
/**
 * Redis client interface.
 * Compatible with ioredis — any client exposing these six methods works,
 * so a mock or alternative client can be substituted in tests.
 */
interface RedisClient {
  get(key: string): Promise<string | null>;
  set(key: string, value: string, ...args: unknown[]): Promise<unknown>;
  del(...keys: string[]): Promise<number>;
  keys(pattern: string): Promise<string[]>;
  exists(...keys: string[]): Promise<number>;
  quit(): Promise<unknown>;
}
/**
 * Redis storage options.
 * Extends the shared OpLogStorageConfig (retry/eviction settings).
 */
interface RedisStorageOptions extends OpLogStorageConfig {
  /**
   * Redis client instance (ioredis).
   *
   * @example
   * ```typescript
   * import Redis from "ioredis";
   * const redis = new Redis(process.env.REDIS_URL);
   * ```
   */
  redis: RedisClient;
  /**
   * Key prefix for all stored data.
   * @default "lens"
   */
  prefix?: string;
  /**
   * TTL for state data in seconds.
   * Set to 0 for no expiration.
   *
   * NOTE(review): when an entity's key expires, its patch history is lost
   * and the version counter restarts at 1 on the next emit — confirm
   * downstream consumers tolerate a version reset.
   *
   * @default 0 (no expiration)
   */
  stateTTL?: number;
}
/**
 * Create a Redis storage adapter.
 *
 * Requires `ioredis` as a peer dependency.
 *
 * Uses optimistic locking: if a concurrent write is detected,
 * the operation is retried up to `maxRetries` times.
 *
 * @example
 * ```typescript
 * import Redis from "ioredis";
 * import { redisStorage } from "@sylphx/lens-storage-redis";
 *
 * const redis = new Redis(process.env.REDIS_URL);
 *
 * const app = createApp({
 *   router,
 *   plugins: [opLog({
 *     storage: redisStorage({ redis }),
 *   })],
 * });
 * ```
 */
declare function redisStorage(options: RedisStorageOptions): OpLogStorage;

export { redisStorage, RedisStorageOptions, RedisClient };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
// src/index.ts
|
|
2
|
+
import {
|
|
3
|
+
DEFAULT_STORAGE_CONFIG
|
|
4
|
+
} from "@sylphx/lens-server";
|
|
5
|
+
// NOTE(review): compiled output of src/index.ts — fix issues in the source
// and rebuild; do not hand-edit this file.

// Diff two flat state objects into a JSON Patch operation list.
// Values are compared via their JSON serialization, so any nested change
// produces a single top-level "replace" rather than a deep path, and a
// pure key reordering inside a nested object counts as a change.
function computePatch(oldState, newState) {
  const patch = [];
  const oldKeys = new Set(Object.keys(oldState));
  const newKeys = new Set(Object.keys(newState));
  // Additions and modifications, in newState key order.
  for (const key of newKeys) {
    const oldValue = oldState[key];
    const newValue = newState[key];
    if (!oldKeys.has(key)) {
      patch.push({ op: "add", path: `/${key}`, value: newValue });
    } else if (JSON.stringify(oldValue) !== JSON.stringify(newValue)) {
      patch.push({ op: "replace", path: `/${key}`, value: newValue });
    }
  }
  // Removals, in oldState key order.
  for (const key of oldKeys) {
    if (!newKeys.has(key)) {
      patch.push({ op: "remove", path: `/${key}` });
    }
  }
  return patch;
}
|
|
25
|
+
// NOTE(review): compiled output of src/index.ts — fix issues in the source
// and rebuild; do not hand-edit this file.
//
// Storage adapter over a single Redis string per entity. Writes use a
// best-effort optimistic check: get -> set -> verify-get, retried up to
// cfg.maxRetries times. NOTE(review): this is not a true compare-and-set —
// two writers racing from the same base version can both write the same
// new version number and the later set silently wins (lost update). A
// WATCH/MULTI transaction or Lua script would close this window.
function redisStorage(options) {
  const { redis, prefix = "lens", stateTTL = 0 } = options;
  // Caller options layered over shared defaults (maxPatchAge,
  // maxPatchesPerEntity, maxRetries, ...).
  const cfg = { ...DEFAULT_STORAGE_CONFIG, ...options };
  // One Redis string per entity: "<prefix>:<entity>:<entityId>".
  function makeKey(entity, entityId) {
    return `${prefix}:${entity}:${entityId}`;
  }
  // Read + parse the stored record. A missing key and corrupt JSON both
  // surface as null, i.e. corrupt entries are treated as absent.
  async function getData(entity, entityId) {
    const key = makeKey(entity, entityId);
    const raw = await redis.get(key);
    if (!raw)
      return null;
    try {
      return JSON.parse(raw);
    } catch {
      return null;
    }
  }
  // Serialize + write the record. When stateTTL > 0 the expiry is refreshed
  // on every write, so only idle entities expire.
  async function setData(entity, entityId, data) {
    const key = makeKey(entity, entityId);
    const value = JSON.stringify(data);
    if (stateTTL > 0) {
      await redis.set(key, value, "EX", stateTTL);
    } else {
      await redis.set(key, value);
    }
  }
  // Evict patch history: drop entries older than maxPatchAge, then cap the
  // list at the newest maxPatchesPerEntity entries.
  function trimPatches(patches, now) {
    const minTimestamp = now - cfg.maxPatchAge;
    let filtered = patches.filter((p) => p.timestamp >= minTimestamp);
    if (filtered.length > cfg.maxPatchesPerEntity) {
      filtered = filtered.slice(-cfg.maxPatchesPerEntity);
    }
    return filtered;
  }
  // Core write path; retryCount tracks optimistic-lock retries.
  async function emitWithRetry(entity, entityId, data, retryCount = 0) {
    const now = Date.now();
    const existing = await getData(entity, entityId);
    // First write for this entity: version 1, empty history.
    if (!existing) {
      const newData2 = {
        data: { ...data },
        version: 1,
        updatedAt: now,
        patches: []
      };
      await setData(entity, entityId, newData2);
      return {
        version: 1,
        patch: null,
        changed: true
      };
    }
    const expectedVersion = existing.version;
    // Cheap change detection via full-state JSON comparison.
    const oldHash = JSON.stringify(existing.data);
    const newHash = JSON.stringify(data);
    if (oldHash === newHash) {
      return {
        version: existing.version,
        patch: null,
        changed: false
      };
    }
    const patch = computePatch(existing.data, data);
    const newVersion = expectedVersion + 1;
    let patches = [...existing.patches];
    if (patch.length > 0) {
      patches.push({
        version: newVersion,
        patch,
        timestamp: now
      });
      patches = trimPatches(patches, now);
    }
    const newData = {
      data: { ...data },
      version: newVersion,
      updatedAt: now,
      patches
    };
    await setData(entity, entityId, newData);
    // Read-back verification: only detects a concurrent writer that landed
    // between our set and this get (see NOTE(review) on the factory).
    const verify = await getData(entity, entityId);
    if (verify && verify.version !== newVersion) {
      if (retryCount < cfg.maxRetries) {
        // Exponential backoff, capped at 100 ms.
        const delay = Math.min(10 * 2 ** retryCount, 100);
        await new Promise((resolve) => setTimeout(resolve, delay));
        return emitWithRetry(entity, entityId, data, retryCount + 1);
      }
      // Retries exhausted: report the winning writer's version, no patch.
      return {
        version: verify.version,
        patch: null,
        changed: true
      };
    }
    return {
      version: newVersion,
      patch: patch.length > 0 ? patch : null,
      changed: true
    };
  }
  return {
    emit: (entity, entityId, data) => emitWithRetry(entity, entityId, data, 0),
    // Shallow snapshot of current state, or null if absent.
    async getState(entity, entityId) {
      const stored = await getData(entity, entityId);
      return stored ? { ...stored.data } : null;
    },
    // Current version; 0 means "never stored".
    async getVersion(entity, entityId) {
      const stored = await getData(entity, entityId);
      return stored?.version ?? 0;
    },
    async getLatestPatch(entity, entityId) {
      const stored = await getData(entity, entityId);
      if (!stored || stored.patches.length === 0) {
        return null;
      }
      const lastPatch = stored.patches[stored.patches.length - 1];
      return lastPatch ? lastPatch.patch : null;
    },
    // [] = already up to date; array = contiguous chain from sinceVersion+1;
    // null = history missing or gapped (caller must full-resync).
    async getPatchesSince(entity, entityId, sinceVersion) {
      const stored = await getData(entity, entityId);
      if (!stored) {
        return sinceVersion === 0 ? [] : null;
      }
      if (sinceVersion >= stored.version) {
        return [];
      }
      const relevantPatches = stored.patches.filter((p) => p.version > sinceVersion);
      if (relevantPatches.length === 0) {
        return null;
      }
      relevantPatches.sort((a, b) => a.version - b.version);
      // Chain must start exactly at sinceVersion + 1 ...
      const firstPatch = relevantPatches[0];
      if (!firstPatch || firstPatch.version !== sinceVersion + 1) {
        return null;
      }
      // ... and be gap-free from there on.
      for (let i = 1; i < relevantPatches.length; i++) {
        const current = relevantPatches[i];
        const previous = relevantPatches[i - 1];
        if (!current || !previous || current.version !== previous.version + 1) {
          return null;
        }
      }
      return relevantPatches.map((p) => p.patch);
    },
    async has(entity, entityId) {
      const key = makeKey(entity, entityId);
      const count = await redis.exists(key);
      return count > 0;
    },
    async delete(entity, entityId) {
      const key = makeKey(entity, entityId);
      await redis.del(key);
    },
    // NOTE(review): KEYS is O(keyspace) and blocks the Redis server; on a
    // shared/large instance SCAN in batches would be safer, but the
    // RedisClient interface only exposes keys().
    async clear() {
      const keys = await redis.keys(`${prefix}:*`);
      if (keys.length > 0) {
        await redis.del(...keys);
      }
    },
    // Closes the caller-provided client connection.
    async dispose() {
      await redis.quit();
    }
  };
}
export {
  redisStorage
};
|
package/package.json
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@sylphx/lens-storage-redis",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Redis storage adapter for Lens opLog plugin (ioredis)",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.js",
|
|
7
|
+
"types": "./dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"types": "./dist/index.d.ts",
"import": "./dist/index.js"
|
|
12
|
+
}
|
|
13
|
+
},
|
|
14
|
+
"scripts": {
|
|
15
|
+
"build": "bunup",
|
|
16
|
+
"typecheck": "tsc --noEmit",
|
|
17
|
+
"test": "echo 'no tests yet'",
|
|
18
|
+
"prepack": "[ -d dist ] || bun run build"
|
|
19
|
+
},
|
|
20
|
+
"files": [
|
|
21
|
+
"dist",
|
|
22
|
+
"src"
|
|
23
|
+
],
|
|
24
|
+
"keywords": [
|
|
25
|
+
"lens",
|
|
26
|
+
"storage",
|
|
27
|
+
"redis",
|
|
28
|
+
"ioredis",
|
|
29
|
+
"oplog"
|
|
30
|
+
],
|
|
31
|
+
"author": "SylphxAI",
|
|
32
|
+
"license": "MIT",
|
|
33
|
+
"dependencies": {
|
|
34
|
+
"@sylphx/lens-core": "^2.0.1",
|
|
35
|
+
"@sylphx/lens-server": "^2.0.1"
|
|
36
|
+
},
|
|
37
|
+
"peerDependencies": {
|
|
38
|
+
"ioredis": ">=5.0.0"
|
|
39
|
+
},
|
|
40
|
+
"devDependencies": {
|
|
41
|
+
"ioredis": "^5.6.1",
|
|
42
|
+
"typescript": "^5.9.3"
|
|
43
|
+
}
|
|
44
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,354 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @sylphx/lens-storage-redis
|
|
3
|
+
*
|
|
4
|
+
* Redis storage adapter for Lens opLog plugin using ioredis.
|
|
5
|
+
* Best for long-running servers with persistent connections.
|
|
6
|
+
*
|
|
7
|
+
* Features:
|
|
8
|
+
* - Persistent connection pooling
|
|
9
|
+
* - Optimistic locking with retry on conflict
|
|
10
|
+
* - Automatic patch eviction
|
|
11
|
+
*
|
|
12
|
+
* For serverless environments, use `@sylphx/lens-storage-upstash` or
|
|
13
|
+
* `@sylphx/lens-storage-vercel-kv` instead.
|
|
14
|
+
*
|
|
15
|
+
* @example
|
|
16
|
+
* ```typescript
|
|
17
|
+
* import Redis from "ioredis";
|
|
18
|
+
* import { redisStorage } from "@sylphx/lens-storage-redis";
|
|
19
|
+
*
|
|
20
|
+
* const redis = new Redis(process.env.REDIS_URL);
|
|
21
|
+
*
|
|
22
|
+
* const app = createApp({
|
|
23
|
+
* router,
|
|
24
|
+
* plugins: [opLog({
|
|
25
|
+
* storage: redisStorage({ redis }),
|
|
26
|
+
* })],
|
|
27
|
+
* });
|
|
28
|
+
* ```
|
|
29
|
+
*/
|
|
30
|
+
|
|
31
|
+
import type { PatchOperation } from "@sylphx/lens-core";
|
|
32
|
+
import {
|
|
33
|
+
DEFAULT_STORAGE_CONFIG,
|
|
34
|
+
type EmitResult,
|
|
35
|
+
type OpLogStorage,
|
|
36
|
+
type OpLogStorageConfig,
|
|
37
|
+
type StoredPatchEntry,
|
|
38
|
+
} from "@sylphx/lens-server";
|
|
39
|
+
|
|
40
|
+
/**
 * Redis client interface.
 * Compatible with ioredis — a structural subset, so any client (or test
 * mock) exposing these six methods can be supplied.
 */
export interface RedisClient {
  get(key: string): Promise<string | null>;
  set(key: string, value: string, ...args: unknown[]): Promise<unknown>;
  del(...keys: string[]): Promise<number>;
  keys(pattern: string): Promise<string[]>;
  exists(...keys: string[]): Promise<number>;
  quit(): Promise<unknown>;
}
|
|
52
|
+
|
|
53
|
+
/**
 * Redis storage options.
 * Extends the shared OpLogStorageConfig (retry/eviction settings).
 */
export interface RedisStorageOptions extends OpLogStorageConfig {
  /**
   * Redis client instance (ioredis).
   *
   * @example
   * ```typescript
   * import Redis from "ioredis";
   * const redis = new Redis(process.env.REDIS_URL);
   * ```
   */
  redis: RedisClient;

  /**
   * Key prefix for all stored data.
   * @default "lens"
   */
  prefix?: string;

  /**
   * TTL for state data in seconds.
   * Set to 0 for no expiration.
   *
   * NOTE(review): when an entity's key expires, its patch history is lost
   * and the version counter restarts at 1 on the next emit — confirm
   * downstream consumers tolerate a version reset.
   *
   * @default 0 (no expiration)
   */
  stateTTL?: number;
}
|
|
81
|
+
|
|
82
|
+
/**
 * Internal stored data structure.
 * Serialized as one JSON string per entity under "<prefix>:<entity>:<id>".
 */
interface StoredData {
  // Full current state of the entity (shallow-copied on write).
  data: Record<string, unknown>;
  // Monotonically increasing version; starts at 1 on first write.
  version: number;
  // Date.now() timestamp of the last successful write.
  updatedAt: number;
  // Recent patch history, trimmed by age and count on each write.
  patches: StoredPatchEntry[];
}
|
|
91
|
+
|
|
92
|
+
/**
|
|
93
|
+
* Compute JSON Patch operations between two states.
|
|
94
|
+
*/
|
|
95
|
+
function computePatch(
|
|
96
|
+
oldState: Record<string, unknown>,
|
|
97
|
+
newState: Record<string, unknown>,
|
|
98
|
+
): PatchOperation[] {
|
|
99
|
+
const patch: PatchOperation[] = [];
|
|
100
|
+
const oldKeys = new Set(Object.keys(oldState));
|
|
101
|
+
const newKeys = new Set(Object.keys(newState));
|
|
102
|
+
|
|
103
|
+
for (const key of newKeys) {
|
|
104
|
+
const oldValue = oldState[key];
|
|
105
|
+
const newValue = newState[key];
|
|
106
|
+
|
|
107
|
+
if (!oldKeys.has(key)) {
|
|
108
|
+
patch.push({ op: "add", path: `/${key}`, value: newValue });
|
|
109
|
+
} else if (JSON.stringify(oldValue) !== JSON.stringify(newValue)) {
|
|
110
|
+
patch.push({ op: "replace", path: `/${key}`, value: newValue });
|
|
111
|
+
}
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
for (const key of oldKeys) {
|
|
115
|
+
if (!newKeys.has(key)) {
|
|
116
|
+
patch.push({ op: "remove", path: `/${key}` });
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
return patch;
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
/**
 * Create a Redis storage adapter.
 *
 * Requires `ioredis` as a peer dependency.
 *
 * Uses optimistic locking: if a concurrent write is detected,
 * the operation is retried up to `maxRetries` times.
 *
 * NOTE(review): conflict detection is a best-effort read-back (get -> set ->
 * verify-get), not a true compare-and-set. Two writers racing from the same
 * base version can both write the same new version number and the later set
 * silently wins (lost update). A WATCH/MULTI transaction or Lua script would
 * close this window — confirm best-effort semantics are acceptable here.
 *
 * @example
 * ```typescript
 * import Redis from "ioredis";
 * import { redisStorage } from "@sylphx/lens-storage-redis";
 *
 * const redis = new Redis(process.env.REDIS_URL);
 *
 * const app = createApp({
 *   router,
 *   plugins: [opLog({
 *     storage: redisStorage({ redis }),
 *   })],
 * });
 * ```
 */
export function redisStorage(options: RedisStorageOptions): OpLogStorage {
  const { redis, prefix = "lens", stateTTL = 0 } = options;
  // Caller options layered over shared defaults (maxPatchAge,
  // maxPatchesPerEntity, maxRetries, ...).
  const cfg = { ...DEFAULT_STORAGE_CONFIG, ...options };

  // One Redis string per entity: "<prefix>:<entity>:<entityId>".
  function makeKey(entity: string, entityId: string): string {
    return `${prefix}:${entity}:${entityId}`;
  }

  // Read + parse the stored record. A missing key and corrupt JSON both
  // surface as null, i.e. corrupt entries are treated as absent.
  async function getData(entity: string, entityId: string): Promise<StoredData | null> {
    const key = makeKey(entity, entityId);
    const raw = await redis.get(key);
    if (!raw) return null;

    try {
      return JSON.parse(raw) as StoredData;
    } catch {
      return null;
    }
  }

  // Serialize + write the record. When stateTTL > 0 the expiry is refreshed
  // on every write, so only idle entities expire.
  async function setData(entity: string, entityId: string, data: StoredData): Promise<void> {
    const key = makeKey(entity, entityId);
    const value = JSON.stringify(data);

    if (stateTTL > 0) {
      await redis.set(key, value, "EX", stateTTL);
    } else {
      await redis.set(key, value);
    }
  }

  // Evict patch history: drop entries older than maxPatchAge, then cap the
  // list at the newest maxPatchesPerEntity entries.
  function trimPatches(patches: StoredPatchEntry[], now: number): StoredPatchEntry[] {
    const minTimestamp = now - cfg.maxPatchAge;
    let filtered = patches.filter((p) => p.timestamp >= minTimestamp);

    if (filtered.length > cfg.maxPatchesPerEntity) {
      filtered = filtered.slice(-cfg.maxPatchesPerEntity);
    }

    return filtered;
  }

  /**
   * Emit with optimistic locking.
   * Retries on version conflict up to maxRetries times.
   */
  async function emitWithRetry(
    entity: string,
    entityId: string,
    data: Record<string, unknown>,
    retryCount = 0,
  ): Promise<EmitResult> {
    const now = Date.now();
    const existing = await getData(entity, entityId);

    // First write for this entity: version 1, empty history, no patch.
    if (!existing) {
      const newData: StoredData = {
        data: { ...data },
        version: 1,
        updatedAt: now,
        patches: [],
      };
      await setData(entity, entityId, newData);

      return {
        version: 1,
        patch: null,
        changed: true,
      };
    }

    const expectedVersion = existing.version;

    // Cheap change detection: compare JSON serializations of the whole
    // state (key-order sensitive — a pure reordering counts as a change).
    const oldHash = JSON.stringify(existing.data);
    const newHash = JSON.stringify(data);

    if (oldHash === newHash) {
      return {
        version: existing.version,
        patch: null,
        changed: false,
      };
    }

    const patch = computePatch(existing.data, data);
    const newVersion = expectedVersion + 1;

    let patches = [...existing.patches];
    if (patch.length > 0) {
      patches.push({
        version: newVersion,
        patch,
        timestamp: now,
      });
      patches = trimPatches(patches, now);
    }

    const newData: StoredData = {
      data: { ...data },
      version: newVersion,
      updatedAt: now,
      patches,
    };

    await setData(entity, entityId, newData);

    // Re-read to verify our write succeeded (optimistic check). This only
    // catches a concurrent writer that landed between our set and this get;
    // see the NOTE(review) on the factory doc for the remaining race.
    const verify = await getData(entity, entityId);
    if (verify && verify.version !== newVersion) {
      // Version conflict: back off (exponential, capped at 100 ms) and retry.
      if (retryCount < cfg.maxRetries) {
        const delay = Math.min(10 * 2 ** retryCount, 100);
        await new Promise((resolve) => setTimeout(resolve, delay));
        return emitWithRetry(entity, entityId, data, retryCount + 1);
      }
      // Retries exhausted: report the winning writer's version, no patch.
      return {
        version: verify.version,
        patch: null,
        changed: true,
      };
    }

    return {
      version: newVersion,
      patch: patch.length > 0 ? patch : null,
      changed: true,
    };
  }

  return {
    emit: (entity, entityId, data) => emitWithRetry(entity, entityId, data, 0),

    // Shallow snapshot of the current state, or null if absent.
    async getState(entity, entityId): Promise<Record<string, unknown> | null> {
      const stored = await getData(entity, entityId);
      return stored ? { ...stored.data } : null;
    },

    // Current version; 0 means "never stored".
    async getVersion(entity, entityId): Promise<number> {
      const stored = await getData(entity, entityId);
      return stored?.version ?? 0;
    },

    async getLatestPatch(entity, entityId): Promise<PatchOperation[] | null> {
      const stored = await getData(entity, entityId);
      if (!stored || stored.patches.length === 0) {
        return null;
      }
      const lastPatch = stored.patches[stored.patches.length - 1];
      return lastPatch ? lastPatch.patch : null;
    },

    // [] = client already up to date (or nothing ever stored at version 0);
    // array = contiguous patch chain starting at sinceVersion + 1;
    // null = history missing or gapped — caller must fall back to full state.
    async getPatchesSince(entity, entityId, sinceVersion): Promise<PatchOperation[][] | null> {
      const stored = await getData(entity, entityId);

      if (!stored) {
        return sinceVersion === 0 ? [] : null;
      }

      if (sinceVersion >= stored.version) {
        return [];
      }

      const relevantPatches = stored.patches.filter((p) => p.version > sinceVersion);

      if (relevantPatches.length === 0) {
        return null;
      }

      relevantPatches.sort((a, b) => a.version - b.version);

      // Chain must start exactly at sinceVersion + 1 ...
      const firstPatch = relevantPatches[0];
      if (!firstPatch || firstPatch.version !== sinceVersion + 1) {
        return null;
      }

      // ... and be gap-free from there on.
      for (let i = 1; i < relevantPatches.length; i++) {
        const current = relevantPatches[i];
        const previous = relevantPatches[i - 1];
        if (!current || !previous || current.version !== previous.version + 1) {
          return null;
        }
      }

      return relevantPatches.map((p) => p.patch);
    },

    async has(entity, entityId): Promise<boolean> {
      const key = makeKey(entity, entityId);
      const count = await redis.exists(key);
      return count > 0;
    },

    async delete(entity, entityId): Promise<void> {
      const key = makeKey(entity, entityId);
      await redis.del(key);
    },

    // NOTE(review): KEYS is O(keyspace) and blocks the Redis server; on a
    // shared/large instance SCAN in batches would be safer, but the
    // RedisClient interface only exposes keys().
    async clear(): Promise<void> {
      const keys = await redis.keys(`${prefix}:*`);
      if (keys.length > 0) {
        await redis.del(...keys);
      }
    },

    // Closes the caller-provided client connection; callers sharing the
    // client with other code should not call dispose().
    async dispose(): Promise<void> {
      await redis.quit();
    },
  };
}
|