@abtnode/db-cache 1.16.45-beta-20250609-111118-0d252ebe → 1.16.45-beta-20250610-112229-2eb0face
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +84 -55
- package/package.json +2 -2
package/README.md
CHANGED
@@ -2,17 +2,21 @@
 
 Use Redis or Sqlite3 LRC cache, multithread safe
 
-
-No limit on the number of caches: for Redis, our convention is to configure a max memory directly in redis and automatically evict old entries when the limit is reached; for Sqlite, since it uses disk, there is no need to cap it either.
+**Convention:** DB Cache must not be the sole source of truth. We must assume that cache data can be released at any time during development.
 
-
+**No limit on the number of caches:**
 
-
--
-- Use at most 512MB of memory
-- Evict across all keys with LRU (least recently used) when memory is full
+For Redis, we configure it with a max memory limit and enable automatic LRU (Least Recently Used) eviction when full.
+For SQLite, since it's disk-based, there's no need to limit the size.
 
-
+## Redis Startup Configuration
+
+- Redis bound to the internal network only
+- No persistence
+- Max memory: 512MB
+- Eviction policy: allkeys-lru (LRU for all keys)
+
+```bash
 docker run -d \
   --name db-cache-redis \
   -p 127.0.0.1:40409:6379 \
@@ -26,7 +30,7 @@ docker run -d \
 
 ## Usage
 
-```
+```bash
 yarn add @abtnode/db-cache
 ```
 
@@ -35,7 +39,7 @@ Then:
 ```javascript
 const { DBCache } = require('@abtnode/db-cache');
 
-//
+// Configuration will only be read on first use
 const dbCache = new DBCache(() => ({
   prefix: 'the prefix key',
   ttl: 60 * 1000,
@@ -50,7 +54,7 @@ await dbCache.delete('key');
 await dbCache.has('key');
 ```
 
-##
+## Group set/get usage
 
 ```javascript
 const cache = new DBCache(() => ({
@@ -63,41 +67,61 @@ const cache = new DBCache(() => ({
 await cache.groupSet('group-key', 'sub-key', { a: 'b' });
 const data = await cache.groupGet('group-key', 'sub-key');
 await cache.groupDel('group-key', 'sub-key');
-await cache.del('group-key'); // delete
+await cache.del('group-key'); // delete all keys under the group
 ```
 
-##
+## Auto Cache
+
+`autoCache` or `autoCacheGroup`:
+
+If the value is cached, return it; otherwise, compute and cache it
+
+Errors during computation won't be cached
 
 ```javascript
-const
+const cache = new DBCache(() => ({
   prefix: 'the prefix key',
   ttl: 5 * 1000,
   sqlitePath: 'test.db',
   redisURL: process.env.REDIS_URL,
 }));
-await lock.acquire('key name');
-// do something
-await lock.releaseLock('key name');
 
-
+await cache.autoCache('key', async () => {
+  return 'want to cache data';
+});
+
+await cache.autoCacheGroup('group-key', 'sub-key', async () => {
+  return 'want to cache data';
+});
 ```
 
-##
+## Lock usage
 
-
+```javascript
+const lock = new DBCache(() => ({
+  prefix: 'the prefix key',
+  ttl: 5 * 1000,
+  sqlitePath: 'test.db',
+  redisURL: process.env.REDIS_URL,
+}));
 
-
+await lock.acquire('key name');
+// do something or wait for TTL to auto-release the lock
+await lock.releaseLock('key name');
+```
 
-
+## SQLite Performance
 
-
-
-
-PRAGMA
-PRAGMA
+**SQLite auto set WAL mode**
+
+```sql
+PRAGMA journal_mode = WAL; -- Enable Write-Ahead Logging for parallel reads/writes
+PRAGMA synchronous = OFF; -- Skip fsync for performance
+PRAGMA busy_timeout = 10000; -- Wait up to 10 seconds on lock conflict
+PRAGMA wal_autocheckpoint = 2000; -- Auto-checkpoint after every 2000 writes
 ```
 
-
+### Benchmark Results
 
 ```
 === SQLite Backend Benchmark ===
@@ -109,27 +133,28 @@ SET 50000 ops in 0.14s -> 370370 ops/sec
 GET 50000 ops in 0.14s -> 349650 ops/sec
 ```
 
-
+### Disk or Memory Usage
 
-
+Example with 100,000 entries. Sample data:
 
-
+```json
 {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  idx: i,
+  v: `value-${i}`,
+  other: `other-${i}`,
+  other2: `other2-${i}`,
+  other3: `other3-${i}`,
+  other4: `other4-${i}`,
+  other5: `other5-${i}`,
+  other6: `other6-${i}`,
+  other7: `other7-${i}`,
+  other8: `other8-${i}`,
+  other9: `other9-${i}`,
+  other10: `other10-${i}`,
+}
+```
 
+```
 === Redis Memory Info ===
 # Memory
 used_memory_rss_human:57.41M
@@ -137,17 +162,18 @@ used_memory_rss_human:57.41M
 SQLite file size: 31.63 MB
 ```
 
-
+Concurrency is safe and depends on `busy_timeout` and the level of concurrency.
 
-
+**Conclusion:** Redis is roughly 20–40x faster than SQLite. However, SQLite will not be the QPS bottleneck in most business use cases. It's a cost-effective choice for trading disk space for memory in scenarios with moderate RPS demands.
 
-
+---
 
-
+### Group Implementation Differences
 
-
+- Redis: groups are implemented via hash sets — performance is very good.
+- SQLite: implemented with dual-key lookups — still performs well.
 
-
+**Group set/get performance:**
 
 ```
 === SQLite Backend Benchmark ===
@@ -159,12 +185,15 @@ SET 50000 ops in 0.22s -> 230415 ops/sec
 GET 50000 ops in 0.08s -> 649351 ops/sec
 ```
 
-
+---
 
-
+### CI Redis Testing
 
-
+If the `TEST_REDIS_URL` environment variable is set, tests will run in Redis mode.
+If not, tests will fall back to SQLite mode.
 
-
+Example:
+
+```bash
 TEST_REDIS_URL="redis://:the_password@127.0.0.1:6379"
 ```
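The new **Convention** line in the README above (cache data can be released at any time) pairs with the `autoCache` API documented in the same file: the cache acts as a read-through layer while the real datastore remains the source of truth. Below is a minimal sketch of that pattern using only calls that appear in the diff; `loadUserFromDb`, `getUser`, and the option values are illustrative assumptions, not part of the package.

```javascript
const { DBCache } = require('@abtnode/db-cache');

// Options mirror the README examples; adjust prefix/ttl/paths for your app.
const userCache = new DBCache(() => ({
  prefix: 'user-profile',
  ttl: 60 * 1000, // entries may also disappear earlier (Redis LRU eviction, cleared cache)
  sqlitePath: 'cache.db',
  redisURL: process.env.REDIS_URL,
}));

// Hypothetical loader: the database stays the source of truth.
async function loadUserFromDb(userId) {
  // e.g. SELECT ... FROM users WHERE id = ?
  return { id: userId, name: 'example' };
}

async function getUser(userId) {
  // Cached value is returned when present; otherwise the loader runs and its
  // result is cached. Loader errors are not cached (per the README).
  return userCache.autoCache(`user:${userId}`, async () => loadUserFromDb(userId));
}
```

Because a missing or evicted key simply falls through to the loader, eviction on the Redis side (or a wiped SQLite file) only costs extra reads from the backing store rather than producing incorrect results.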
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@abtnode/db-cache",
-  "version": "1.16.45-beta-20250609-111118-0d252ebe",
+  "version": "1.16.45-beta-20250610-112229-2eb0face",
   "description": "Db cache use redis or sqlite as backend",
   "publishConfig": {
     "access": "public"
@@ -44,5 +44,5 @@
     "typescript": "^5.6.3",
     "unbuild": "^2.0.0"
   },
-  "gitHead": "
+  "gitHead": "d35fb6c907c29f4466fa965c079e1662dc77e582"
 }