@withjoy/limiter 0.1.2 → 0.1.4-test
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -0
- package/limitd-redis/LICENSE +21 -0
- package/limitd-redis/README.md +183 -0
- package/limitd-redis/docker-compose.yml +11 -0
- package/limitd-redis/index.js +2 -0
- package/limitd-redis/lib/cb.js +45 -0
- package/limitd-redis/lib/client.js +135 -0
- package/limitd-redis/lib/db.js +501 -0
- package/limitd-redis/lib/db_ping.js +106 -0
- package/limitd-redis/lib/put.lua +31 -0
- package/limitd-redis/lib/take.lua +48 -0
- package/limitd-redis/lib/utils.js +116 -0
- package/limitd-redis/lib/validation.js +64 -0
- package/limitd-redis/node_modules/lru-cache/LICENSE +15 -0
- package/limitd-redis/node_modules/lru-cache/README.md +158 -0
- package/limitd-redis/node_modules/lru-cache/index.js +468 -0
- package/limitd-redis/node_modules/lru-cache/package.json +74 -0
- package/limitd-redis/node_modules/ms/index.js +162 -0
- package/limitd-redis/node_modules/ms/license.md +21 -0
- package/limitd-redis/node_modules/ms/package.json +73 -0
- package/limitd-redis/node_modules/ms/readme.md +59 -0
- package/limitd-redis/node_modules/yallist/LICENSE +15 -0
- package/limitd-redis/node_modules/yallist/README.md +204 -0
- package/limitd-redis/node_modules/yallist/iterator.js +7 -0
- package/limitd-redis/node_modules/yallist/package.json +65 -0
- package/limitd-redis/node_modules/yallist/yallist.js +370 -0
- package/limitd-redis/opslevel.yml +6 -0
- package/limitd-redis/package-lock.json +3484 -0
- package/limitd-redis/package.json +31 -0
- package/limitd-redis/test/cb.tests.js +124 -0
- package/limitd-redis/test/client.tests.js +194 -0
- package/limitd-redis/test/db.tests.js +1318 -0
- package/limitd-redis/test/validation.tests.js +124 -0
- package/limiter.js +83 -19
- package/package.json +3 -2
- package/tests/limiter.test.js +27 -27
- package/tests/performTestWithTestPerMinute.js +1 -1
- package/tests/sanityCheck.js +33 -29
package/limitd-redis/lib/db.js
@@ -0,0 +1,501 @@
+const ms = require('ms');
+const fs = require('fs');
+const _ = require('lodash');
+const async = require('async');
+const LRU = require('lru-cache');
+const utils = require('./utils');
+const Redis = require('ioredis');
+const { validateParams } = require('./validation');
+const DBPing = require("./db_ping");
+const EventEmitter = require('events').EventEmitter;
+
+const TAKE_LUA = fs.readFileSync(`${__dirname}/take.lua`, "utf8");
+const PUT_LUA = fs.readFileSync(`${__dirname}/put.lua`, "utf8");
+
+const PING_SUCCESS = "successful";
+const PING_ERROR = "error";
+const PING_RECONNECT = "reconnect";
+const PING_RECONNECT_DRY_RUN = "reconnect-dry-run";
+
+const DEFAULT_COMMAND_TIMEOUT = 125; // Milliseconds
+
+class LimitDBRedis extends EventEmitter {
+  static get PING_SUCCESS() {
+    return PING_SUCCESS;
+  }
+  static get PING_ERROR() {
+    return PING_ERROR;
+  }
+  static get PING_RECONNECT() {
+    return PING_RECONNECT;
+  }
+  static get PING_RECONNECT_DRY_RUN() {
+    return PING_RECONNECT_DRY_RUN;
+  }
+
+  /**
+   * Creates an instance of LimitDB client for Redis.
+   * @param {params} params - The configuration for the database and client.
+   */
+  constructor(config) {
+    super();
+    config = config || {};
+
+    if (!config.nodes && !config.uri) {
+      throw new Error('Redis connection information must be specified');
+    }
+
+    if (!config.buckets) {
+      throw new Error('Buckets must be specified for Limitd');
+    }
+
+    this.configurateBuckets(config.buckets);
+    this.prefix = config.prefix;
+    this.globalTTL = (config.globalTTL || ms('7d')) / 1000;
+    this.callCounts = new LRU({ max: 50 });
+
+    const redisOptions = {
+      // a low commandTimeout value would likely cause sharded clusters to fail `enableReadyCheck` due to it running `CLUSTER INFO`
+      // which is a slow command. timeouts are being handled by the client#dispatch method.
+      enableOfflineQueue: false,
+      keyPrefix: config.prefix,
+      password: config.password,
+      tls: config.tls,
+      reconnectOnError: (err) => {
+        // will force a reconnect when error starts with `READONLY`
+        // this code is only triggered when auto-failover is disabled
+        // more: https://github.com/luin/ioredis#reconnect-on-error
+        return err.message.includes('READONLY');
+      },
+    };
+
+    const clusterOptions = {
+      slotsRefreshTimeout: config.slotsRefreshTimeout || 3000,
+      slotsRefreshInterval: config.slotsRefreshInterval || ms('5m'),
+      keyPrefix: config.prefix,
+      dnsLookup: config.dnsLookup,
+      enableReadyCheck: true,
+      redisOptions
+    };
+
+    this.redis = null;
+    if (config.nodes) {
+      this.redis = new Redis.Cluster(config.nodes, clusterOptions);
+    } else {
+      this.redis = new Redis(config.uri, redisOptions);
+      this.setupPing(config);
+    }
+
+    this.redis.defineCommand('take', {
+      numberOfKeys: 1,
+      lua: TAKE_LUA
+    });
+
+    this.redis.defineCommand('put', {
+      numberOfKeys: 1,
+      lua: PUT_LUA
+    });
+
+    this.redis.on('ready', () => {
+      this.emit('ready');
+    });
+
+    this.redis.on('error', (err) => {
+      this.emit('error', err);
+    });
+
+    this.redis.on('node error', (err, node) => {
+      this.emit('node error', err, node);
+    });
+
+  }
+
+  setupPing(config) {
+    this.redis.on("ready", () => this.startPing(config));
+    this.redis.on("close", () => this.stopPing());
+  }
+
+  startPing(config) {
+    this.stopPing();
+    this.ping = new DBPing(config.ping, this.redis);
+    this.ping.on("ping", (data) => this.emit("ping", data));
+  }
+
+  stopPing() {
+    if(this.ping) {
+      this.ping.stop();
+      this.ping.removeAllListeners();
+    }
+  }
+
+  close(callback) {
+    this.stopPing();
+    this.redis.quit(callback);
+  }
+
+  configurateBuckets(buckets) {
+    if (buckets) {
+      this.buckets = utils.buildBuckets(buckets);
+    }
+  }
+
+  configurateBucket(key, bucket) {
+    this.buckets[key] = utils.buildBucket(bucket);
+  }
+
+  /**
+   * @param {string} type
+   * @param {object} params
+   * @returns
+   */
+  bucketKeyConfig(type, params) {
+    if (typeof params.configOverride === 'object') {
+      return utils.normalizeTemporals(params.configOverride);
+    }
+
+    const fromOverride = type.overrides[params.key];
+    if (fromOverride) {
+      return fromOverride;
+    }
+
+    const fromCache = type.overridesCache && type.overridesCache.get(params.key);
+    if (fromCache) {
+      return fromCache;
+    }
+
+    const fromMatch = _.find(type.overridesMatch, (o) => {
+      return o.match.exec(params.key);
+    });
+    if (fromMatch) {
+      type.overridesCache.set(params.key, fromMatch);
+      return fromMatch;
+    }
+
+    return type;
+  }
+
+  // not super accurate given clock drift across redis and host
+  calculateReset(bucketKeyConfig, remaining, now) {
+    if (!bucketKeyConfig.per_interval) {
+      return 0;
+    }
+
+    now = now || Date.now();
+    const missing = bucketKeyConfig.size - remaining;
+    const msToCompletion = Math.ceil(missing * bucketKeyConfig.drip_interval);
+    return Math.ceil((now + msToCompletion) / 1000);
+  }
+
+  /**
+   * Take N elements from a bucket if available.
+   *
+   * @param {takeParams} params - The params for take.
+   * @param {function(Error, takeResult)} callback.
+   */
+  take(params, callback) {
+    const valError = validateParams(params, this.buckets);
+    if (valError) {
+      return process.nextTick(callback, valError);
+    }
+
+    const bucket = this.buckets[params.type];
+    const bucketKeyConfig = this.bucketKeyConfig(bucket, params);
+
+    const key = `${params.type}:${params.key}`;
+
+    let count = this._determineCount({
+      paramsCount: params.count,
+      defaultCount: 1,
+      bucketKeyConfigSize: bucketKeyConfig.size,
+    });
+
+    if (bucketKeyConfig.unlimited) {
+      return process.nextTick(callback, null, {
+        conformant: true,
+        remaining: bucketKeyConfig.size,
+        reset: Math.ceil(Date.now() / 1000),
+        limit: bucketKeyConfig.size,
+        delayed: false,
+      });
+    }
+
+    if (bucketKeyConfig.skip_n_calls > 0) {
+      const prevCall = this.callCounts.get(key);
+
+      if (prevCall) {
+        const shouldGoToRedis = prevCall.count >= bucketKeyConfig.skip_n_calls
+
+
+        if (!shouldGoToRedis) {
+          prevCall.count ++;
+          return process.nextTick(callback, null, prevCall.res);
+        }
+
+        // if lastCall not exists it's the first time that we go to redis.
+        // so we don't change count; subsequently calls take count should be
+        // proportional to the number of call that we skip.
+        // if count=3, and we go every 5 times, take should 15
+        // This parameter is most likely 1, and doing times is an overkill but better safe than sorry.
+        if (shouldGoToRedis) {
+          // we need to account for the skipped calls + the current call
+          count *= (bucketKeyConfig.skip_n_calls + 1);
+        }
+      }
+
+
+    }
+
+    this.redis.take(key,
+      bucketKeyConfig.ms_per_interval || 0,
+      bucketKeyConfig.size,
+      count,
+      Math.ceil(bucketKeyConfig.ttl || this.globalTTL),
+      bucketKeyConfig.drip_interval || 0,
+      (err, results) => {
+        if (err) {
+          return callback(err);
+        }
+
+        const remaining = parseInt(results[0], 10);
+        const conformant = parseInt(results[1], 10) ? true : false;
+        const currentMS = parseInt(results[2], 10);
+        const reset = parseInt(results[3], 10);
+        const res = {
+          conformant,
+          remaining,
+          reset: Math.ceil(reset / 1000),
+          limit: bucketKeyConfig.size,
+          delayed: false,
+        };
+
+        if (bucketKeyConfig.skip_n_calls > 0) {
+          this.callCounts.set(key, { res, count: 0 });
+        }
+
+        return callback(null, res);
+      });
+  }
+
+  /**
+   * Take N elements from a bucket if available otherwise wait for them.
+   * The callback is called when the number of request tokens is available.
+   *
+   * @param {waitParams} params - The params for take.
+   * @param {function(Error, waitResult)} callback.
+   */
+  wait(params, callback) {
+    this.take(params, (err, result) => {
+      if (err || result.conformant) {
+        return callback(err, result);
+      }
+
+      const bucket = this.buckets[params.type];
+      const bucketKeyConfig = this.bucketKeyConfig(bucket, params);
+      const count = params.count || 1;
+      const required = count - result.remaining;
+      const minWait = Math.ceil(required * bucketKeyConfig.interval / bucketKeyConfig.per_interval);
+
+      return setTimeout(() => {
+        this.wait(params, (err, result) => {
+          if (err) {
+            return callback(err);
+          }
+          result.delayed = true;
+          callback(null, result);
+        });
+      }, minWait);
+    });
+  }
+
+  /**
+   * Put N elements in the bucket.
+   *
+   * @param {putParams} params - The params for take.
+   * @param {function(Error, putResult)} [callback].
+   */
+  put(params, callback) {
+    callback = callback || _.noop;
+
+    const valError = validateParams(params, this.buckets);
+    if (valError) {
+      return process.nextTick(callback, valError);
+    }
+
+    const bucket = this.buckets[params.type];
+    const bucketKeyConfig = this.bucketKeyConfig(bucket, params);
+
+    const count = Math.min(
+      this._determineCount({
+        paramsCount: params.count,
+        defaultCount: bucketKeyConfig.size,
+        bucketKeyConfigSize: bucketKeyConfig.size,
+      }),
+      bucketKeyConfig.size);
+
+    if (bucketKeyConfig.unlimited) {
+      return process.nextTick(callback, null, {
+        remaining: bucketKeyConfig.size,
+        reset: Math.ceil(Date.now() / 1000),
+        limit: bucketKeyConfig.size
+      });
+    }
+
+    const key = `${params.type}:${params.key}`;
+    this.redis.put(key,
+      count,
+      bucketKeyConfig.size,
+      Math.ceil(bucketKeyConfig.ttl || this.globalTTL),
+      bucketKeyConfig.drip_interval || 0,
+      (err, results) => {
+        if (err) {
+          return callback(err);
+        }
+
+        const remaining = parseInt(results[0], 10);
+        return callback(null, {
+          remaining: remaining,
+          reset: Math.ceil(parseInt(results[3], 10) / 1000),
+          limit: bucketKeyConfig.size
+        });
+      });
+  }
+
+  /**
+   * Get elements in the bucket.
+   *
+   * @param {getParams} params - The params for take.
+   * @param {function(Error, getResult)} [callback].
+   */
+  get(params, callback) {
+    callback = callback || _.noop;
+
+    const valError = validateParams(params, this.buckets);
+    if (valError) {
+      return process.nextTick(callback, valError);
+    }
+
+    const bucket = this.buckets[params.type];
+    const bucketKeyConfig = this.bucketKeyConfig(bucket, params);
+
+    if (bucketKeyConfig.unlimited) {
+      return process.nextTick(callback, null, {
+        remaining: bucketKeyConfig.size,
+        reset: Math.ceil(Date.now() / 1000),
+        limit: bucketKeyConfig.size
+      });
+    }
+
+    const key = `${params.type}:${params.key}`;
+    this.redis.hmget(key, 'r', 'd',
+      (err, results) => {
+        if (err) {
+          return callback(err);
+        }
+
+        let remaining = parseInt(results[0], 10);
+        remaining = Number.isInteger(remaining) ? remaining : bucketKeyConfig.size;
+        return callback(null, {
+          remaining,
+          reset: this.calculateReset(bucketKeyConfig, remaining, parseInt(results[1], 10)),
+          limit: bucketKeyConfig.size
+        });
+      });
+  }
+
+  /**
+   * Resets/re-fills all keys in all buckets.
+   * @param {function(Error)} [callback].
+   */
+  resetAll(callback) {
+    callback = callback || _.noop;
+
+    const dbs = this.redis.nodes ? this.redis.nodes('master') : [this.redis];
+    async.each(dbs, (db, cb) => {
+      db.flushdb(cb);
+    }, callback);
+  }
+
+  _determineCount({ paramsCount, defaultCount, bucketKeyConfigSize }) {
+    if (paramsCount === 'all') {
+      return bucketKeyConfigSize;
+    }
+
+    if (Number.isInteger(paramsCount)) {
+      return paramsCount;
+    }
+
+    if (!paramsCount) {
+      return defaultCount;
+    }
+
+    throw new Error('if provided, count must be \'all\' or an integer value');
+  }
+}
+
+
+module.exports = LimitDBRedis;
+
+/**
+ * And now some typedefs for you:
+ *
+ * @typedef {Object} type
+ * @property {integer} [per_interval] The number of tokens to add per interval.
+ * @property {integer} [interval] The length of the interval in milliseconds.
+ * @property {integer} [size] The maximum number of tokens in the bucket.
+ * @property {integer} [per_second] The number of tokens to add per second. Equivalent to "interval: 1000, per_interval: x".
+ * @property {integer} [per_minute] The number of tokens to add per minute. Equivalent to "interval: 60000, per_interval: x".
+ * @property {integer} [per_hour] The number of tokens to add per hour. Equivalent to "interval: 3600000, per_interval: x".
+ * @property {integer} [per_day] The number of tokens to add per day. Equivalent to "interval: 86400000, per_interval: x".
+ *
+ * @typedef {Object} params
+ * uri nodes buckets prefix
+ * @property {string} [params.uri] Address of Redis.
+ * @property {Object.<string, object>} [params.nodes] Redis Cluster Configuration https://github.com/luin/ioredis#cluster".
+ * @property {Object.<string, type>} [params.types] The buckets configuration.
+ * @property {string} [params.prefix] Prefix keys in Redis.
+ * @property {type} [params.configOverride] Bucket configuration override
+ *
+ * @typedef takeParams
+ * @property {string} type The name of the bucket type.
+ * @property {string} key The key of the bucket instance.
+ * @property {integer} [count=1] The number of tokens to take from the bucket.
+ * @property {type} configOverride Externally provided bucket configruation
+ *
+ * @typedef takeResult
+ * @property {boolean} conformant Returns true if there is enough capacity in the bucket and the tokens has been removed.
+ * @property {integer} remaining The number of tokens remaining in the bucket.
+ * @property {integer} reset A unix timestamp indicating when the bucket is going to be full.
+ * @property {integer} limit The size of the bucket.
+ *
+ * @typedef waitParams
+ * @property {string} type The name of the bucket type.
+ * @property {string} key The key of the bucket instance.
+ * @property {integer} [count=1] The number of tokens to wait for.
+ * @property {type} configOverride Externally provided bucket configruation
+ *
+ * @typedef waitResult
+ * @property {integer} remaining The number of tokens remaining in the bucket.
+ * @property {integer} reset A unix timestamp indicating when the bucket is going to be full.
+ * @property {integer} limit The size of the bucket.
+ *
+ * @typedef putParams
+ * @property {string} type The name of the bucket type.
+ * @property {string} key The key of the bucket instance.
+ * @property {integer} [count=SIZE] The number of tokens to put in the bucket. Defaults to the size of the bucket.
+ * @property {type} configOverride Externally provided bucket configruation
+ *
+ * @typedef putResult
+ * @property {integer} remaining The number of tokens remaining in the bucket.
+ * @property {integer} reset A unix timestamp indicating when the bucket is going to be full.
+ * @property {integer} limit The size of the bucket.
+ *
+ * @typedef getParams
+ * @property {string} type The name of the bucket type.
+ * @property {string} key The key of the bucket instance.
+ * @property {type} configOverride Externally provided bucket configruation
+ *
+ * @typedef getResult
+ * @property {integer} remaining The number of tokens remaining in the bucket.
+ * @property {integer} reset A unix timestamp indicating when the bucket is going to be full.
+ * @property {integer} limit The size of the bucket.
+ */
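For orientation, the sketch below shows how the vendored `LimitDBRedis` client added above might be driven. It is a minimal, hedged example: the Redis URI, the `ip` bucket type, and its `size`/`per_second` values are placeholders, and the bucket shape follows the typedefs at the bottom of db.js (the exact normalization lives in lib/utils.js, which is not reproduced here).

```js
// Minimal usage sketch (assumptions: a standalone Redis at localhost:6379 and
// an illustrative bucket type named "ip"). The constructor requires either
// `uri` or `nodes` plus a `buckets` map; take() reports whether the request
// conforms to the configured rate.
const LimitDBRedis = require('./limitd-redis/lib/db');

const limitd = new LimitDBRedis({
  uri: 'redis://localhost:6379',       // or `nodes: [...]` for a Redis Cluster
  prefix: 'myapp:',                    // optional key prefix
  buckets: {
    ip: { size: 10, per_second: 5 },   // 10-token bucket refilled at 5 tokens/s
  },
});

limitd.on('error', (err) => console.error('limitd error', err));
limitd.on('ready', () => {
  limitd.take({ type: 'ip', key: '10.0.0.1', count: 1 }, (err, result) => {
    if (err) return console.error(err);
    // result: { conformant, remaining, reset, limit, delayed }
    console.log(result.conformant ? 'allow' : 'throttle', result);
    limitd.close();
  });
});
```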
package/limitd-redis/lib/db_ping.js
@@ -0,0 +1,106 @@
+const cbControl = require('./cb');
+const utils = require("./utils");
+const EventEmitter = require("events").EventEmitter;
+
+const PING_SUCCESS = "successful";
+const PING_ERROR = "error";
+const PING_RECONNECT = "reconnect";
+const PING_RECONNECT_DRY_RUN = "reconnect-dry-run";
+
+const DEFAULT_PING_INTERVAL = 3000; // Milliseconds
+
+class DBPing extends EventEmitter {
+  constructor(config, redis) {
+    super();
+
+    this.redis = redis;
+    this.config = {
+      commandTimeout: 125,
+      enabled: config ? true : false,
+      interval: config && config.interval ? config.interval : DEFAULT_PING_INTERVAL,
+      maxFailedAttempts: config && config.maxFailedAttempts ? config.maxFailedAttempts : 5,
+      reconnectIfFailed:
+        utils.functionOrFalse(config ? config.reconnectIfFailed : undefined) || (() => false),
+    };
+
+    this.failedPings = 0;
+
+    this.start();
+  }
+
+  start() {
+    const doPing = () => {
+      if (!this.config.enabled) {
+        return;
+      }
+
+      let start = Date.now();
+      this.redis.ping(cbControl((err) => {
+        let duration = Date.now() - start;
+        err
+          ? this.pingKO(triggerLoop, err, duration)
+          : this.pingOK(triggerLoop, duration);
+      }).timeout(this.config.commandTimeout));
+    };
+
+    const triggerLoop = () => setTimeout(doPing, this.config.interval);
+
+    doPing();
+  }
+
+  stop() {
+    this.enabled = false;
+  }
+
+  pingOK(callback, duration) {
+    this.reconnecting = false;
+    this.failedPings = 0;
+    this.emitPingResult(PING_SUCCESS, undefined, duration, 0);
+    callback();
+  }
+
+  pingKO(callback, err, duration) {
+    this.failedPings++;
+    this.emitPingResult(PING_ERROR, err, duration, this.failedPings);
+
+    if (this.failedPings < this.config.maxFailedAttempts) {
+      return callback();
+    }
+
+    if (!this.config.reconnectIfFailed()) {
+      return this.emitPingResult(
+        PING_RECONNECT_DRY_RUN,
+        undefined,
+        0,
+        this.failedPings
+      );
+    }
+
+    this.retryStrategy(() => {
+      this.emitPingResult(PING_RECONNECT, undefined, 0, this.failedPings);
+      this.redis.disconnect(true);
+    });
+  }
+
+  emitPingResult(status, err, duration, failedPings) {
+    const result = {
+      status: status,
+      duration: duration,
+      error: err,
+      failedPings: failedPings,
+    };
+    this.emit("ping", result);
+  }
+
+  retryStrategy(callback) {
+    //jitter between 0% and 10% of the total wait time needed to reconnect
+    //i.e. if interval = 100 and maxFailedAttempts = 3 => it'll randomly jitter between 0 and 30 ms
+    const deviation =
+      utils.randomBetween(0, 0.1) *
+      this.config.interval *
+      this.config.maxFailedAttempts;
+    setTimeout(callback, deviation);
+  }
+}
+
+module.exports = DBPing;
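The ping loop above is only wired up for non-cluster connections (see `setupPing` in db.js). Below is a hedged sketch of how a caller might enable it and observe the events it emits; the option names mirror what `DBPing` reads from its config, and the values are illustrative.

```js
// Hedged sketch: enable the keep-alive ping and log its results
// (assumes a standalone Redis at localhost:6379).
const LimitDBRedis = require('./limitd-redis/lib/db');

const limitd = new LimitDBRedis({
  uri: 'redis://localhost:6379',
  buckets: { ip: { size: 10, per_second: 5 } },
  ping: {
    interval: 3000,                 // ms between pings (DEFAULT_PING_INTERVAL)
    maxFailedAttempts: 5,           // failures tolerated before reconnect handling
    reconnectIfFailed: () => true,  // return false to get "reconnect-dry-run" events instead
  },
});

// DBPing emits { status, duration, error, failedPings }; LimitDBRedis re-emits it.
limitd.on('ping', ({ status, duration, error, failedPings }) => {
  // status is one of LimitDBRedis.PING_SUCCESS, PING_ERROR,
  // PING_RECONNECT, or PING_RECONNECT_DRY_RUN
  console.log('ping', status, `${duration}ms`, failedPings, error && error.message);
});
```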
package/limitd-redis/lib/put.lua
@@ -0,0 +1,31 @@
+local tokens_to_add = tonumber(ARGV[1])
+local bucket_size = tonumber(ARGV[2])
+local ttl = tonumber(ARGV[3])
+local drip_interval = tonumber(ARGV[4])
+
+local current_time = redis.call('TIME')
+local current_timestamp_ms = current_time[1] * 1000 + current_time[2] / 1000
+
+local current_remaining = redis.call('HMGET', KEYS[1], 'r')[1]
+if current_remaining == false then
+  current_remaining = bucket_size
+end
+
+local new_content = math.min(current_remaining + tokens_to_add, bucket_size)
+
+redis.replicate_commands()
+if new_content < bucket_size then
+  redis.call('HMSET', KEYS[1],
+    'd', current_timestamp_ms,
+    'r', new_content)
+  redis.call('EXPIRE', KEYS[1], ttl)
+else
+  redis.call('DEL', KEYS[1])
+end
+
+local reset_ms = 0
+if drip_interval > 0 then
+  reset_ms = math.ceil(current_timestamp_ms + (bucket_size - new_content) * drip_interval)
+end
+
+return { new_content, current_timestamp_ms, reset_ms }
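put.lua is registered in db.js via `defineCommand('put', { numberOfKeys: 1, ... })`, so the first argument becomes `KEYS[1]` and the rest become `ARGV[1..4]`. The self-contained sketch below illustrates that mapping; the key name and numeric values are placeholders.

```js
// Hedged illustration of the argument mapping for put.lua
// (assumes ioredis and a local Redis; values are placeholders).
const fs = require('fs');
const Redis = require('ioredis');

const redis = new Redis('redis://localhost:6379');
redis.defineCommand('put', {
  numberOfKeys: 1,
  lua: fs.readFileSync(`${__dirname}/put.lua`, 'utf8'),
});

redis.put(
  'ip:10.0.0.1', // KEYS[1] -> "<type>:<key>" (db.js prepends its keyPrefix via ioredis)
  5,             // ARGV[1] -> tokens_to_add
  10,            // ARGV[2] -> bucket_size
  604800,        // ARGV[3] -> ttl in seconds (db.js defaults to ms('7d') / 1000)
  200,           // ARGV[4] -> drip_interval in ms per token
  (err, results) => {
    // results = [new_content, current_timestamp_ms, reset_ms]
    console.log(err || results);
    redis.quit();
  }
);
```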
package/limitd-redis/lib/take.lua
@@ -0,0 +1,48 @@
+local tokens_per_ms = tonumber(ARGV[1])
+local bucket_size = tonumber(ARGV[2])
+local new_content = tonumber(ARGV[2])
+local tokens_to_take = tonumber(ARGV[3])
+local ttl = tonumber(ARGV[4])
+local drip_interval = tonumber(ARGV[5])
+
+local current_time = redis.call('TIME')
+local current_timestamp_ms = current_time[1] * 1000 + current_time[2] / 1000
+
+local current = redis.pcall('HMGET', KEYS[1], 'd', 'r')
+
+if current.err ~= nil then
+  current = {}
+end
+
+if current[1] and tokens_per_ms then
+  -- drip bucket
+  local last_drip = current[1]
+  local content = current[2]
+  local delta_ms = math.max(current_timestamp_ms - last_drip, 0)
+  local drip_amount = delta_ms * tokens_per_ms
+  new_content = math.min(content + drip_amount, bucket_size)
+elseif current[1] and tokens_per_ms == 0 then
+  -- fixed bucket
+  new_content = current[2]
+end
+
+local enough_tokens = new_content >= tokens_to_take
+
+if enough_tokens then
+  new_content = math.min(new_content - tokens_to_take, bucket_size)
+end
+
+-- https://redis.io/commands/EVAL#replicating-commands-instead-of-scripts
+redis.replicate_commands()
+
+redis.call('HMSET', KEYS[1],
+  'd', current_timestamp_ms,
+  'r', new_content)
+redis.call('EXPIRE', KEYS[1], ttl)
+
+local reset_ms = 0
+if drip_interval > 0 then
+  reset_ms = math.ceil(current_timestamp_ms + (bucket_size - new_content) * drip_interval)
+end
+
+return { new_content, enough_tokens, current_timestamp_ms, reset_ms }
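To make the refill arithmetic concrete, the following sketch mirrors take.lua's drip computation in plain JavaScript with illustrative numbers. The Lua above remains the authoritative logic; this assumes the bucket hash already exists (a brand-new bucket starts full in the script).

```js
// Illustrative mirror of take.lua's token arithmetic (not used by the package).
function simulateTake({ tokensPerMs, bucketSize, tokensToTake, lastDripMs, contentAtLastDrip, nowMs }) {
  let newContent;
  if (tokensPerMs) {
    // drip bucket: refill proportionally to the time elapsed since the last drip
    const deltaMs = Math.max(nowMs - lastDripMs, 0);
    newContent = Math.min(contentAtLastDrip + deltaMs * tokensPerMs, bucketSize);
  } else {
    // fixed bucket: no refill between calls
    newContent = contentAtLastDrip;
  }

  const enoughTokens = newContent >= tokensToTake;
  if (enoughTokens) {
    newContent = Math.min(newContent - tokensToTake, bucketSize);
  }
  return { conformant: enoughTokens, remaining: newContent };
}

// Example: a 10-token bucket refilled at 5 tokens/s (0.005 tokens/ms), last drip
// 1 second ago with 3 tokens left, taking 1 token:
// 3 + 1000 * 0.005 = 8 tokens available, 7 remain after the take.
console.log(simulateTake({
  tokensPerMs: 0.005, bucketSize: 10, tokensToTake: 1,
  lastDripMs: 0, contentAtLastDrip: 3, nowMs: 1000,
}));
// -> { conformant: true, remaining: 7 }
```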