@donkeylabs/server 2.1.0 → 2.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/docs/cache.md +27 -34
- package/docs/processes.md +92 -0
- package/docs/rate-limiter.md +23 -28
- package/package.json +6 -2
- package/src/core/cache-adapter-redis.ts +113 -0
- package/src/core/index.ts +10 -0
- package/src/core/process-client.ts +37 -4
- package/src/core/rate-limit-adapter-redis.ts +109 -0
package/docs/cache.md
CHANGED
@@ -322,51 +322,44 @@ interface CacheAdapter {
 }
 ```
 
-### Redis Adapter
+### Built-in Redis Adapter
+
+A production-ready Redis adapter is included. Requires `ioredis` as a peer dependency (`bun add ioredis`).
 
 ```ts
-import { createCache, type CacheAdapter } from "./core/cache";
 import Redis from "ioredis";
+import { RedisCacheAdapter } from "@donkeylabs/server/core";
 
-
-constructor(private redis: Redis) {}
+const redis = new Redis("redis://localhost:6379");
 
-
-
-
-}
+const server = new AppServer({
+  cache: {
+    adapter: new RedisCacheAdapter(redis, { prefix: "myapp:" }),
+  },
+});
 
-
-
-
-await this.redis.set(key, serialized, "PX", ttlMs);
-} else {
-await this.redis.set(key, serialized);
-}
-}
+// Remember to disconnect on shutdown
+server.onShutdown(() => redis.disconnect());
+```
 
-
-
-
-
+**Features:**
+- JSON serialization for values
+- `SET key val PX ttlMs` for TTL support
+- `SCAN` (not `KEYS`) for production-safe key listing on large datasets
+- Optional `prefix` for key namespace isolation in shared Redis instances
+- With prefix: `clear()` uses SCAN + DEL only for prefixed keys
+- Without prefix: `clear()` uses `FLUSHDB`
 
-
-return (await this.redis.exists(key)) === 1;
-}
+### Custom Redis Adapter Example
 
-
-await this.redis.flushdb();
-}
+For custom requirements, implement `CacheAdapter` directly:
 
-
-
-}
-}
+```ts
+import { type CacheAdapter } from "@donkeylabs/server/core";
 
-
-
-
-});
+class MyCustomCacheAdapter implements CacheAdapter {
+  // Implement get, set, delete, has, clear, keys
+}
 ```
 
 ---

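The docs diff above leaves `MyCustomCacheAdapter` as a one-comment stub. For reference, a complete in-memory implementation along those lines could look like the sketch below. It is illustrative only and not part of this release; the method signatures are inferred from the bundled `RedisCacheAdapter` that appears later in this diff, so verify them against the package's actual `CacheAdapter` interface before relying on them.

```ts
// Illustrative sketch only; not part of this release.
// Signatures are inferred from the bundled RedisCacheAdapter.
import { type CacheAdapter } from "@donkeylabs/server/core";

interface Entry {
  value: unknown;
  expiresAt?: number; // epoch ms; undefined = no TTL
}

class InMemoryCacheAdapter implements CacheAdapter {
  private store = new Map<string, Entry>();

  async get<T>(key: string): Promise<T | null> {
    const entry = this.store.get(key);
    if (!entry) return null;
    if (entry.expiresAt !== undefined && entry.expiresAt <= Date.now()) {
      this.store.delete(key); // lazy eviction of expired entries
      return null;
    }
    return entry.value as T;
  }

  async set<T>(key: string, value: T, ttlMs?: number): Promise<void> {
    this.store.set(key, {
      value,
      expiresAt: ttlMs && ttlMs > 0 ? Date.now() + ttlMs : undefined,
    });
  }

  async delete(key: string): Promise<boolean> {
    return this.store.delete(key);
  }

  async has(key: string): Promise<boolean> {
    return (await this.get(key)) !== null;
  }

  async clear(): Promise<void> {
    this.store.clear();
  }

  async keys(pattern?: string): Promise<string[]> {
    // Minimal glob support: "*" is the only wildcard
    const regex = pattern
      ? new RegExp(
          "^" +
            pattern
              .split("*")
              .map((p) => p.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"))
              .join(".*") +
            "$",
        )
      : null;
    return [...this.store.keys()].filter((k) => !regex || regex.test(k));
  }
}
```
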
package/docs/processes.md
CHANGED
@@ -269,6 +269,11 @@ client.connected; // true | false
 // Emit a typed event to the server
 await client.emit("progress", { percent: 50, fps: 30 });
 
+// Register a handler for server-sent messages (alternative to onMessage in connect options)
+client.onMessage((message) => {
+  console.log("Received from server:", message);
+});
+
 // Disconnect when done
 client.disconnect();
 ```
@@ -358,6 +363,9 @@ interface Processes {
   /** Get processes by name */
   getByName(name: string): ManagedProcess[];
 
+  /** Send a message to a running process */
+  send(processId: string, message: any): Promise<boolean>;
+
   /** Stop a process */
   stop(processId: string, signal?: NodeJS.Signals): Promise<void>;
 
@@ -563,6 +571,89 @@ ctx.core.events.on("process.stats", ({ processId, name, stats }) => {
 });
 ```
 
+## Server-to-Process Communication
+
+The server can send messages to running processes via `ctx.core.processes.send()`. The ProcessClient receives these messages through the `onMessage` callback.
+
+### Sending Messages from Server
+
+```typescript
+// In a route handler or service
+await ctx.core.processes.send(processId, {
+  type: "subscribe",
+  channel: "live-scores",
+});
+
+await ctx.core.processes.send(processId, {
+  type: "config_update",
+  settings: { maxConnections: 100 },
+});
+```
+
+### Receiving Messages in Worker
+
+```typescript
+// Option 1: In connect options
+const client = await ProcessClient.connect({
+  onMessage: (message) => {
+    switch (message.type) {
+      case "subscribe":
+        subscribeToChannel(message.channel);
+        break;
+      case "config_update":
+        applyConfig(message.settings);
+        break;
+    }
+  },
+});
+
+// Option 2: Register handler after connecting
+const client = await ProcessClient.connect();
+client.onMessage((message) => {
+  console.log("Received:", message);
+});
+```
+
+### Example: WebSocket Daemon with Server Commands
+
+```typescript
+// Server: define and spawn the WebSocket daemon
+server.getCore().processes.define("ws-daemon", {
+  command: "bun",
+  args: ["./workers/ws-daemon.ts"],
+  events: {
+    ready: z.object({ port: z.number() }),
+    clientCount: z.object({ count: z.number() }),
+  },
+});
+
+const processId = await ctx.core.processes.spawn("ws-daemon", {
+  metadata: { port: 8080 },
+});
+
+// Server: send commands to the daemon
+await ctx.core.processes.send(processId, {
+  type: "broadcast",
+  message: "Server maintenance in 5 minutes",
+});
+
+// Worker: ws-daemon.ts
+import { ProcessClient } from "@donkeylabs/server/process-client";
+
+const client = await ProcessClient.connect({
+  onMessage: (message) => {
+    if (message.type === "broadcast") {
+      // Broadcast to all connected WebSocket clients
+      for (const ws of connections) {
+        ws.send(JSON.stringify({ type: "announcement", text: message.message }));
+      }
+    }
+  },
+});
+
+client.emit("ready", { port: client.metadata.port });
+```
+
 ## Heartbeat Monitoring
 
 The ProcessClient automatically sends heartbeats. If heartbeats stop:
@@ -651,3 +742,4 @@ process.on("SIGTERM", () => client.disconnect());
 3. **Use typed events** - Define event schemas for type safety
 4. **Monitor heartbeats** - Set appropriate timeout for your use case
 5. **Keep wrappers thin** - Business logic should be in the actual process
+6. **Use onMessage for commands** - Register `onMessage` to receive server commands for stateful workers

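Because `send()` and `onMessage` exchange untyped (`any`) payloads, a worker that accepts several commands may want to narrow them before acting. The sketch below shows one way to do that, reusing the command shapes from the docs above; the `ServerCommand` union and its type guard are illustrative and not part of the package.

```ts
// Illustrative sketch; onMessage delivers `any`, so narrowing is up to the worker.
import { ProcessClient } from "@donkeylabs/server/process-client";

type ServerCommand =
  | { type: "subscribe"; channel: string }
  | { type: "config_update"; settings: { maxConnections: number } }
  | { type: "broadcast"; message: string };

function isServerCommand(message: any): message is ServerCommand {
  return (
    typeof message === "object" &&
    message !== null &&
    ["subscribe", "config_update", "broadcast"].includes(message.type)
  );
}

const client = await ProcessClient.connect({
  onMessage: (message) => {
    if (!isServerCommand(message)) {
      console.warn("Ignoring unknown server message:", message);
      return;
    }
    switch (message.type) {
      case "subscribe":
        console.log("subscribing to", message.channel);
        break;
      case "config_update":
        console.log("applying settings", message.settings);
        break;
      case "broadcast":
        console.log("relaying announcement:", message.message);
        break;
    }
  },
});

// Follow the docs' shutdown guidance
process.on("SIGTERM", () => client.disconnect());
```
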
package/docs/rate-limiter.md
CHANGED
@@ -420,46 +420,41 @@ interface RateLimitAdapter {
 }
 ```
 
-### Redis Adapter
+### Built-in Redis Adapter
+
+A production-ready Redis adapter is included. Requires `ioredis` as a peer dependency (`bun add ioredis`).
 
 ```ts
-import { createRateLimiter, type RateLimitAdapter } from "./core/rate-limiter";
 import Redis from "ioredis";
+import { RedisRateLimitAdapter } from "@donkeylabs/server/core";
 
-
-constructor(private redis: Redis) {}
+const redis = new Redis("redis://localhost:6379");
 
-
-
-
+const server = new AppServer({
+  rateLimiter: {
+    adapter: new RedisRateLimitAdapter(redis, { prefix: "myapp:" }),
+  },
+});
 
-
+// Remember to disconnect on shutdown
+server.onShutdown(() => redis.disconnect());
+```
 
-
-
-
-
+**Features:**
+- Atomic Lua script for `INCR` + conditional `PEXPIRE` (prevents race conditions)
+- Pipeline `GET` + `PTTL` in a single round-trip for `get()`
+- Optional `prefix` for key namespace isolation in shared Redis instances
 
-
+### Custom Redis Adapter Example
 
-
-}
+For custom requirements, implement `RateLimitAdapter` directly:
 
-
-
-}
+```ts
+import { type RateLimitAdapter } from "@donkeylabs/server/core";
 
-
-
-if (keys.length > 0) {
-await this.redis.del(...keys);
-}
-}
+class MyCustomRateLimitAdapter implements RateLimitAdapter {
+  // Implement increment, get, reset
 }
-
-const rateLimiter = createRateLimiter({
-adapter: new RedisRateLimitAdapter(new Redis()),
-});
 ```
 
 ---

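As with the cache docs, the custom-adapter stub above ends at a comment. A minimal in-memory fixed-window adapter could look like this sketch; it is illustrative only, with the `increment`/`get`/`reset` signatures inferred from the bundled `RedisRateLimitAdapter` later in this diff, and a single-process `Map` that does not share state across instances.

```ts
// Illustrative sketch only; not part of this release.
// Signatures are inferred from the bundled RedisRateLimitAdapter.
import { type RateLimitAdapter } from "@donkeylabs/server/core";

interface WindowState {
  count: number;
  resetAt: Date;
}

class InMemoryRateLimitAdapter implements RateLimitAdapter {
  private windows = new Map<string, WindowState>();

  async increment(
    key: string,
    windowMs: number,
  ): Promise<{ count: number; resetAt: Date }> {
    const now = Date.now();
    const existing = this.windows.get(key);
    // Start a fresh window if none exists or the previous one has elapsed
    if (!existing || existing.resetAt.getTime() <= now) {
      const fresh = { count: 1, resetAt: new Date(now + windowMs) };
      this.windows.set(key, fresh);
      return fresh;
    }
    existing.count += 1;
    return existing;
  }

  async get(key: string): Promise<{ count: number; resetAt: Date } | null> {
    const existing = this.windows.get(key);
    if (!existing || existing.resetAt.getTime() <= Date.now()) return null;
    return existing;
  }

  async reset(key: string): Promise<void> {
    this.windows.delete(key);
  }
}
```
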
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@donkeylabs/server",
-  "version": "2.1.0",
+  "version": "2.3.0",
   "type": "module",
   "description": "Type-safe plugin system for building RPC-style APIs with Bun",
   "main": "./src/index.ts",
@@ -75,7 +75,8 @@
     "@aws-sdk/s3-request-presigner": "^3.0.0",
     "@playwright/test": "^1.40.0",
     "pg": "^8.0.0",
-    "mysql2": "^3.0.0"
+    "mysql2": "^3.0.0",
+    "ioredis": "^5.0.0"
   },
   "peerDependenciesMeta": {
     "@aws-sdk/client-s3": {
@@ -92,6 +93,9 @@
     },
     "mysql2": {
       "optional": true
+    },
+    "ioredis": {
+      "optional": true
     }
   },
   "dependencies": {

package/src/core/cache-adapter-redis.ts
ADDED
@@ -0,0 +1,113 @@
+// Redis Cache Adapter
+// Production-ready cache backend using Redis (via ioredis)
+
+import type { CacheAdapter } from "./cache";
+
+export interface RedisCacheAdapterConfig {
+  /** Key prefix for namespace isolation in shared Redis instances */
+  prefix?: string;
+}
+
+/**
+ * Redis-backed cache adapter using ioredis.
+ *
+ * Constructor takes a pre-built ioredis client (typed as `any` to avoid
+ * requiring ioredis types at compile time — same pattern as S3StorageAdapter).
+ * User manages connection lifecycle (connect/disconnect in onShutdown).
+ *
+ * @example
+ * ```ts
+ * import Redis from "ioredis";
+ * import { RedisCacheAdapter } from "@donkeylabs/server/core";
+ *
+ * const redis = new Redis("redis://localhost:6379");
+ * const server = new AppServer({
+ *   cache: { adapter: new RedisCacheAdapter(redis, { prefix: "myapp:" }) },
+ * });
+ * ```
+ */
+export class RedisCacheAdapter implements CacheAdapter {
+  private redis: any;
+  private prefix: string;
+
+  constructor(redis: any, config: RedisCacheAdapterConfig = {}) {
+    this.redis = redis;
+    this.prefix = config.prefix ?? "";
+  }
+
+  private prefixKey(key: string): string {
+    return this.prefix + key;
+  }
+
+  private stripPrefix(key: string): string {
+    if (this.prefix && key.startsWith(this.prefix)) {
+      return key.slice(this.prefix.length);
+    }
+    return key;
+  }
+
+  async get<T>(key: string): Promise<T | null> {
+    const raw = await this.redis.get(this.prefixKey(key));
+    if (raw === null || raw === undefined) return null;
+    return JSON.parse(raw) as T;
+  }
+
+  async set<T>(key: string, value: T, ttlMs?: number): Promise<void> {
+    const serialized = JSON.stringify(value);
+    if (ttlMs && ttlMs > 0) {
+      await this.redis.set(this.prefixKey(key), serialized, "PX", ttlMs);
+    } else {
+      await this.redis.set(this.prefixKey(key), serialized);
+    }
+  }
+
+  async delete(key: string): Promise<boolean> {
+    const result = await this.redis.del(this.prefixKey(key));
+    return result > 0;
+  }
+
+  async has(key: string): Promise<boolean> {
+    return (await this.redis.exists(this.prefixKey(key))) === 1;
+  }
+
+  async clear(): Promise<void> {
+    if (this.prefix) {
+      // With prefix: SCAN + DEL only prefixed keys (production-safe)
+      const keys = await this.scanKeys(this.prefix + "*");
+      if (keys.length > 0) {
+        await this.redis.del(...keys);
+      }
+    } else {
+      await this.redis.flushdb();
+    }
+  }
+
+  async keys(pattern?: string): Promise<string[]> {
+    const redisPattern = this.prefix + (pattern ?? "*");
+    const keys = await this.scanKeys(redisPattern);
+    return keys.map((k: string) => this.stripPrefix(k));
+  }
+
+  /**
+   * Uses SCAN (not KEYS) for production safety on large datasets.
+   * Iterates cursor until exhausted.
+   */
+  private async scanKeys(pattern: string): Promise<string[]> {
+    const results: string[] = [];
+    let cursor = "0";
+
+    do {
+      const [nextCursor, keys] = await this.redis.scan(
+        cursor,
+        "MATCH",
+        pattern,
+        "COUNT",
+        100,
+      );
+      cursor = nextCursor;
+      results.push(...keys);
+    } while (cursor !== "0");
+
+    return results;
+  }
+}

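Used outside of `AppServer`, the new adapter behaves as the docs above describe. A quick standalone exercise (assuming a local Redis at `redis://localhost:6379`) might look like:

```ts
// Standalone usage sketch; normally the adapter is wired into AppServer's
// cache config as in the docs diff above. Assumes Redis on localhost:6379.
import Redis from "ioredis";
import { RedisCacheAdapter } from "@donkeylabs/server/core";

const redis = new Redis("redis://localhost:6379");
const cache = new RedisCacheAdapter(redis, { prefix: "demo:" });

await cache.set("user:1", { name: "Ada" }, 60_000); // JSON value, SET ... PX 60000
console.log(await cache.get<{ name: string }>("user:1")); // { name: "Ada" }
console.log(await cache.keys("user:*")); // ["user:1"] (the "demo:" prefix is stripped)
console.log(await cache.has("user:2")); // false

await cache.clear(); // prefix set, so SCAN + DEL of "demo:*" keys only
redis.disconnect();
```
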
package/src/core/index.ts
CHANGED
@@ -282,6 +282,16 @@ export {
 export { LocalStorageAdapter } from "./storage-adapter-local";
 export { S3StorageAdapter } from "./storage-adapter-s3";
 
+export {
+  RedisCacheAdapter,
+  type RedisCacheAdapterConfig,
+} from "./cache-adapter-redis";
+
+export {
+  RedisRateLimitAdapter,
+  type RedisRateLimitAdapterConfig,
+} from "./rate-limit-adapter-redis";
+
 export {
   type Logs,
   type LogSource,

package/src/core/process-client.ts
CHANGED
@@ -8,7 +8,11 @@
  * ```ts
  * import { ProcessClient } from "@donkeylabs/server/process-client";
  *
- * const client = await ProcessClient.connect(
+ * const client = await ProcessClient.connect({
+ *   onMessage: (message) => {
+ *     if (message.type === "subscribe") { ... }
+ *   },
+ * });
  *
  * // Access metadata passed during spawn
  * const { inputPath, outputPath } = client.metadata;
@@ -83,6 +87,8 @@ export interface ProcessClientConfig {
   maxReconnectAttempts?: number;
   /** Stats emission configuration */
   stats?: StatsConfig;
+  /** Callback for messages sent from the server via ctx.core.processes.send() */
+  onMessage?: (message: any) => void | Promise<void>;
 }
 
 export interface ProcessClient {
@@ -94,6 +100,8 @@ export interface ProcessClient {
   readonly connected: boolean;
   /** Emit a typed event to the server */
   emit(event: string, data?: Record<string, any>): Promise<boolean>;
+  /** Register a handler for messages sent from the server via ctx.core.processes.send() */
+  onMessage(handler: (message: any) => void | Promise<void>): void;
   /** Disconnect from the server */
   disconnect(): void;
 }
@@ -120,6 +128,7 @@ class ProcessClientImpl implements ProcessClient {
   private reconnectAttempts = 0;
   private isDisconnecting = false;
   private _connected = false;
+  private messageHandler?: (message: any) => void | Promise<void>;
 
   // For CPU percentage calculation
   private lastCpuUsage?: NodeJS.CpuUsage;
@@ -134,6 +143,7 @@ class ProcessClientImpl implements ProcessClient {
     this.reconnectInterval = config.reconnectInterval ?? 2000;
     this.maxReconnectAttempts = config.maxReconnectAttempts ?? 30;
     this.statsConfig = config.stats ?? { enabled: false };
+    if (config.onMessage) this.messageHandler = config.onMessage;
   }
 
   get connected(): boolean {
@@ -221,9 +231,22 @@ class ProcessClientImpl implements ProcessClient {
   }
 
   private handleServerMessage(message: any): void {
-
-
-
+    if (this.messageHandler) {
+      try {
+        const result = this.messageHandler(message);
+        if (result instanceof Promise) {
+          result.catch((err) => {
+            console.error(`[ProcessClient] Error in onMessage handler:`, err);
+          });
+        }
+      } catch (err) {
+        console.error(`[ProcessClient] Error in onMessage handler:`, err);
+      }
+    }
+  }
+
+  onMessage(handler: (message: any) => void | Promise<void>): void {
+    this.messageHandler = handler;
   }
 
   private scheduleReconnect(): void {
@@ -412,6 +435,14 @@ export function createProcessClient(config: ProcessClientConfig): ProcessClient
  * const client = await ProcessClient.connect({
  *   stats: { enabled: true, interval: 2000 }
  * });
+ *
+ * // With server message handling
+ * const client = await ProcessClient.connect({
+ *   onMessage: (message) => {
+ *     if (message.type === "subscribe") { ... }
+ *     if (message.type === "config_update") { ... }
+ *   },
+ * });
  * ```
  */
 export async function connect(options?: {
@@ -420,6 +451,8 @@ export async function connect(options?: {
   maxReconnectAttempts?: number;
   /** Enable real-time CPU/memory stats emission */
   stats?: StatsConfig;
+  /** Callback for messages sent from the server via ctx.core.processes.send() */
+  onMessage?: (message: any) => void | Promise<void>;
 }): Promise<ProcessClient> {
   const processId = process.env.DONKEYLABS_PROCESS_ID;
   const socketPath = process.env.DONKEYLABS_SOCKET_PATH;

package/src/core/rate-limit-adapter-redis.ts
ADDED
@@ -0,0 +1,109 @@
+// Redis Rate Limit Adapter
+// Production-ready rate limiting backend using Redis (via ioredis)
+
+import type { RateLimitAdapter } from "./rate-limiter";
+
+export interface RedisRateLimitAdapterConfig {
+  /** Key prefix for namespace isolation in shared Redis instances */
+  prefix?: string;
+}
+
+/**
+ * Redis-backed rate limit adapter using ioredis.
+ *
+ * Uses a Lua script for atomic INCR + conditional PEXPIRE to prevent
+ * race conditions where a key is incremented but the expire fails.
+ *
+ * Constructor takes a pre-built ioredis client (typed as `any` to avoid
+ * requiring ioredis types at compile time — same pattern as S3StorageAdapter).
+ * User manages connection lifecycle (connect/disconnect in onShutdown).
+ *
+ * @example
+ * ```ts
+ * import Redis from "ioredis";
+ * import { RedisRateLimitAdapter } from "@donkeylabs/server/core";
+ *
+ * const redis = new Redis("redis://localhost:6379");
+ * const server = new AppServer({
+ *   rateLimiter: { adapter: new RedisRateLimitAdapter(redis, { prefix: "myapp:" }) },
+ * });
+ * ```
+ */
+export class RedisRateLimitAdapter implements RateLimitAdapter {
+  private redis: any;
+  private prefix: string;
+
+  /**
+   * Lua script for atomic increment + conditional expire.
+   * KEYS[1] = rate limit key
+   * ARGV[1] = window TTL in milliseconds
+   *
+   * Returns [count, ttl_remaining_ms]:
+   * - count: current count after increment
+   * - ttl_remaining_ms: remaining TTL in milliseconds
+   */
+  private static readonly INCREMENT_SCRIPT = `
+    local count = redis.call('INCR', KEYS[1])
+    if count == 1 then
+      redis.call('PEXPIRE', KEYS[1], ARGV[1])
+    end
+    local ttl = redis.call('PTTL', KEYS[1])
+    return {count, ttl}
+  `;
+
+  constructor(redis: any, config: RedisRateLimitAdapterConfig = {}) {
+    this.redis = redis;
+    this.prefix = config.prefix ?? "";
+  }
+
+  private prefixKey(key: string): string {
+    return this.prefix + key;
+  }
+
+  async increment(
+    key: string,
+    windowMs: number,
+  ): Promise<{ count: number; resetAt: Date }> {
+    const prefixed = this.prefixKey(key);
+    const [count, ttl] = await this.redis.eval(
+      RedisRateLimitAdapter.INCREMENT_SCRIPT,
+      1,
+      prefixed,
+      windowMs,
+    );
+
+    // ttl is remaining time in ms; derive resetAt from it
+    const resetAt = new Date(Date.now() + Math.max(ttl, 0));
+    return { count, resetAt };
+  }
+
+  async get(key: string): Promise<{ count: number; resetAt: Date } | null> {
+    const prefixed = this.prefixKey(key);
+
+    // Pipeline GET + PTTL in a single round-trip
+    const pipeline = this.redis.pipeline();
+    pipeline.get(prefixed);
+    pipeline.pttl(prefixed);
+    const results = await pipeline.exec();
+
+    const [getErr, rawCount] = results[0];
+    const [pttlErr, ttl] = results[1];
+
+    if (getErr || pttlErr) {
+      throw getErr || pttlErr;
+    }
+
+    if (rawCount === null) return null;
+
+    const count = parseInt(rawCount, 10);
+    if (isNaN(count)) return null;
+
+    // PTTL returns -2 if key doesn't exist, -1 if no expiry
+    const resetAt = new Date(Date.now() + Math.max(ttl, 0));
+    return { count, resetAt };
+  }
+
+  async reset(key: string): Promise<void> {
+    await this.redis.del(this.prefixKey(key));
+  }
+}
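
To close, a standalone exercise of the new rate-limit adapter (again assuming a local Redis; the limit and window values are arbitrary) shows how `increment()` reports the per-window count and reset time that a caller would compare against its own limit:

```ts
// Standalone usage sketch; normally the adapter is wired into AppServer's
// rateLimiter config as in the docs diff above. Assumes Redis on localhost:6379.
import Redis from "ioredis";
import { RedisRateLimitAdapter } from "@donkeylabs/server/core";

const redis = new Redis("redis://localhost:6379");
const limiter = new RedisRateLimitAdapter(redis, { prefix: "rl:" });

const limit = 5; // allowed requests per window (caller-defined)
const windowMs = 10_000; // 10-second fixed window

for (let i = 0; i < 7; i++) {
  const { count, resetAt } = await limiter.increment("ip:203.0.113.7", windowMs);
  const allowed = count <= limit;
  console.log(
    `request ${i + 1}: count=${count} allowed=${allowed} resets=${resetAt.toISOString()}`,
  );
}

await limiter.reset("ip:203.0.113.7"); // clear the window early
redis.disconnect();
```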