@supaku/agentfactory-server 0.7.18 → 0.7.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/index.d.ts +3 -0
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +6 -0
- package/dist/src/quota-tracker.d.ts +62 -0
- package/dist/src/quota-tracker.d.ts.map +1 -0
- package/dist/src/quota-tracker.js +155 -0
- package/dist/src/redis-circuit-breaker.d.ts +67 -0
- package/dist/src/redis-circuit-breaker.d.ts.map +1 -0
- package/dist/src/redis-circuit-breaker.js +294 -0
- package/dist/src/redis-rate-limiter.d.ts +51 -0
- package/dist/src/redis-rate-limiter.d.ts.map +1 -0
- package/dist/src/redis-rate-limiter.js +168 -0
- package/package.json +3 -3
package/dist/src/index.d.ts
CHANGED
|
@@ -18,4 +18,7 @@ export * from './governor-storage.js';
|
|
|
18
18
|
export * from './processing-state-storage.js';
|
|
19
19
|
export * from './governor-event-bus.js';
|
|
20
20
|
export * from './governor-dedup.js';
|
|
21
|
+
export * from './redis-rate-limiter.js';
|
|
22
|
+
export * from './redis-circuit-breaker.js';
|
|
23
|
+
export * from './quota-tracker.js';
|
|
21
24
|
//# sourceMappingURL=index.d.ts.map
|
package/dist/src/index.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AACA,cAAc,aAAa,CAAA;AAG3B,cAAc,YAAY,CAAA;AAG1B,cAAc,YAAY,CAAA;AAG1B,cAAc,sBAAsB,CAAA;AAGpC,cAAc,iBAAiB,CAAA;AAG/B,cAAc,qBAAqB,CAAA;AAGnC,cAAc,iBAAiB,CAAA;AAG/B,cAAc,qBAAqB,CAAA;AAGnC,cAAc,0BAA0B,CAAA;AAGxC,cAAc,sBAAsB,CAAA;AAGpC,cAAc,qBAAqB,CAAA;AAGnC,cAAc,kBAAkB,CAAA;AAGhC,cAAc,mBAAmB,CAAA;AAGjC,cAAc,iBAAiB,CAAA;AAG/B,cAAc,oBAAoB,CAAA;AAGlC,cAAc,qBAAqB,CAAA;AAGnC,cAAc,uBAAuB,CAAA;AAGrC,cAAc,+BAA+B,CAAA;AAG7C,cAAc,yBAAyB,CAAA;AAGvC,cAAc,qBAAqB,CAAA"}
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AACA,cAAc,aAAa,CAAA;AAG3B,cAAc,YAAY,CAAA;AAG1B,cAAc,YAAY,CAAA;AAG1B,cAAc,sBAAsB,CAAA;AAGpC,cAAc,iBAAiB,CAAA;AAG/B,cAAc,qBAAqB,CAAA;AAGnC,cAAc,iBAAiB,CAAA;AAG/B,cAAc,qBAAqB,CAAA;AAGnC,cAAc,0BAA0B,CAAA;AAGxC,cAAc,sBAAsB,CAAA;AAGpC,cAAc,qBAAqB,CAAA;AAGnC,cAAc,kBAAkB,CAAA;AAGhC,cAAc,mBAAmB,CAAA;AAGjC,cAAc,iBAAiB,CAAA;AAG/B,cAAc,oBAAoB,CAAA;AAGlC,cAAc,qBAAqB,CAAA;AAGnC,cAAc,uBAAuB,CAAA;AAGrC,cAAc,+BAA+B,CAAA;AAG7C,cAAc,yBAAyB,CAAA;AAGvC,cAAc,qBAAqB,CAAA;AAGnC,cAAc,yBAAyB,CAAA;AAGvC,cAAc,4BAA4B,CAAA;AAG1C,cAAc,oBAAoB,CAAA"}
|
package/dist/src/index.js
CHANGED
|
@@ -38,3 +38,9 @@ export * from './processing-state-storage.js';
|
|
|
38
38
|
export * from './governor-event-bus.js';
|
|
39
39
|
// Governor event deduplicator (Redis SETNX)
|
|
40
40
|
export * from './governor-dedup.js';
|
|
41
|
+
// Redis-backed Linear API rate limiter (shared across processes)
|
|
42
|
+
export * from './redis-rate-limiter.js';
|
|
43
|
+
// Redis-backed circuit breaker (shared across processes)
|
|
44
|
+
export * from './redis-circuit-breaker.js';
|
|
45
|
+
// Linear API quota tracker (reads rate limit headers)
|
|
46
|
+
export * from './quota-tracker.js';
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Linear API Quota Tracker
|
|
3
|
+
*
|
|
4
|
+
* Stores Linear's rate limit response headers in Redis so any component
|
|
5
|
+
* can check the remaining budget before making a call.
|
|
6
|
+
*
|
|
7
|
+
* Redis keys:
|
|
8
|
+
* - `linear:quota:{workspaceId}:requests_remaining`
|
|
9
|
+
* - `linear:quota:{workspaceId}:complexity_remaining`
|
|
10
|
+
* - `linear:quota:{workspaceId}:requests_reset` (timestamp)
|
|
11
|
+
* - `linear:quota:{workspaceId}:updated_at` (timestamp)
|
|
12
|
+
*
|
|
13
|
+
* Usage:
|
|
14
|
+
* - After every Linear API call, call `recordQuota()` with response headers
|
|
15
|
+
* - Before making a call, check `getQuota()` to see remaining budget
|
|
16
|
+
* - If `requestsRemaining < LOW_QUOTA_THRESHOLD`, the rate limiter
|
|
17
|
+
* should proactively throttle
|
|
18
|
+
*/
|
|
19
|
+
/** Threshold below which we should proactively throttle */
|
|
20
|
+
export declare const LOW_QUOTA_THRESHOLD = 500;
|
|
21
|
+
export interface LinearQuotaSnapshot {
|
|
22
|
+
/** Remaining request quota (from X-RateLimit-Requests-Remaining) */
|
|
23
|
+
requestsRemaining: number | null;
|
|
24
|
+
/** Remaining complexity quota (from X-RateLimit-Complexity-Remaining) */
|
|
25
|
+
complexityRemaining: number | null;
|
|
26
|
+
/** Timestamp when request quota resets (from X-RateLimit-Requests-Reset) */
|
|
27
|
+
requestsReset: number | null;
|
|
28
|
+
/** When this snapshot was last updated */
|
|
29
|
+
updatedAt: number;
|
|
30
|
+
}
|
|
31
|
+
/**
|
|
32
|
+
* Record quota information from Linear API response headers.
|
|
33
|
+
*
|
|
34
|
+
* Call this after every successful Linear API response.
|
|
35
|
+
*/
|
|
36
|
+
export declare function recordQuota(workspaceId: string, headers: {
|
|
37
|
+
requestsRemaining?: string | number | null;
|
|
38
|
+
complexityRemaining?: string | number | null;
|
|
39
|
+
requestsReset?: string | number | null;
|
|
40
|
+
}): Promise<void>;
|
|
41
|
+
/**
|
|
42
|
+
* Get the current quota snapshot for a workspace.
|
|
43
|
+
*/
|
|
44
|
+
export declare function getQuota(workspaceId: string): Promise<LinearQuotaSnapshot>;
|
|
45
|
+
/**
|
|
46
|
+
* Check if quota is critically low for a workspace.
|
|
47
|
+
*
|
|
48
|
+
* Returns true if we know the quota is below the threshold.
|
|
49
|
+
* Returns false if quota is healthy or unknown (fail open).
|
|
50
|
+
*/
|
|
51
|
+
export declare function isQuotaLow(workspaceId: string): Promise<boolean>;
|
|
52
|
+
/**
|
|
53
|
+
* Extract quota headers from a Linear API response.
|
|
54
|
+
*
|
|
55
|
+
* Works with both fetch Response objects and plain header objects.
|
|
56
|
+
*/
|
|
57
|
+
export declare function extractQuotaHeaders(response: unknown): {
|
|
58
|
+
requestsRemaining?: string;
|
|
59
|
+
complexityRemaining?: string;
|
|
60
|
+
requestsReset?: string;
|
|
61
|
+
};
|
|
62
|
+
//# sourceMappingURL=quota-tracker.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"quota-tracker.d.ts","sourceRoot":"","sources":["../../src/quota-tracker.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;GAiBG;AAOH,2DAA2D;AAC3D,eAAO,MAAM,mBAAmB,MAAM,CAAA;AAKtC,MAAM,WAAW,mBAAmB;IAClC,oEAAoE;IACpE,iBAAiB,EAAE,MAAM,GAAG,IAAI,CAAA;IAChC,yEAAyE;IACzE,mBAAmB,EAAE,MAAM,GAAG,IAAI,CAAA;IAClC,4EAA4E;IAC5E,aAAa,EAAE,MAAM,GAAG,IAAI,CAAA;IAC5B,0CAA0C;IAC1C,SAAS,EAAE,MAAM,CAAA;CAClB;AAED;;;;GAIG;AACH,wBAAsB,WAAW,CAC/B,WAAW,EAAE,MAAM,EACnB,OAAO,EAAE;IACP,iBAAiB,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,IAAI,CAAA;IAC1C,mBAAmB,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,IAAI,CAAA;IAC5C,aAAa,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,IAAI,CAAA;CACvC,GACA,OAAO,CAAC,IAAI,CAAC,CAgDf;AAED;;GAEG;AACH,wBAAsB,QAAQ,CAAC,WAAW,EAAE,MAAM,GAAG,OAAO,CAAC,mBAAmB,CAAC,CAiChF;AAED;;;;;GAKG;AACH,wBAAsB,UAAU,CAAC,WAAW,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC,CAUtE;AAED;;;;GAIG;AACH,wBAAgB,mBAAmB,CAAC,QAAQ,EAAE,OAAO,GAAG;IACtD,iBAAiB,CAAC,EAAE,MAAM,CAAA;IAC1B,mBAAmB,CAAC,EAAE,MAAM,CAAA;IAC5B,aAAa,CAAC,EAAE,MAAM,CAAA;CACvB,CAkCA"}
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Linear API Quota Tracker
|
|
3
|
+
*
|
|
4
|
+
* Stores Linear's rate limit response headers in Redis so any component
|
|
5
|
+
* can check the remaining budget before making a call.
|
|
6
|
+
*
|
|
7
|
+
* Redis keys:
|
|
8
|
+
* - `linear:quota:{workspaceId}:requests_remaining`
|
|
9
|
+
* - `linear:quota:{workspaceId}:complexity_remaining`
|
|
10
|
+
* - `linear:quota:{workspaceId}:requests_reset` (timestamp)
|
|
11
|
+
* - `linear:quota:{workspaceId}:updated_at` (timestamp)
|
|
12
|
+
*
|
|
13
|
+
* Usage:
|
|
14
|
+
* - After every Linear API call, call `recordQuota()` with response headers
|
|
15
|
+
* - Before making a call, check `getQuota()` to see remaining budget
|
|
16
|
+
* - If `requestsRemaining < LOW_QUOTA_THRESHOLD`, the rate limiter
|
|
17
|
+
* should proactively throttle
|
|
18
|
+
*/
|
|
19
|
+
import { getRedisClient } from './redis.js';
|
|
20
|
+
import { createLogger } from './logger.js';
|
|
21
|
+
const log = createLogger('quota-tracker');
|
|
22
|
+
/** Threshold below which we should proactively throttle */
|
|
23
|
+
export const LOW_QUOTA_THRESHOLD = 500;
|
|
24
|
+
/** Default quota TTL in Redis (2 hours, matching Linear's hourly reset) */
|
|
25
|
+
const QUOTA_TTL_SECONDS = 7200;
|
|
26
|
+
/**
|
|
27
|
+
* Record quota information from Linear API response headers.
|
|
28
|
+
*
|
|
29
|
+
* Call this after every successful Linear API response.
|
|
30
|
+
*/
|
|
31
|
+
export async function recordQuota(workspaceId, headers) {
|
|
32
|
+
try {
|
|
33
|
+
const redis = getRedisClient();
|
|
34
|
+
const prefix = `linear:quota:${workspaceId}`;
|
|
35
|
+
const pipeline = redis.pipeline();
|
|
36
|
+
const now = Date.now();
|
|
37
|
+
if (headers.requestsRemaining != null) {
|
|
38
|
+
const value = String(headers.requestsRemaining);
|
|
39
|
+
pipeline.set(`${prefix}:requests_remaining`, value, 'EX', QUOTA_TTL_SECONDS);
|
|
40
|
+
const remaining = parseInt(value, 10);
|
|
41
|
+
if (!isNaN(remaining) && remaining < LOW_QUOTA_THRESHOLD) {
|
|
42
|
+
log.warn('Linear quota running low', {
|
|
43
|
+
workspaceId,
|
|
44
|
+
requestsRemaining: remaining,
|
|
45
|
+
});
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
if (headers.complexityRemaining != null) {
|
|
49
|
+
pipeline.set(`${prefix}:complexity_remaining`, String(headers.complexityRemaining), 'EX', QUOTA_TTL_SECONDS);
|
|
50
|
+
}
|
|
51
|
+
if (headers.requestsReset != null) {
|
|
52
|
+
pipeline.set(`${prefix}:requests_reset`, String(headers.requestsReset), 'EX', QUOTA_TTL_SECONDS);
|
|
53
|
+
}
|
|
54
|
+
pipeline.set(`${prefix}:updated_at`, String(now), 'EX', QUOTA_TTL_SECONDS);
|
|
55
|
+
await pipeline.exec();
|
|
56
|
+
}
|
|
57
|
+
catch (err) {
|
|
58
|
+
// Non-critical — log and continue
|
|
59
|
+
log.error('Failed to record quota', {
|
|
60
|
+
workspaceId,
|
|
61
|
+
error: err instanceof Error ? err.message : String(err),
|
|
62
|
+
});
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
/**
|
|
66
|
+
* Get the current quota snapshot for a workspace.
|
|
67
|
+
*/
|
|
68
|
+
export async function getQuota(workspaceId) {
|
|
69
|
+
try {
|
|
70
|
+
const redis = getRedisClient();
|
|
71
|
+
const prefix = `linear:quota:${workspaceId}`;
|
|
72
|
+
const [requestsRemaining, complexityRemaining, requestsReset, updatedAt] = await Promise.all([
|
|
73
|
+
redis.get(`${prefix}:requests_remaining`),
|
|
74
|
+
redis.get(`${prefix}:complexity_remaining`),
|
|
75
|
+
redis.get(`${prefix}:requests_reset`),
|
|
76
|
+
redis.get(`${prefix}:updated_at`),
|
|
77
|
+
]);
|
|
78
|
+
return {
|
|
79
|
+
requestsRemaining: requestsRemaining ? parseInt(requestsRemaining, 10) : null,
|
|
80
|
+
complexityRemaining: complexityRemaining
|
|
81
|
+
? parseInt(complexityRemaining, 10)
|
|
82
|
+
: null,
|
|
83
|
+
requestsReset: requestsReset ? parseInt(requestsReset, 10) : null,
|
|
84
|
+
updatedAt: updatedAt ? parseInt(updatedAt, 10) : 0,
|
|
85
|
+
};
|
|
86
|
+
}
|
|
87
|
+
catch (err) {
|
|
88
|
+
log.error('Failed to get quota', {
|
|
89
|
+
workspaceId,
|
|
90
|
+
error: err instanceof Error ? err.message : String(err),
|
|
91
|
+
});
|
|
92
|
+
return {
|
|
93
|
+
requestsRemaining: null,
|
|
94
|
+
complexityRemaining: null,
|
|
95
|
+
requestsReset: null,
|
|
96
|
+
updatedAt: 0,
|
|
97
|
+
};
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
/**
|
|
101
|
+
* Check if quota is critically low for a workspace.
|
|
102
|
+
*
|
|
103
|
+
* Returns true if we know the quota is below the threshold.
|
|
104
|
+
* Returns false if quota is healthy or unknown (fail open).
|
|
105
|
+
*/
|
|
106
|
+
export async function isQuotaLow(workspaceId) {
|
|
107
|
+
const quota = await getQuota(workspaceId);
|
|
108
|
+
if (quota.requestsRemaining === null)
|
|
109
|
+
return false; // unknown = allow
|
|
110
|
+
// Check staleness — if data is older than 5 minutes, don't trust it
|
|
111
|
+
const staleThreshold = 5 * 60 * 1000;
|
|
112
|
+
if (Date.now() - quota.updatedAt > staleThreshold)
|
|
113
|
+
return false;
|
|
114
|
+
return quota.requestsRemaining < LOW_QUOTA_THRESHOLD;
|
|
115
|
+
}
|
|
116
|
+
/**
|
|
117
|
+
* Extract quota headers from a Linear API response.
|
|
118
|
+
*
|
|
119
|
+
* Works with both fetch Response objects and plain header objects.
|
|
120
|
+
*/
|
|
121
|
+
export function extractQuotaHeaders(response) {
|
|
122
|
+
const result = {};
|
|
123
|
+
if (typeof response !== 'object' || response === null)
|
|
124
|
+
return result;
|
|
125
|
+
const resp = response;
|
|
126
|
+
// Try fetch-style Response with .headers.get()
|
|
127
|
+
const headers = resp.headers;
|
|
128
|
+
if (headers) {
|
|
129
|
+
if (typeof headers.get === 'function') {
|
|
130
|
+
const getHeader = headers.get;
|
|
131
|
+
const rr = getHeader.call(headers, 'x-ratelimit-requests-remaining');
|
|
132
|
+
const cr = getHeader.call(headers, 'x-ratelimit-complexity-remaining');
|
|
133
|
+
const rs = getHeader.call(headers, 'x-ratelimit-requests-reset');
|
|
134
|
+
if (rr)
|
|
135
|
+
result.requestsRemaining = rr;
|
|
136
|
+
if (cr)
|
|
137
|
+
result.complexityRemaining = cr;
|
|
138
|
+
if (rs)
|
|
139
|
+
result.requestsReset = rs;
|
|
140
|
+
}
|
|
141
|
+
else {
|
|
142
|
+
// Plain object headers
|
|
143
|
+
const rr = headers['x-ratelimit-requests-remaining'];
|
|
144
|
+
const cr = headers['x-ratelimit-complexity-remaining'];
|
|
145
|
+
const rs = headers['x-ratelimit-requests-reset'];
|
|
146
|
+
if (rr)
|
|
147
|
+
result.requestsRemaining = rr;
|
|
148
|
+
if (cr)
|
|
149
|
+
result.complexityRemaining = cr;
|
|
150
|
+
if (rs)
|
|
151
|
+
result.requestsReset = rs;
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
return result;
|
|
155
|
+
}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Redis Circuit Breaker
|
|
3
|
+
*
|
|
4
|
+
* Shares circuit breaker state across processes via Redis.
|
|
5
|
+
* All processes (dashboard, governor, CLI agents) see the same
|
|
6
|
+
* circuit state for a workspace.
|
|
7
|
+
*
|
|
8
|
+
* Redis keys:
|
|
9
|
+
* - `linear:circuit:{workspaceId}:state` — 'closed' | 'open' | 'half-open'
|
|
10
|
+
* - `linear:circuit:{workspaceId}:failures` — consecutive failure count (with TTL)
|
|
11
|
+
* - `linear:circuit:{workspaceId}:opened_at` — timestamp when circuit was opened
|
|
12
|
+
* - `linear:circuit:{workspaceId}:reset_timeout` — current reset timeout (for backoff)
|
|
13
|
+
*
|
|
14
|
+
* Implements CircuitBreakerStrategy from @supaku/agentfactory-linear
|
|
15
|
+
* so it can be injected into LinearAgentClient.
|
|
16
|
+
*/
|
|
17
|
+
import type { CircuitBreakerStrategy, CircuitBreakerConfig } from '@supaku/agentfactory-linear';
|
|
18
|
+
export interface RedisCircuitBreakerConfig extends CircuitBreakerConfig {
|
|
19
|
+
/** Workspace-specific key prefix */
|
|
20
|
+
workspaceId: string;
|
|
21
|
+
}
|
|
22
|
+
export declare class RedisCircuitBreaker implements CircuitBreakerStrategy {
|
|
23
|
+
private readonly config;
|
|
24
|
+
private readonly keyPrefix;
|
|
25
|
+
constructor(config: Partial<RedisCircuitBreakerConfig> & {
|
|
26
|
+
workspaceId: string;
|
|
27
|
+
});
|
|
28
|
+
private get stateKey();
|
|
29
|
+
private get failuresKey();
|
|
30
|
+
private get openedAtKey();
|
|
31
|
+
private get resetTimeoutKey();
|
|
32
|
+
/**
|
|
33
|
+
* Check if a call is allowed to proceed.
|
|
34
|
+
*/
|
|
35
|
+
canProceed(): Promise<boolean>;
|
|
36
|
+
/**
|
|
37
|
+
* Record a successful API call. Resets the circuit to closed.
|
|
38
|
+
*/
|
|
39
|
+
recordSuccess(): Promise<void>;
|
|
40
|
+
/**
|
|
41
|
+
* Record an auth failure. May trip the circuit to open.
|
|
42
|
+
*/
|
|
43
|
+
recordAuthFailure(statusCode: number): Promise<void>;
|
|
44
|
+
/**
|
|
45
|
+
* Check if an error is an auth/rate-limit error.
|
|
46
|
+
* Reuses the same detection logic as the in-memory CircuitBreaker.
|
|
47
|
+
*/
|
|
48
|
+
isAuthError(error: unknown): boolean;
|
|
49
|
+
/**
|
|
50
|
+
* Reset the circuit breaker to closed state.
|
|
51
|
+
*/
|
|
52
|
+
reset(): Promise<void>;
|
|
53
|
+
/**
|
|
54
|
+
* Get diagnostic info for monitoring.
|
|
55
|
+
*/
|
|
56
|
+
getStatus(): Promise<{
|
|
57
|
+
state: string;
|
|
58
|
+
failures: number;
|
|
59
|
+
openedAt: number | null;
|
|
60
|
+
currentResetTimeoutMs: number;
|
|
61
|
+
}>;
|
|
62
|
+
}
|
|
63
|
+
/**
|
|
64
|
+
* Create a Redis circuit breaker for a specific workspace.
|
|
65
|
+
*/
|
|
66
|
+
export declare function createRedisCircuitBreaker(workspaceId: string, config?: Partial<Omit<RedisCircuitBreakerConfig, 'workspaceId'>>): RedisCircuitBreaker;
|
|
67
|
+
//# sourceMappingURL=redis-circuit-breaker.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"redis-circuit-breaker.d.ts","sourceRoot":"","sources":["../../src/redis-circuit-breaker.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AAIH,OAAO,KAAK,EAAE,sBAAsB,EAAE,oBAAoB,EAAE,MAAM,6BAA6B,CAAA;AAI/F,MAAM,WAAW,yBAA0B,SAAQ,oBAAoB;IACrE,oCAAoC;IACpC,WAAW,EAAE,MAAM,CAAA;CACpB;AA6GD,qBAAa,mBAAoB,YAAW,sBAAsB;IAChE,OAAO,CAAC,QAAQ,CAAC,MAAM,CAA2B;IAClD,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAQ;gBAEtB,MAAM,EAAE,OAAO,CAAC,yBAAyB,CAAC,GAAG;QAAE,WAAW,EAAE,MAAM,CAAA;KAAE;IAKhF,OAAO,KAAK,QAAQ,GAEnB;IACD,OAAO,KAAK,WAAW,GAEtB;IACD,OAAO,KAAK,WAAW,GAEtB;IACD,OAAO,KAAK,eAAe,GAE1B;IAED;;OAEG;IACG,UAAU,IAAI,OAAO,CAAC,OAAO,CAAC;IAuBpC;;OAEG;IACG,aAAa,IAAI,OAAO,CAAC,IAAI,CAAC;IAgBpC;;OAEG;IACG,iBAAiB,CAAC,UAAU,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAkC1D;;;OAGG;IACH,WAAW,CAAC,KAAK,EAAE,OAAO,GAAG,OAAO;IAsCpC;;OAEG;IACG,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAgB5B;;OAEG;IACG,SAAS,IAAI,OAAO,CAAC;QACzB,KAAK,EAAE,MAAM,CAAA;QACb,QAAQ,EAAE,MAAM,CAAA;QAChB,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAA;QACvB,qBAAqB,EAAE,MAAM,CAAA;KAC9B,CAAC;CA2BH;AAED;;GAEG;AACH,wBAAgB,yBAAyB,CACvC,WAAW,EAAE,MAAM,EACnB,MAAM,CAAC,EAAE,OAAO,CAAC,IAAI,CAAC,yBAAyB,EAAE,aAAa,CAAC,CAAC,GAC/D,mBAAmB,CAErB"}
|
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Redis Circuit Breaker
|
|
3
|
+
*
|
|
4
|
+
* Shares circuit breaker state across processes via Redis.
|
|
5
|
+
* All processes (dashboard, governor, CLI agents) see the same
|
|
6
|
+
* circuit state for a workspace.
|
|
7
|
+
*
|
|
8
|
+
* Redis keys:
|
|
9
|
+
* - `linear:circuit:{workspaceId}:state` — 'closed' | 'open' | 'half-open'
|
|
10
|
+
* - `linear:circuit:{workspaceId}:failures` — consecutive failure count (with TTL)
|
|
11
|
+
* - `linear:circuit:{workspaceId}:opened_at` — timestamp when circuit was opened
|
|
12
|
+
* - `linear:circuit:{workspaceId}:reset_timeout` — current reset timeout (for backoff)
|
|
13
|
+
*
|
|
14
|
+
* Implements CircuitBreakerStrategy from @supaku/agentfactory-linear
|
|
15
|
+
* so it can be injected into LinearAgentClient.
|
|
16
|
+
*/
|
|
17
|
+
import { getRedisClient } from './redis.js';
|
|
18
|
+
import { createLogger } from './logger.js';
|
|
19
|
+
const log = createLogger('redis-circuit-breaker');
|
|
20
|
+
const DEFAULT_CONFIG = {
|
|
21
|
+
failureThreshold: 2,
|
|
22
|
+
resetTimeoutMs: 60_000,
|
|
23
|
+
maxResetTimeoutMs: 300_000,
|
|
24
|
+
backoffMultiplier: 2,
|
|
25
|
+
authErrorCodes: [400, 401, 403],
|
|
26
|
+
};
|
|
27
|
+
/**
|
|
28
|
+
* Lua script for atomic circuit breaker state check.
|
|
29
|
+
*
|
|
30
|
+
* KEYS[1] = state key
|
|
31
|
+
* KEYS[2] = opened_at key
|
|
32
|
+
* KEYS[3] = reset_timeout key
|
|
33
|
+
* ARGV[1] = current timestamp (ms)
|
|
34
|
+
* ARGV[2] = default reset timeout (ms)
|
|
35
|
+
*
|
|
36
|
+
* Returns: 1 if call can proceed, 0 if blocked, 2 if probe (half-open)
|
|
37
|
+
*/
|
|
38
|
+
const CAN_PROCEED_LUA = `
|
|
39
|
+
local stateKey = KEYS[1]
|
|
40
|
+
local openedAtKey = KEYS[2]
|
|
41
|
+
local resetTimeoutKey = KEYS[3]
|
|
42
|
+
local now = tonumber(ARGV[1])
|
|
43
|
+
local defaultResetTimeout = tonumber(ARGV[2])
|
|
44
|
+
|
|
45
|
+
local state = redis.call('GET', stateKey)
|
|
46
|
+
|
|
47
|
+
-- Closed or no state: allow
|
|
48
|
+
if state == false or state == 'closed' then
|
|
49
|
+
return 1
|
|
50
|
+
end
|
|
51
|
+
|
|
52
|
+
-- Open: check if reset timeout has elapsed
|
|
53
|
+
if state == 'open' then
|
|
54
|
+
local openedAt = tonumber(redis.call('GET', openedAtKey)) or 0
|
|
55
|
+
local resetTimeout = tonumber(redis.call('GET', resetTimeoutKey)) or defaultResetTimeout
|
|
56
|
+
|
|
57
|
+
if (now - openedAt) >= resetTimeout then
|
|
58
|
+
-- Transition to half-open: allow one probe
|
|
59
|
+
redis.call('SET', stateKey, 'half-open', 'EX', 3600)
|
|
60
|
+
return 2
|
|
61
|
+
end
|
|
62
|
+
|
|
63
|
+
return 0
|
|
64
|
+
end
|
|
65
|
+
|
|
66
|
+
-- Half-open: block (probe already in flight)
|
|
67
|
+
-- The first caller to see 'open' -> 'half-open' transition gets the probe
|
|
68
|
+
if state == 'half-open' then
|
|
69
|
+
return 0
|
|
70
|
+
end
|
|
71
|
+
|
|
72
|
+
return 1
|
|
73
|
+
`;
|
|
74
|
+
/**
|
|
75
|
+
* Lua script for recording auth failure.
|
|
76
|
+
*
|
|
77
|
+
* KEYS[1] = state key
|
|
78
|
+
* KEYS[2] = failures key
|
|
79
|
+
* KEYS[3] = opened_at key
|
|
80
|
+
* KEYS[4] = reset_timeout key
|
|
81
|
+
* ARGV[1] = failure threshold
|
|
82
|
+
* ARGV[2] = current timestamp (ms)
|
|
83
|
+
* ARGV[3] = default reset timeout (ms)
|
|
84
|
+
* ARGV[4] = backoff multiplier
|
|
85
|
+
* ARGV[5] = max reset timeout (ms)
|
|
86
|
+
*
|
|
87
|
+
* Returns: new state ('closed', 'open')
|
|
88
|
+
*/
|
|
89
|
+
const RECORD_FAILURE_LUA = `
|
|
90
|
+
local stateKey = KEYS[1]
|
|
91
|
+
local failuresKey = KEYS[2]
|
|
92
|
+
local openedAtKey = KEYS[3]
|
|
93
|
+
local resetTimeoutKey = KEYS[4]
|
|
94
|
+
local threshold = tonumber(ARGV[1])
|
|
95
|
+
local now = tonumber(ARGV[2])
|
|
96
|
+
local defaultResetTimeout = tonumber(ARGV[3])
|
|
97
|
+
local backoffMultiplier = tonumber(ARGV[4])
|
|
98
|
+
local maxResetTimeout = tonumber(ARGV[5])
|
|
99
|
+
|
|
100
|
+
local state = redis.call('GET', stateKey) or 'closed'
|
|
101
|
+
local failures = tonumber(redis.call('INCR', failuresKey))
|
|
102
|
+
redis.call('EXPIRE', failuresKey, 3600)
|
|
103
|
+
|
|
104
|
+
-- If half-open: probe failed, reopen with backoff
|
|
105
|
+
if state == 'half-open' then
|
|
106
|
+
local currentTimeout = tonumber(redis.call('GET', resetTimeoutKey)) or defaultResetTimeout
|
|
107
|
+
local newTimeout = math.min(currentTimeout * backoffMultiplier, maxResetTimeout)
|
|
108
|
+
redis.call('SET', stateKey, 'open', 'EX', 3600)
|
|
109
|
+
redis.call('SET', openedAtKey, tostring(now), 'EX', 3600)
|
|
110
|
+
redis.call('SET', resetTimeoutKey, tostring(newTimeout), 'EX', 3600)
|
|
111
|
+
return 'open'
|
|
112
|
+
end
|
|
113
|
+
|
|
114
|
+
-- If closed and at threshold: trip to open
|
|
115
|
+
if failures >= threshold then
|
|
116
|
+
redis.call('SET', stateKey, 'open', 'EX', 3600)
|
|
117
|
+
redis.call('SET', openedAtKey, tostring(now), 'EX', 3600)
|
|
118
|
+
redis.call('SET', resetTimeoutKey, tostring(defaultResetTimeout), 'EX', 3600)
|
|
119
|
+
return 'open'
|
|
120
|
+
end
|
|
121
|
+
|
|
122
|
+
return 'closed'
|
|
123
|
+
`;
|
|
124
|
+
export class RedisCircuitBreaker {
|
|
125
|
+
config;
|
|
126
|
+
keyPrefix;
|
|
127
|
+
constructor(config) {
|
|
128
|
+
this.config = { ...DEFAULT_CONFIG, ...config };
|
|
129
|
+
this.keyPrefix = `linear:circuit:${this.config.workspaceId}`;
|
|
130
|
+
}
|
|
131
|
+
get stateKey() {
|
|
132
|
+
return `${this.keyPrefix}:state`;
|
|
133
|
+
}
|
|
134
|
+
get failuresKey() {
|
|
135
|
+
return `${this.keyPrefix}:failures`;
|
|
136
|
+
}
|
|
137
|
+
get openedAtKey() {
|
|
138
|
+
return `${this.keyPrefix}:opened_at`;
|
|
139
|
+
}
|
|
140
|
+
get resetTimeoutKey() {
|
|
141
|
+
return `${this.keyPrefix}:reset_timeout`;
|
|
142
|
+
}
|
|
143
|
+
/**
|
|
144
|
+
* Check if a call is allowed to proceed.
|
|
145
|
+
*/
|
|
146
|
+
async canProceed() {
|
|
147
|
+
try {
|
|
148
|
+
const redis = getRedisClient();
|
|
149
|
+
const result = await redis.eval(CAN_PROCEED_LUA, 3, this.stateKey, this.openedAtKey, this.resetTimeoutKey, String(Date.now()), String(this.config.resetTimeoutMs));
|
|
150
|
+
// 1 = closed (allow), 2 = half-open probe (allow), 0 = blocked
|
|
151
|
+
return result === 1 || result === 2;
|
|
152
|
+
}
|
|
153
|
+
catch (err) {
|
|
154
|
+
// If Redis is down, allow the request (fail open for circuit breaker)
|
|
155
|
+
log.error('Redis circuit breaker error, failing open', {
|
|
156
|
+
error: err instanceof Error ? err.message : String(err),
|
|
157
|
+
});
|
|
158
|
+
return true;
|
|
159
|
+
}
|
|
160
|
+
}
|
|
161
|
+
/**
|
|
162
|
+
* Record a successful API call. Resets the circuit to closed.
|
|
163
|
+
*/
|
|
164
|
+
async recordSuccess() {
|
|
165
|
+
try {
|
|
166
|
+
const redis = getRedisClient();
|
|
167
|
+
const pipeline = redis.pipeline();
|
|
168
|
+
pipeline.set(this.stateKey, 'closed', 'EX', 3600);
|
|
169
|
+
pipeline.del(this.failuresKey);
|
|
170
|
+
pipeline.del(this.openedAtKey);
|
|
171
|
+
pipeline.del(this.resetTimeoutKey);
|
|
172
|
+
await pipeline.exec();
|
|
173
|
+
}
|
|
174
|
+
catch (err) {
|
|
175
|
+
log.error('Failed to record circuit breaker success', {
|
|
176
|
+
error: err instanceof Error ? err.message : String(err),
|
|
177
|
+
});
|
|
178
|
+
}
|
|
179
|
+
}
|
|
180
|
+
/**
|
|
181
|
+
* Record an auth failure. May trip the circuit to open.
|
|
182
|
+
*/
|
|
183
|
+
async recordAuthFailure(statusCode) {
|
|
184
|
+
if (!this.config.authErrorCodes.includes(statusCode)) {
|
|
185
|
+
return;
|
|
186
|
+
}
|
|
187
|
+
try {
|
|
188
|
+
const redis = getRedisClient();
|
|
189
|
+
const result = await redis.eval(RECORD_FAILURE_LUA, 4, this.stateKey, this.failuresKey, this.openedAtKey, this.resetTimeoutKey, String(this.config.failureThreshold), String(Date.now()), String(this.config.resetTimeoutMs), String(this.config.backoffMultiplier), String(this.config.maxResetTimeoutMs));
|
|
190
|
+
if (result === 'open') {
|
|
191
|
+
log.warn('Circuit breaker tripped to OPEN', {
|
|
192
|
+
workspaceId: this.config.workspaceId,
|
|
193
|
+
statusCode,
|
|
194
|
+
});
|
|
195
|
+
}
|
|
196
|
+
}
|
|
197
|
+
catch (err) {
|
|
198
|
+
log.error('Failed to record circuit breaker failure', {
|
|
199
|
+
error: err instanceof Error ? err.message : String(err),
|
|
200
|
+
});
|
|
201
|
+
}
|
|
202
|
+
}
|
|
203
|
+
/**
|
|
204
|
+
* Check if an error is an auth/rate-limit error.
|
|
205
|
+
* Reuses the same detection logic as the in-memory CircuitBreaker.
|
|
206
|
+
*/
|
|
207
|
+
isAuthError(error) {
|
|
208
|
+
if (typeof error !== 'object' || error === null)
|
|
209
|
+
return false;
|
|
210
|
+
const err = error;
|
|
211
|
+
// Check HTTP status code
|
|
212
|
+
const statusCode = (typeof err.status === 'number' ? err.status : undefined) ??
|
|
213
|
+
(typeof err.statusCode === 'number' ? err.statusCode : undefined) ??
|
|
214
|
+
(typeof err.response?.status === 'number'
|
|
215
|
+
? err.response.status
|
|
216
|
+
: undefined);
|
|
217
|
+
if (statusCode !== undefined && this.config.authErrorCodes.includes(statusCode)) {
|
|
218
|
+
return true;
|
|
219
|
+
}
|
|
220
|
+
// Check for GraphQL RATELIMITED
|
|
221
|
+
const extensions = err.extensions;
|
|
222
|
+
if (extensions?.code === 'RATELIMITED')
|
|
223
|
+
return true;
|
|
224
|
+
const errors = err.errors;
|
|
225
|
+
if (Array.isArray(errors)) {
|
|
226
|
+
for (const gqlError of errors) {
|
|
227
|
+
const ext = gqlError.extensions;
|
|
228
|
+
if (ext?.code === 'RATELIMITED')
|
|
229
|
+
return true;
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
// Check error message patterns
|
|
233
|
+
const message = err.message ?? '';
|
|
234
|
+
if (/access denied|unauthorized|forbidden|RATELIMITED/i.test(message)) {
|
|
235
|
+
return true;
|
|
236
|
+
}
|
|
237
|
+
return false;
|
|
238
|
+
}
|
|
239
|
+
/**
|
|
240
|
+
* Reset the circuit breaker to closed state.
|
|
241
|
+
*/
|
|
242
|
+
async reset() {
|
|
243
|
+
try {
|
|
244
|
+
const redis = getRedisClient();
|
|
245
|
+
const pipeline = redis.pipeline();
|
|
246
|
+
pipeline.set(this.stateKey, 'closed', 'EX', 3600);
|
|
247
|
+
pipeline.del(this.failuresKey);
|
|
248
|
+
pipeline.del(this.openedAtKey);
|
|
249
|
+
pipeline.del(this.resetTimeoutKey);
|
|
250
|
+
await pipeline.exec();
|
|
251
|
+
}
|
|
252
|
+
catch (err) {
|
|
253
|
+
log.error('Failed to reset circuit breaker', {
|
|
254
|
+
error: err instanceof Error ? err.message : String(err),
|
|
255
|
+
});
|
|
256
|
+
}
|
|
257
|
+
}
|
|
258
|
+
/**
|
|
259
|
+
* Get diagnostic info for monitoring.
|
|
260
|
+
*/
|
|
261
|
+
async getStatus() {
|
|
262
|
+
try {
|
|
263
|
+
const redis = getRedisClient();
|
|
264
|
+
const [state, failures, openedAt, resetTimeout] = await Promise.all([
|
|
265
|
+
redis.get(this.stateKey),
|
|
266
|
+
redis.get(this.failuresKey),
|
|
267
|
+
redis.get(this.openedAtKey),
|
|
268
|
+
redis.get(this.resetTimeoutKey),
|
|
269
|
+
]);
|
|
270
|
+
return {
|
|
271
|
+
state: state ?? 'closed',
|
|
272
|
+
failures: failures ? parseInt(failures, 10) : 0,
|
|
273
|
+
openedAt: openedAt ? parseInt(openedAt, 10) : null,
|
|
274
|
+
currentResetTimeoutMs: resetTimeout
|
|
275
|
+
? parseInt(resetTimeout, 10)
|
|
276
|
+
: this.config.resetTimeoutMs,
|
|
277
|
+
};
|
|
278
|
+
}
|
|
279
|
+
catch {
|
|
280
|
+
return {
|
|
281
|
+
state: 'unknown',
|
|
282
|
+
failures: -1,
|
|
283
|
+
openedAt: null,
|
|
284
|
+
currentResetTimeoutMs: this.config.resetTimeoutMs,
|
|
285
|
+
};
|
|
286
|
+
}
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
/**
|
|
290
|
+
* Create a Redis circuit breaker for a specific workspace.
|
|
291
|
+
*/
|
|
292
|
+
export function createRedisCircuitBreaker(workspaceId, config) {
|
|
293
|
+
return new RedisCircuitBreaker({ ...config, workspaceId });
|
|
294
|
+
}
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Redis Token Bucket Rate Limiter
|
|
3
|
+
*
|
|
4
|
+
* Atomic token bucket implementation using Redis + Lua script.
|
|
5
|
+
* All processes (dashboard, governor, CLI agents) share one bucket
|
|
6
|
+
* keyed by `linear:rate-limit:{workspaceId}`.
|
|
7
|
+
*
|
|
8
|
+
* Implements RateLimiterStrategy from @supaku/agentfactory-linear
|
|
9
|
+
* so it can be injected into LinearAgentClient.
|
|
10
|
+
*/
|
|
11
|
+
import type { RateLimiterStrategy } from '@supaku/agentfactory-linear';
|
|
12
|
+
export interface RedisTokenBucketConfig {
|
|
13
|
+
/** Redis key for this bucket (default: 'linear:rate-limit:default') */
|
|
14
|
+
key: string;
|
|
15
|
+
/** Maximum tokens (burst capacity). Default: 80 */
|
|
16
|
+
maxTokens: number;
|
|
17
|
+
/** Tokens added per second. Default: 1.5 (~90/min) */
|
|
18
|
+
refillRate: number;
|
|
19
|
+
/** Maximum time to wait for a token before throwing (ms). Default: 30_000 */
|
|
20
|
+
acquireTimeoutMs: number;
|
|
21
|
+
/** Polling interval when waiting for tokens (ms). Default: 500 */
|
|
22
|
+
pollIntervalMs: number;
|
|
23
|
+
}
|
|
24
|
+
export declare const DEFAULT_REDIS_RATE_LIMIT_CONFIG: RedisTokenBucketConfig;
|
|
25
|
+
export declare class RedisTokenBucket implements RateLimiterStrategy {
|
|
26
|
+
private readonly config;
|
|
27
|
+
constructor(config?: Partial<RedisTokenBucketConfig>);
|
|
28
|
+
/**
|
|
29
|
+
* Acquire a single token. Polls Redis until a token is available
|
|
30
|
+
* or the acquire timeout is reached.
|
|
31
|
+
*/
|
|
32
|
+
acquire(): Promise<void>;
|
|
33
|
+
/**
|
|
34
|
+
* Penalize the bucket after receiving a rate limit response.
|
|
35
|
+
* Drains all tokens and sets a penalty period.
|
|
36
|
+
*/
|
|
37
|
+
penalize(seconds: number): Promise<void>;
|
|
38
|
+
/**
|
|
39
|
+
* Try to acquire a token atomically. Returns true if acquired.
|
|
40
|
+
*/
|
|
41
|
+
private tryAcquire;
|
|
42
|
+
/**
|
|
43
|
+
* Get the current token count (for monitoring).
|
|
44
|
+
*/
|
|
45
|
+
getAvailableTokens(): Promise<number>;
|
|
46
|
+
}
|
|
47
|
+
/**
 * Create a Redis token bucket for a specific workspace.
 *
 * The bucket key is derived as `linear:rate-limit:{workspaceId}`, so every
 * process working against that workspace shares one bucket; `key` is
 * intentionally excluded from the accepted config overrides.
 */
export declare function createRedisTokenBucket(workspaceId: string, config?: Partial<Omit<RedisTokenBucketConfig, 'key'>>): RedisTokenBucket;
|
|
51
|
+
//# sourceMappingURL=redis-rate-limiter.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"redis-rate-limiter.d.ts","sourceRoot":"","sources":["../../src/redis-rate-limiter.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG;AAIH,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,6BAA6B,CAAA;AAItE,MAAM,WAAW,sBAAsB;IACrC,uEAAuE;IACvE,GAAG,EAAE,MAAM,CAAA;IACX,mDAAmD;IACnD,SAAS,EAAE,MAAM,CAAA;IACjB,sDAAsD;IACtD,UAAU,EAAE,MAAM,CAAA;IAClB,6EAA6E;IAC7E,gBAAgB,EAAE,MAAM,CAAA;IACxB,kEAAkE;IAClE,cAAc,EAAE,MAAM,CAAA;CACvB;AAED,eAAO,MAAM,+BAA+B,EAAE,sBAM7C,CAAA;AAyED,qBAAa,gBAAiB,YAAW,mBAAmB;IAC1D,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAwB;gBAEnC,MAAM,CAAC,EAAE,OAAO,CAAC,sBAAsB,CAAC;IAIpD;;;OAGG;IACG,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAqB9B;;;OAGG;IACG,QAAQ,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAkB9C;;OAEG;YACW,UAAU;IAqBxB;;OAEG;IACG,kBAAkB,IAAI,OAAO,CAAC,MAAM,CAAC;CAS5C;AAED;;GAEG;AACH,wBAAgB,sBAAsB,CACpC,WAAW,EAAE,MAAM,EACnB,MAAM,CAAC,EAAE,OAAO,CAAC,IAAI,CAAC,sBAAsB,EAAE,KAAK,CAAC,CAAC,GACpD,gBAAgB,CAKlB"}
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Redis Token Bucket Rate Limiter
|
|
3
|
+
*
|
|
4
|
+
* Atomic token bucket implementation using Redis + Lua script.
|
|
5
|
+
* All processes (dashboard, governor, CLI agents) share one bucket
|
|
6
|
+
* keyed by `linear:rate-limit:{workspaceId}`.
|
|
7
|
+
*
|
|
8
|
+
* Implements RateLimiterStrategy from @supaku/agentfactory-linear
|
|
9
|
+
* so it can be injected into LinearAgentClient.
|
|
10
|
+
*/
|
|
11
|
+
import { getRedisClient } from './redis.js';
|
|
12
|
+
import { createLogger } from './logger.js';
|
|
13
|
+
const log = createLogger('redis-rate-limiter');
|
|
14
|
+
// Default bucket sizing: an 80-token burst refilled at 1.5 tokens/sec
// (~90 requests/min sustained). acquire() waits up to 30s for a token,
// polling every 500ms. `key` is normally replaced per-workspace by
// createRedisTokenBucket().
export const DEFAULT_REDIS_RATE_LIMIT_CONFIG = {
    key: 'linear:rate-limit:default',
    maxTokens: 80, // burst capacity (bucket ceiling)
    refillRate: 1.5, // tokens added per second
    acquireTimeoutMs: 30_000, // max wait inside acquire() before throwing
    pollIntervalMs: 500, // delay between tryAcquire() retries
};
|
|
21
|
+
/**
 * Lua script for atomic token bucket acquire.
 *
 * Runs entirely server-side in Redis, so the read-modify-write of the
 * bucket state is atomic across every process sharing the key.
 *
 * KEYS[1] = bucket key (hash with fields: tokens, last_refill, penalty_until)
 * ARGV[1] = maxTokens
 * ARGV[2] = refillRate (tokens per second)
 * ARGV[3] = current timestamp (ms)
 *
 * Flow: initialize the hash on first use (full bucket, 1h TTL) ->
 * short-circuit with 0 while inside a penalty window -> refill
 * fractionally from elapsed wall time since last_refill -> if at least
 * one whole token remains, consume it and return 1; otherwise persist
 * the refreshed state and return 0. The 1h EXPIRE is re-armed on every
 * write so idle buckets eventually evict themselves.
 *
 * Returns: 1 if token acquired, 0 if no tokens available
 */
const ACQUIRE_LUA = `
local key = KEYS[1]
local maxTokens = tonumber(ARGV[1])
local refillRate = tonumber(ARGV[2])
local now = tonumber(ARGV[3])

-- Initialize bucket if it doesn't exist
local tokens = tonumber(redis.call('HGET', key, 'tokens'))
local lastRefill = tonumber(redis.call('HGET', key, 'last_refill'))
local penaltyUntil = tonumber(redis.call('HGET', key, 'penalty_until')) or 0

if tokens == nil then
tokens = maxTokens
lastRefill = now
redis.call('HMSET', key, 'tokens', tokens, 'last_refill', lastRefill, 'penalty_until', 0)
redis.call('EXPIRE', key, 3600)
end

-- Check if we're in a penalty period
if now < penaltyUntil then
return 0
end

-- Refill tokens based on elapsed time
local elapsed = (now - lastRefill) / 1000.0
if elapsed > 0 then
local newTokens = elapsed * refillRate
tokens = math.min(maxTokens, tokens + newTokens)
lastRefill = now
end

-- Try to acquire a token
if tokens >= 1 then
tokens = tokens - 1
redis.call('HMSET', key, 'tokens', tokens, 'last_refill', lastRefill)
redis.call('EXPIRE', key, 3600)
return 1
end

redis.call('HMSET', key, 'tokens', tokens, 'last_refill', lastRefill)
redis.call('EXPIRE', key, 3600)
return 0
`;
|
|
74
|
+
/**
 * Lua script for penalizing the bucket (after rate limit response).
 *
 * KEYS[1] = bucket key
 * ARGV[1] = penalty duration (seconds)
 * ARGV[2] = current timestamp (ms)
 *
 * Drains the bucket and blocks acquisition until the penalty expires.
 * 'last_refill' is advanced to the penalty end so that ACQUIRE_LUA's
 * refill (elapsed = now - last_refill) is measured from that point.
 * Leaving 'last_refill' untouched would credit the entire penalty
 * window as refill time, instantly restoring the bucket the moment
 * the penalty lifted and defeating the drain.
 */
const PENALIZE_LUA = `
local key = KEYS[1]
local penaltySeconds = tonumber(ARGV[1])
local now = tonumber(ARGV[2])
local penaltyUntil = now + (penaltySeconds * 1000)

redis.call('HMSET', key, 'tokens', 0, 'penalty_until', penaltyUntil, 'last_refill', penaltyUntil)
redis.call('EXPIRE', key, 3600)
return 1
`;
|
|
90
|
+
/**
 * Distributed token bucket shared through Redis.
 *
 * All token accounting happens inside Lua scripts, so acquire/penalize are
 * atomic across every process using the same bucket key. When Redis is
 * unreachable the limiter fails open: requests are allowed rather than
 * blocked by an infrastructure outage.
 */
export class RedisTokenBucket {
    config;
    /** Merge caller overrides over DEFAULT_REDIS_RATE_LIMIT_CONFIG. */
    constructor(config) {
        this.config = { ...DEFAULT_REDIS_RATE_LIMIT_CONFIG, ...config };
    }
    /**
     * Acquire a single token. Polls Redis until a token is available
     * or the acquire timeout is reached.
     * @throws Error when acquireTimeoutMs elapses without obtaining a token.
     */
    async acquire() {
        const deadline = Date.now() + this.config.acquireTimeoutMs;
        for (;;) {
            if (await this.tryAcquire()) {
                return;
            }
            if (Date.now() > deadline) {
                throw new Error(`RedisTokenBucket: timed out waiting for rate limit token after ${this.config.acquireTimeoutMs}ms`);
            }
            // Sleep one poll interval before re-checking the bucket.
            await new Promise((resolve) => setTimeout(resolve, this.config.pollIntervalMs));
        }
    }
    /**
     * Penalize the bucket after receiving a rate limit response.
     * Drains all tokens and sets a penalty period. Errors are logged,
     * never thrown, so a Redis outage cannot break the caller.
     */
    async penalize(seconds) {
        try {
            await getRedisClient().eval(PENALIZE_LUA, 1, this.config.key, String(seconds), String(Date.now()));
            log.warn('Rate limit penalty applied', { seconds, key: this.config.key });
        }
        catch (error) {
            log.error('Failed to apply rate limit penalty', {
                error: error instanceof Error ? error.message : String(error),
            });
        }
    }
    /**
     * Try to acquire a token atomically. Returns true if acquired.
     */
    async tryAcquire() {
        try {
            const outcome = await getRedisClient().eval(ACQUIRE_LUA, 1, this.config.key, String(this.config.maxTokens), String(this.config.refillRate), String(Date.now()));
            return outcome === 1;
        }
        catch (error) {
            // If Redis is down, allow the request (fail open for rate limiting)
            log.error('Redis rate limiter error, failing open', {
                error: error instanceof Error ? error.message : String(error),
            });
            return true;
        }
    }
    /**
     * Get the current token count (for monitoring).
     * Returns maxTokens for a not-yet-initialized bucket, -1 on Redis errors.
     */
    async getAvailableTokens() {
        try {
            const raw = await getRedisClient().hget(this.config.key, 'tokens');
            return raw ? parseFloat(raw) : this.config.maxTokens;
        }
        catch {
            return -1;
        }
    }
}
|
|
160
|
+
/**
 * Create a Redis token bucket for a specific workspace.
 * The key is always derived as `linear:rate-limit:{workspaceId}`, so any
 * `key` present in `config` is overridden by the derived one.
 */
export function createRedisTokenBucket(workspaceId, config) {
    const bucketKey = `linear:rate-limit:${workspaceId}`;
    return new RedisTokenBucket({ ...config, key: bucketKey });
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@supaku/agentfactory-server",
|
|
3
|
-
"version": "0.7.
|
|
3
|
+
"version": "0.7.19",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"description": "Webhook server and distributed worker pool for AgentFactory — Redis queues, issue locks, session management",
|
|
6
6
|
"author": "Supaku (https://supaku.com)",
|
|
@@ -44,8 +44,8 @@
|
|
|
44
44
|
],
|
|
45
45
|
"dependencies": {
|
|
46
46
|
"ioredis": "^5.4.2",
|
|
47
|
-
"@supaku/agentfactory": "0.7.
|
|
48
|
-
"@supaku/agentfactory
|
|
47
|
+
"@supaku/agentfactory-linear": "0.7.19",
|
|
48
|
+
"@supaku/agentfactory": "0.7.19"
|
|
49
49
|
},
|
|
50
50
|
"devDependencies": {
|
|
51
51
|
"@types/node": "^22.5.4",
|