cooper-stack 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. package/dist/ai.d.ts +60 -0
  2. package/dist/ai.d.ts.map +1 -0
  3. package/dist/ai.js +66 -0
  4. package/dist/ai.js.map +1 -0
  5. package/dist/api.d.ts +31 -0
  6. package/dist/api.d.ts.map +1 -0
  7. package/dist/api.js +40 -0
  8. package/dist/api.js.map +1 -0
  9. package/dist/auth.d.ts +13 -0
  10. package/dist/auth.d.ts.map +1 -0
  11. package/dist/auth.js +16 -0
  12. package/dist/auth.js.map +1 -0
  13. package/dist/bridge.d.ts +15 -0
  14. package/dist/bridge.d.ts.map +1 -0
  15. package/dist/bridge.js +217 -0
  16. package/dist/bridge.js.map +1 -0
  17. package/dist/cache.d.ts +27 -0
  18. package/dist/cache.d.ts.map +1 -0
  19. package/dist/cache.js +69 -0
  20. package/dist/cache.js.map +1 -0
  21. package/dist/cron.d.ts +22 -0
  22. package/dist/cron.d.ts.map +1 -0
  23. package/dist/cron.js +26 -0
  24. package/dist/cron.js.map +1 -0
  25. package/dist/db.d.ts +46 -0
  26. package/dist/db.d.ts.map +1 -0
  27. package/dist/db.js +158 -0
  28. package/dist/db.js.map +1 -0
  29. package/dist/error.d.ts +18 -0
  30. package/dist/error.d.ts.map +1 -0
  31. package/dist/error.js +30 -0
  32. package/dist/error.js.map +1 -0
  33. package/dist/index.d.ts +18 -0
  34. package/dist/index.d.ts.map +1 -0
  35. package/dist/index.js +18 -0
  36. package/dist/index.js.map +1 -0
  37. package/dist/islands.d.ts +16 -0
  38. package/dist/islands.d.ts.map +1 -0
  39. package/dist/islands.js +23 -0
  40. package/dist/islands.js.map +1 -0
  41. package/dist/middleware.d.ts +20 -0
  42. package/dist/middleware.d.ts.map +1 -0
  43. package/dist/middleware.js +26 -0
  44. package/dist/middleware.js.map +1 -0
  45. package/dist/nats.d.ts +46 -0
  46. package/dist/nats.d.ts.map +1 -0
  47. package/dist/nats.js +157 -0
  48. package/dist/nats.js.map +1 -0
  49. package/dist/pubsub.d.ts +27 -0
  50. package/dist/pubsub.d.ts.map +1 -0
  51. package/dist/pubsub.js +152 -0
  52. package/dist/pubsub.js.map +1 -0
  53. package/dist/queue.d.ts +39 -0
  54. package/dist/queue.d.ts.map +1 -0
  55. package/dist/queue.js +298 -0
  56. package/dist/queue.js.map +1 -0
  57. package/dist/rateLimit.d.ts +29 -0
  58. package/dist/rateLimit.d.ts.map +1 -0
  59. package/dist/rateLimit.js +70 -0
  60. package/dist/rateLimit.js.map +1 -0
  61. package/dist/registry.d.ts +75 -0
  62. package/dist/registry.d.ts.map +1 -0
  63. package/dist/registry.js +41 -0
  64. package/dist/registry.js.map +1 -0
  65. package/dist/secrets.d.ts +10 -0
  66. package/dist/secrets.d.ts.map +1 -0
  67. package/dist/secrets.js +35 -0
  68. package/dist/secrets.js.map +1 -0
  69. package/dist/ssr.d.ts +53 -0
  70. package/dist/ssr.d.ts.map +1 -0
  71. package/dist/ssr.js +39 -0
  72. package/dist/ssr.js.map +1 -0
  73. package/dist/storage.d.ts +28 -0
  74. package/dist/storage.d.ts.map +1 -0
  75. package/dist/storage.js +61 -0
  76. package/dist/storage.js.map +1 -0
  77. package/package.json +40 -0
  78. package/src/ai.ts +99 -0
  79. package/src/api.ts +56 -0
  80. package/src/auth.ts +16 -0
  81. package/src/bridge.ts +267 -0
  82. package/src/cache.ts +86 -0
  83. package/src/cron.ts +32 -0
  84. package/src/db.ts +211 -0
  85. package/src/error.ts +44 -0
  86. package/src/index.ts +17 -0
  87. package/src/islands.ts +28 -0
  88. package/src/middleware.ts +27 -0
  89. package/src/nats.ts +186 -0
  90. package/src/pubsub.ts +208 -0
  91. package/src/queue.ts +414 -0
  92. package/src/rateLimit.ts +89 -0
  93. package/src/registry.ts +98 -0
  94. package/src/secrets.ts +40 -0
  95. package/src/ssr.ts +58 -0
  96. package/src/storage.ts +79 -0
  97. package/tsconfig.json +17 -0
package/src/pubsub.ts ADDED
@@ -0,0 +1,208 @@
1
+ import { registry } from "./registry.js";
2
+ import {
3
+ ensureConnected,
4
+ getJetStream,
5
+ getJetStreamManager,
6
+ jsonCodec,
7
+ streamName,
8
+ consumerName,
9
+ ensureStream,
10
+ } from "./nats.js";
11
+ import { headers as natsHeaders, AckPolicy } from "nats";
12
+
13
/** Options controlling delivery semantics for a topic. */
export interface TopicConfig {
  /**
   * Delivery guarantee. "exactly-once" enables JetStream publish-side
   * deduplication via the Nats-Msg-Id header (see createDedup below);
   * otherwise messages are published without a dedup ID.
   */
  deliveryGuarantee?: "at-least-once" | "exactly-once";
  /**
   * Payload field whose value is used as the dedup/ordering key when
   * deliveryGuarantee is "exactly-once". Without it, the dedup ID is a
   * hash of the whole payload.
   */
  orderBy?: string;
}
17
+
18
/** Configuration for a named topic subscriber. */
export interface SubscribeConfig {
  /**
   * Max in-flight (unacked) messages for this subscriber — mapped to the
   * JetStream consumer's max_ack_pending. Defaults to 1.
   */
  concurrency?: number;
  /** Invoked once per delivered message with the JSON-decoded payload. */
  handler: (data: any) => Promise<void>;
}
22
+
23
/** Handle returned by `topic()`: publish messages and register subscribers. */
export interface Topic<T> {
  /** Publish a payload (JetStream when connected; in-memory fallback otherwise). */
  publish(data: T): Promise<void>;
  /** Register a named, durable subscriber; returns a subscription descriptor. */
  subscribe(name: string, config: SubscribeConfig): any;
}
27
+
28
+ /**
29
+ * Declare a typed Pub/Sub topic.
30
+ *
31
+ * ```ts
32
+ * export const UserSignedUp = topic<{ userId: string; email: string }>(
33
+ * "user-signed-up",
34
+ * { deliveryGuarantee: "at-least-once" }
35
+ * );
36
+ * ```
37
+ *
38
+ * Local dev uses embedded NATS with JetStream for durable delivery.
39
+ * Falls back to in-memory if NATS is unavailable.
40
+ */
41
export function topic<T = any>(name: string, config?: TopicConfig): Topic<T> {
  // In-memory subscriber registry — used both for the non-NATS fallback
  // delivery path and for the public registry entry below.
  const subscribers = new Map<string, { handler: Function; options: any }>();
  const useDedup = config?.deliveryGuarantee === "exactly-once";
  const subject = `cooper.topic.${name}`;

  // Track whether JetStream consumers have been started (keyed by subscriber
  // name) so repeated subscribe() calls don't spawn duplicate consume loops.
  const activeConsumers = new Set<string>();

  /**
   * Start a JetStream pull consumer for a subscriber.
   * Runs in the background, processing messages until the connection closes.
   * Creates the durable consumer if it does not already exist.
   */
  async function startConsumer(
    subName: string,
    handler: Function,
    concurrency: number
  ): Promise<void> {
    if (activeConsumers.has(subName)) return;
    activeConsumers.add(subName);

    const js = getJetStream();
    const jsm = getJetStreamManager();
    // NOTE(review): if NATS drops between ensureConnected() and here, we
    // return silently and the subscriber never starts — confirm intended.
    if (!js || !jsm) return;

    const stream = streamName(name);
    const durable = consumerName(subName);

    // Ensure consumer exists: info() throws when absent, then we create it.
    try {
      await jsm.consumers.info(stream, durable);
    } catch {
      await jsm.consumers.add(stream, {
        durable_name: durable,
        ack_policy: AckPolicy.Explicit,
        // concurrency maps to max in-flight unacked messages.
        max_ack_pending: concurrency,
        filter_subject: subject,
      });
    }

    const consumer = await js.consumers.get(stream, durable);

    // Process messages in the background (fire-and-forget IIFE; errors are
    // logged, not propagated to the subscriber).
    (async () => {
      try {
        const messages = await consumer.consume();
        for await (const msg of messages) {
          try {
            const data = jsonCodec.decode(msg.data);
            await handler(data);
            msg.ack();
          } catch (err) {
            console.error(
              `[cooper] Subscriber "${subName}" on topic "${name}" failed:`,
              err
            );
            // NAK with delay for retry (5 second backoff)
            msg.nak(5000);
          }
        }
      } catch (err: any) {
        // Consumer iteration ended (connection closed, etc.) — allow a
        // future subscribe()/startConsumer() to restart it.
        activeConsumers.delete(subName);
        if (!err.message?.includes("closed")) {
          console.error(
            `[cooper] Consumer "${subName}" on topic "${name}" stopped:`,
            err
          );
        }
      }
    })();
  }

  const t: Topic<T> = {
    async publish(data: T) {
      const connected = await ensureConnected();

      if (connected) {
        const js = getJetStream();
        if (js) {
          await ensureStream(name, { dedup: useDedup });

          // Only attach dedup headers for exactly-once object payloads;
          // primitives can't carry an orderBy field.
          const headers =
            useDedup && data && typeof data === "object"
              ? createDedup(data as any, config?.orderBy)
              : undefined;

          await js.publish(subject, jsonCodec.encode(data), { headers });
          return;
        }
      }

      // Fallback: in-memory delivery — invoke each registered handler
      // directly; one failing subscriber does not block the others.
      for (const [subName, sub] of subscribers) {
        try {
          await sub.handler(data);
        } catch (err) {
          console.error(`[cooper] Subscriber "${subName}" failed:`, err);
        }
      }
    },

    subscribe(subName: string, subConfig: SubscribeConfig) {
      subscribers.set(subName, {
        handler: subConfig.handler,
        options: subConfig,
      });

      // Re-register so the registry sees the updated subscriber map.
      registry.registerTopic(name, {
        name,
        subscribers,
      });

      // Start JetStream consumer in the background; subscribe() itself
      // stays synchronous. Startup failures are logged, not thrown.
      const concurrency = subConfig.concurrency ?? 1;
      ensureConnected().then(async (connected) => {
        if (connected) {
          await ensureStream(name, { dedup: useDedup });
          startConsumer(subName, subConfig.handler, concurrency).catch(
            (err) => {
              console.error(
                `[cooper] Failed to start consumer "${subName}":`,
                err
              );
            }
          );
        }
      });

      // Descriptor consumed by the framework (see _cooper_type tag).
      return {
        _cooper_type: "subscription",
        topic: name,
        name: subName,
      };
    },
  };

  // Register the topic immediately, even before any subscribe() call.
  registry.registerTopic(name, { name, subscribers });

  return t;
}
181
+
182
+ /**
183
+ * Create NATS headers for exactly-once dedup.
184
+ * Uses Nats-Msg-Id header — JetStream deduplicates within the stream's
185
+ * duplicate_window based on this ID.
186
+ */
187
+ function createDedup(
188
+ data: Record<string, any>,
189
+ orderBy?: string
190
+ ): any {
191
+ const h = natsHeaders();
192
+
193
+ // Generate a deterministic dedup ID.
194
+ // If an ordering key is set, use that field's value as the dedup key.
195
+ // Otherwise, hash the entire payload for content-based dedup.
196
+ if (orderBy && data[orderBy] !== undefined) {
197
+ h.set("Nats-Msg-Id", `${orderBy}-${String(data[orderBy])}`);
198
+ } else {
199
+ const payload = JSON.stringify(data);
200
+ let hash = 0;
201
+ for (let i = 0; i < payload.length; i++) {
202
+ hash = ((hash << 5) - hash + payload.charCodeAt(i)) | 0;
203
+ }
204
+ h.set("Nats-Msg-Id", `msg-${Math.abs(hash)}`);
205
+ }
206
+
207
+ return h;
208
+ }
package/src/queue.ts ADDED
@@ -0,0 +1,414 @@
1
+ import { registry } from "./registry.js";
2
+ import {
3
+ ensureConnected,
4
+ getJetStream,
5
+ getJetStreamManager,
6
+ jsonCodec,
7
+ consumerName,
8
+ ensureQueueStream,
9
+ ensureDLQStream,
10
+ } from "./nats.js";
11
+ import { headers as natsHeaders, AckPolicy } from "nats";
12
+
13
/** Declarative options for a job queue. */
export interface QueueConfig {
  /** Max jobs processed concurrently (default 1). */
  concurrency?: number;
  /** Retry attempts after the first failure (default 0 = no retries). */
  retries?: number;
  /** Backoff between retries: fixed 1s, or exponential 1s/2s/4s… (default fixed). */
  retryDelay?: "fixed" | "exponential";
  /** Per-job timeout as a duration string, e.g. "30s". Unset = no timeout. */
  timeout?: string;
  /** Name of a dead-letter queue receiving jobs that exhaust their retries. */
  deadLetter?: string;
}
20
+
21
/** Per-job options accepted by `enqueue()`. */
export interface EnqueueOptions {
  /**
   * Delay before processing, e.g. "10s". Honored by the in-memory fallback;
   * on JetStream it is only recorded as a Cooper-Delay-Until header.
   */
  delay?: string;
  /**
   * Priority hint. Only the in-memory fallback reorders by priority;
   * JetStream just records it as a Cooper-Priority header.
   */
  priority?: "low" | "normal" | "high";
  /** Idempotency key — set as Nats-Msg-Id so duplicate enqueues are dropped. */
  dedupeKey?: string;
}
26
+
27
/** Handle returned by `queue()`. */
export interface QueueClient<T> {
  /** Add a job to the queue. */
  enqueue(data: T, opts?: EnqueueOptions): Promise<void>;
  /** Register the worker that processes jobs; returns a worker descriptor. */
  worker(
    name: string,
    config: {
      /** Processes one job; a rejection triggers the retry/DLQ path. */
      handler: (data: T) => Promise<void>;
      /** Called after a job exhausts all retries. */
      onFailure?: (data: T, error: Error) => Promise<void>;
    }
  ): any;
  /**
   * List pending jobs. On JetStream this returns placeholder entries sized
   * by the stream's message count, not real payloads.
   */
  list(): Promise<{ id: string; data: T }[]>;
  /** Remove a job by id. Only effective for the in-memory fallback. */
  delete(id: string): Promise<void>;
}
39
+
40
+ const parseDuration = (dur: string): number => {
41
+ const m = dur.match(/^(\d+)(s|m|h|d)$/);
42
+ if (!m) return 0;
43
+ const mult: Record<string, number> = {
44
+ s: 1000,
45
+ m: 60000,
46
+ h: 3600000,
47
+ d: 86400000,
48
+ };
49
+ return parseInt(m[1]) * (mult[m[2]] ?? 1000);
50
+ };
51
+
52
+ /**
53
+ * Declare a job queue.
54
+ *
55
+ * ```ts
56
+ * export const EmailQueue = queue<{ to: string; subject: string; body: string }>(
57
+ * "email-queue",
58
+ * { concurrency: 10, retries: 3, retryDelay: "exponential", timeout: "30s", deadLetter: "email-dlq" }
59
+ * );
60
+ * ```
61
+ *
62
+ * Local dev uses NATS JetStream for persistent, durable queues.
63
+ * Falls back to in-memory if NATS is unavailable.
64
+ */
65
export function queue<T = any>(
  name: string,
  config?: QueueConfig
): QueueClient<T> {
  const subject = `cooper.queue.${name}`;
  const concurrency = config?.concurrency ?? 1;
  const maxRetries = config?.retries ?? 0;
  // 0 means "no timeout" (see executeWithTimeout).
  const timeoutMs = config?.timeout ? parseDuration(config.timeout) : 0;
  const dlqName = config?.deadLetter;

  // In-memory fallback state — only used when NATS is unavailable.
  const memJobs: {
    id: string;
    data: T;
    attempts: number;
    priority: string;
    scheduledAt: number;
  }[] = [];
  let memProcessing = false;
  let workerHandler: ((data: T) => Promise<void>) | null = null;
  let failureHandler: ((data: T, error: Error) => Promise<void>) | null = null;
  let consumerStarted = false;

  // Track retry attempts per message (NATS redelivers original message on NAK).
  // Keyed by stream sequence number; entries are removed on final ack.
  const attemptTracker = new Map<number, number>();

  // In-memory fallback processor: drains memJobs in batches of `concurrency`.
  // NOTE(review): when every remaining job has scheduledAt in the future,
  // this while-loop splices and re-pushes them without yielding — a hot spin
  // until the earliest scheduledAt passes. Confirm this is acceptable for
  // local dev, or add a sleep/re-schedule.
  const processMemQueue = async () => {
    if (memProcessing || !workerHandler) return;
    memProcessing = true;

    while (memJobs.length > 0) {
      const batch = memJobs.splice(0, concurrency);
      await Promise.allSettled(
        batch.map(async (job) => {
          // Not due yet — put it back for a later pass.
          if (job.scheduledAt > Date.now()) {
            memJobs.push(job);
            return;
          }
          try {
            await executeWithTimeout(workerHandler!, job.data);
          } catch (err) {
            job.attempts++;
            if (job.attempts <= maxRetries) {
              // Backoff: 1s fixed, or 1s/2s/4s… exponential.
              const delay =
                config?.retryDelay === "exponential"
                  ? Math.pow(2, job.attempts - 1) * 1000
                  : 1000;
              job.scheduledAt = Date.now() + delay;
              memJobs.push(job);
            } else if (failureHandler) {
              await failureHandler(job.data, err as Error);
            }
          }
        })
      );
    }

    memProcessing = false;
  };

  // Run the handler, rejecting after timeoutMs if configured.
  // NOTE(review): the timeout timer is never cleared on success, so each job
  // keeps a pending timer alive for up to timeoutMs — confirm intended.
  async function executeWithTimeout(
    handler: (data: T) => Promise<void>,
    data: T
  ): Promise<void> {
    if (!timeoutMs) {
      return handler(data);
    }
    const result = await Promise.race([
      handler(data),
      new Promise<never>((_, reject) =>
        setTimeout(
          () => reject(new Error(`Job timed out after ${config!.timeout}`)),
          timeoutMs
        )
      ),
    ]);
    return result;
  }

  /**
   * Start a JetStream consumer to process queue jobs.
   * Creates the durable consumer on first use, then consumes in the
   * background until the connection closes.
   */
  async function startConsumer(
    workerName: string,
    handler: (data: T) => Promise<void>,
    onFailure?: (data: T, error: Error) => Promise<void>
  ): Promise<void> {
    if (consumerStarted) return;
    consumerStarted = true;

    const js = getJetStream();
    const jsm = getJetStreamManager();
    if (!js || !jsm) return;

    // Sanitized stream name, e.g. "QUEUE_EMAIL_QUEUE".
    const streamName =
      "QUEUE_" + name.replace(/[^a-zA-Z0-9_-]/g, "_").toUpperCase();
    const durable = consumerName(workerName);

    // Ensure consumer exists: info() throws when absent, then we create it.
    try {
      await jsm.consumers.info(streamName, durable);
    } catch {
      await jsm.consumers.add(streamName, {
        durable_name: durable,
        ack_policy: AckPolicy.Explicit,
        max_ack_pending: concurrency,
        filter_subject: subject,
        // Redeliver unacked messages after ack_wait
        ack_wait: 30 * 1_000_000_000, // 30 seconds in nanos
      });
    }

    const consumer = await js.consumers.get(streamName, durable);

    // Process jobs in the background (fire-and-forget IIFE).
    (async () => {
      try {
        const messages = await consumer.consume();
        for await (const msg of messages) {
          let jobData: T;
          try {
            jobData = jsonCodec.decode(msg.data) as T;
          } catch {
            // Malformed message — ack to remove from queue
            msg.ack();
            continue;
          }

          // Track retries using sequence number — msg.seq is the stream seq
          const seq = msg.seq;
          const attempts = attemptTracker.get(seq) ?? 0;

          try {
            await executeWithTimeout(handler, jobData);
            msg.ack();
            attemptTracker.delete(seq);
          } catch (err) {
            const nextAttempt = attempts + 1;
            attemptTracker.set(seq, nextAttempt);

            if (nextAttempt <= maxRetries) {
              // NAK with backoff delay for retry
              const delay =
                config?.retryDelay === "exponential"
                  ? Math.pow(2, nextAttempt - 1) * 1000
                  : 1000;
              msg.nak(delay);
            } else {
              // Max retries exceeded — move to DLQ if configured
              if (dlqName) {
                try {
                  await moveToDLQ(dlqName, name, jobData, err as Error);
                } catch (dlqErr) {
                  console.error(
                    `[cooper] Failed to move job to DLQ "${dlqName}":`,
                    dlqErr
                  );
                }
              }

              if (onFailure) {
                try {
                  await onFailure(jobData, err as Error);
                } catch (failErr) {
                  console.error(
                    `[cooper] onFailure handler for queue "${name}" threw:`,
                    failErr
                  );
                }
              }

              // Ack to remove from main queue (it's in DLQ now)
              msg.ack();
              attemptTracker.delete(seq);
            }
          }
        }
      } catch (err: any) {
        // Consume loop ended — allow a restart on the next worker() call.
        consumerStarted = false;
        if (!err.message?.includes("closed")) {
          console.error(
            `[cooper] Queue consumer "${name}" stopped:`,
            err
          );
        }
      }
    })();
  }

  const client: QueueClient<T> = {
    async enqueue(data: T, opts?: EnqueueOptions) {
      const connected = await ensureConnected();

      if (connected) {
        const js = getJetStream();
        if (js) {
          await ensureQueueStream(name, { dedup: !!opts?.dedupeKey });

          const h = natsHeaders();

          // Dedup key
          if (opts?.dedupeKey) {
            h.set("Nats-Msg-Id", opts.dedupeKey);
          }

          // Priority as header (for visibility — JetStream doesn't natively prioritize)
          if (opts?.priority && opts.priority !== "normal") {
            h.set("Cooper-Priority", opts.priority);
          }

          // Initial attempt count
          h.set("Cooper-Attempts", "0");

          if (opts?.delay) {
            // For delayed jobs: JetStream doesn't have native delay.
            // We publish immediately but set a header; consumer-side can
            // NAK with delay or we use a timer.
            // NOTE(review): the consumer above does not currently read
            // Cooper-Delay-Until, so on JetStream the delay is not enforced.
            const delayMs = parseDuration(opts.delay);
            h.set("Cooper-Delay-Until", String(Date.now() + delayMs));
          }

          await js.publish(subject, jsonCodec.encode(data), { headers: h });
          return;
        }
      }

      // Fallback: in-memory
      const delay = opts?.delay ? parseDuration(opts.delay) : 0;
      memJobs.push({
        id: crypto.randomUUID(),
        data,
        attempts: 0,
        priority: opts?.priority ?? "normal",
        scheduledAt: Date.now() + delay,
      });

      // Stable-ish priority ordering: high before normal before low.
      const priorityOrder: Record<string, number> = {
        high: 0,
        normal: 1,
        low: 2,
      };
      memJobs.sort(
        (a, b) =>
          (priorityOrder[a.priority] ?? 1) - (priorityOrder[b.priority] ?? 1)
      );

      // Defer processing to the next tick so enqueue() returns first.
      setImmediate(() => processMemQueue());
    },

    worker(workerName: string, workerConfig) {
      workerHandler = workerConfig.handler;
      failureHandler = workerConfig.onFailure ?? null;

      registry.registerQueue(name, {
        name,
        options: config ?? {},
        worker: {
          name: workerName,
          handler: workerConfig.handler,
          onFailure: workerConfig.onFailure,
        },
      });

      // Start JetStream consumer (async; failures logged, not thrown).
      ensureConnected().then(async (connected) => {
        if (connected) {
          await ensureQueueStream(name);
          if (dlqName) await ensureDLQStream(dlqName);
          startConsumer(
            workerName,
            workerConfig.handler,
            workerConfig.onFailure
          ).catch((err) => {
            console.error(
              `[cooper] Failed to start queue consumer "${name}":`,
              err
            );
          });
        }
      });

      // Kick in-memory fallback processing
      setImmediate(() => processMemQueue());

      return { _cooper_type: "queue_worker", queue: name, name: workerName };
    },

    async list() {
      const connected = await ensureConnected();

      if (connected) {
        const jsm = getJetStreamManager();
        if (jsm) {
          const streamName =
            "QUEUE_" + name.replace(/[^a-zA-Z0-9_-]/g, "_").toUpperCase();
          try {
            const info = await jsm.streams.info(streamName);
            // Return stream message count as approximation
            // JetStream doesn't support listing individual messages directly
            return Array.from(
              { length: Number(info.state.messages) },
              (_, i) => ({
                id: `js-${i}`,
                data: {} as T,
              })
            );
          } catch {
            return [];
          }
        }
      }

      return memJobs.map((j) => ({ id: j.id, data: j.data }));
    },

    async delete(id: string) {
      // In-memory fallback only — no JetStream deletion path here.
      const idx = memJobs.findIndex((j) => j.id === id);
      if (idx >= 0) memJobs.splice(idx, 1);
    },
  };

  // Register the queue immediately, even before worker() is called.
  registry.registerQueue(name, { name, options: config ?? {} });

  return client;
}
392
+
393
+ /**
394
+ * Move a failed job to the dead-letter queue.
395
+ */
396
+ async function moveToDLQ<T>(
397
+ dlqName: string,
398
+ sourceQueue: string,
399
+ data: T,
400
+ error: Error
401
+ ): Promise<void> {
402
+ const js = getJetStream();
403
+ if (!js) return;
404
+
405
+ await ensureDLQStream(dlqName);
406
+
407
+ const dlqSubject = `cooper.dlq.${dlqName}`;
408
+ const h = natsHeaders();
409
+ h.set("Cooper-Source-Queue", sourceQueue);
410
+ h.set("Cooper-Error", error.message.slice(0, 256));
411
+ h.set("Cooper-Failed-At", new Date().toISOString());
412
+
413
+ await js.publish(dlqSubject, jsonCodec.encode(data), { headers: h });
414
+ }
package/src/rateLimit.ts ADDED
@@ -0,0 +1,89 @@
1
+ import { CooperError } from "./error.js";
2
+ import type { MiddlewareFn } from "./registry.js";
3
+
4
+ export interface RateLimitConfig {
5
+ /** Time window — e.g. "1m", "10s", "1h" */
6
+ window: string;
7
+ /** Max requests allowed within the window */
8
+ max: number;
9
+ /** Function to derive the rate limit key from the request. Defaults to IP address. */
10
+ key?: (req: any) => string;
11
+ }
12
+
13
+ function parseTTL(ttl: string): number {
14
+ const match = ttl.match(/^(\d+)(s|m|h|d)$/);
15
+ if (!match) return 60;
16
+ const [, num, unit] = match;
17
+ const multipliers: Record<string, number> = { s: 1, m: 60, h: 3600, d: 86400 };
18
+ return parseInt(num) * (multipliers[unit] ?? 60);
19
+ }
20
+
21
+ function parseWindowMs(window: string): number {
22
+ return parseTTL(window) * 1000;
23
+ }
24
+
25
// Lazily-created shared Redis/Valkey client (module-level singleton).
let redis: any = null;

/**
 * Get (or lazily create) the shared ioredis client.
 * Reads COOPER_VALKEY_URL, falling back to redis://localhost:6379.
 * NOTE(review): no connection error handling here — if the server is
 * unreachable, commands reject downstream; confirm that's intended.
 */
async function ensureRedis(): Promise<any> {
  if (redis) return redis;
  const Redis = (await import("ioredis")).default;
  const url = process.env.COOPER_VALKEY_URL ?? "redis://localhost:6379";
  redis = new Redis(url);
  return redis;
}
34
+
35
+ /**
36
+ * Create a rate limiting middleware.
37
+ *
38
+ * Can be used globally:
39
+ * ```ts
40
+ * cooper.use(rateLimit({ window: "1m", max: 100 }));
41
+ * ```
42
+ *
43
+ * Or per-route via middleware config:
44
+ * ```ts
45
+ * export default api({
46
+ * method: "POST",
47
+ * path: "/api/login",
48
+ * middleware: [rateLimit({ window: "1m", max: 5 })],
49
+ * handler: async (input) => { ... }
50
+ * });
51
+ * ```
52
+ */
53
+ export function rateLimit(config: RateLimitConfig): MiddlewareFn {
54
+ const { window, max, key: keyFn } = config;
55
+ const ttlSec = parseTTL(window);
56
+ const windowMs = parseWindowMs(window);
57
+
58
+ return async (req: any, next: (req: any) => Promise<any>) => {
59
+ const identifier = keyFn ? keyFn(req) : (req.ip ?? "unknown");
60
+ const redisKey = `cooper:rl:${identifier}:${Math.floor(Date.now() / windowMs)}`;
61
+
62
+ const r = await ensureRedis();
63
+ const count = await r.incr(redisKey);
64
+
65
+ // Set TTL on first increment
66
+ if (count === 1) {
67
+ await r.expire(redisKey, ttlSec);
68
+ }
69
+
70
+ if (count > max) {
71
+ // Calculate retry-after in seconds
72
+ const ttl = await r.ttl(redisKey);
73
+ const retryAfter = ttl > 0 ? ttl : ttlSec;
74
+
75
+ const error = new CooperError("RATE_LIMITED", `Rate limit exceeded. Try again in ${retryAfter}s.`);
76
+ error.retryAfter = retryAfter;
77
+ throw error;
78
+ }
79
+
80
+ // Attach rate limit headers info to request for downstream use
81
+ req._rateLimit = {
82
+ limit: max,
83
+ remaining: max - count,
84
+ reset: Math.ceil(Date.now() / 1000) + ttlSec,
85
+ };
86
+
87
+ return next(req);
88
+ };
89
+ }