@techstream/quark-core 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,214 @@
1
+ /**
2
+ * @techstream/quark-core - Job Queue Module
3
+ * Provides BullMQ queue initialization and management
4
+ */
5
+
6
+ import { Queue, QueueEvents, Worker } from "bullmq";
7
+ import { ServiceError } from "../errors.js";
8
+
9
/**
 * Default Redis configuration, assembled from REDIS_* environment variables
 * with localhost fallbacks. Shared by queues, workers, and event listeners.
 */
const DEFAULT_REDIS_CONFIG = {
  host: process.env.REDIS_HOST || "localhost",
  port: parseInt(process.env.REDIS_PORT || "6379", 10),
  db: parseInt(process.env.REDIS_DB || "0", 10),
  // BullMQ requires maxRetriesPerRequest to be null for blocking connections
  // (Workers/QueueEvents); ioredis' default of 20 makes BullMQ throw at startup.
  maxRetriesPerRequest: null,
  // Linear backoff capped at 2s between reconnection attempts.
  retryStrategy: (times) => {
    const delay = Math.min(times * 50, 2000);
    return delay;
  },
};
21
+
22
/**
 * Map to store singleton queue instances.
 * Keyed by queue name; populated by createQueue() and drained by
 * closeAllQueues(). Module-level so every importer shares the same registry.
 */
const queues = new Map();
26
+
27
/**
 * Creates or retrieves a singleton BullMQ queue.
 * Repeated calls with the same name return the cached instance.
 * @param {string} name - Queue name
 * @param {Object} options - Queue options
 * @param {Object} options.redis - Redis connection config
 * @param {Object} options.defaultJobOptions - Default job options
 * @returns {Queue} BullMQ Queue instance
 */
export const createQueue = (name, options = {}) => {
  const cached = queues.get(name);
  if (cached) {
    return cached;
  }

  const {
    redis = DEFAULT_REDIS_CONFIG,
    defaultJobOptions = {
      attempts: 3,
      backoff: { type: "exponential", delay: 2000 },
      removeOnComplete: true,
    },
    ...queueOptions
  } = options;

  const queue = new Queue(name, {
    connection: redis,
    defaultJobOptions,
    ...queueOptions,
  });

  queues.set(name, queue);

  // Surface connection/runtime errors instead of letting an unhandled
  // "error" event crash the process.
  queue.on("error", (error) => {
    console.error(`Queue "${name}" error:`, error);
  });

  return queue;
};
68
+
69
/**
 * Creates a job processor worker.
 * @param {string} queueName - Queue name
 * @param {Function} handler - Job handler function(job) => Promise<any>
 * @param {Object} options - Worker options
 * @param {Object} options.redis - Redis connection config
 * @param {number} options.concurrency - Concurrency level (default 1)
 * @returns {Worker} BullMQ Worker instance
 */
export const createWorker = (queueName, handler, options = {}) => {
  const {
    redis = DEFAULT_REDIS_CONFIG,
    concurrency = 1,
    ...workerOptions
  } = options;

  const worker = new Worker(queueName, handler, {
    connection: redis,
    concurrency,
    ...workerOptions,
  });

  worker.on("error", (error) => {
    console.error(`Worker for queue "${queueName}" error:`, error);
  });

  // BullMQ types the "failed" listener as (job: Job | undefined, error) —
  // the job can be undefined (e.g. stalled-job failures), so guard the access
  // instead of crashing inside the error handler itself.
  worker.on("failed", (job, error) => {
    console.error(
      `Job ${job?.id ?? "<unknown>"} in queue "${queueName}" failed:`,
      error.message,
    );
  });

  return worker;
};
104
+
105
/**
 * Gets or creates queue events listener.
 * @param {string} queueName - Queue name
 * @param {Object} options - QueueEvents options
 * @returns {QueueEvents} BullMQ QueueEvents instance
 */
export const createQueueEvents = (queueName, options = {}) => {
  const { redis = DEFAULT_REDIS_CONFIG, ...eventsOptions } = options;
  const config = { connection: redis, ...eventsOptions };
  return new QueueEvents(queueName, config);
};
119
+
120
/**
 * Utility to add a job to a queue with error handling.
 * Wraps any enqueue failure in a ServiceError so callers get a consistent
 * error shape.
 * @param {Queue} queue - BullMQ Queue instance
 * @param {string} jobName - Job name/type (e.g., 'send-welcome-email')
 * @param {Object} data - Job data
 * @param {Object} jobOptions - Job-specific options
 * @returns {Promise<Job>} Queued job
 * @throws {ServiceError} When the underlying queue.add() rejects
 */
export const addJob = async (queue, jobName, data, jobOptions = {}) => {
  try {
    return await queue.add(jobName, data, jobOptions);
  } catch (error) {
    throw new ServiceError(
      "BullMQ",
      `Failed to add job "${jobName}" to queue "${queue.name}": ${error.message}`,
      500,
    );
  }
};
140
+
141
/**
 * Utility to get job status and progress.
 * @param {Job} job - BullMQ Job instance
 * @returns {Promise<Object>} Job status information
 */
export const getJobStatus = async (job) => {
  return {
    id: job.id,
    state: await job.getState(),
    // BullMQ exposes progress as a property (set via job.updateProgress()),
    // not as a method — calling job.progress() throws a TypeError here.
    progress: job.progress,
    attempts: job.attemptsMade,
    maxAttempts: job.opts.attempts,
    data: job.data,
  };
};
156
+
157
/**
 * Cleans jobs from a queue (use with caution!).
 * NOTE: BullMQ's clean() removes only jobs of one state per call; the former
 * hard-coded `clean(grace, 100)` removed at most 100 *completed* jobs, which
 * is also the default here for backward compatibility. Pass `limit`/`status`
 * to clean more jobs or other states (e.g. "failed", "delayed").
 * @param {Queue} queue - BullMQ Queue instance
 * @param {Object} options - Clear options
 * @param {number} [options.grace=5000] - Grace period in ms; only jobs older than this are removed
 * @param {number} [options.limit=100] - Maximum number of jobs to remove
 * @param {string} [options.status="completed"] - Job state to clean
 * @returns {Promise<void>}
 */
export const clearQueue = async (queue, options = {}) => {
  const { grace = 5000, limit = 100, status = "completed" } = options;
  try {
    await queue.clean(grace, limit, status);
  } catch (error) {
    console.error(`Failed to clear queue "${queue.name}":`, error);
    throw error;
  }
};
172
+
173
/**
 * Gracefully closes all queues registered via createQueue().
 * Useful for process shutdown.
 * Closes every queue even if some fail (the former sequential loop aborted
 * on the first failure, leaving later queues open and the registry stale).
 * The registry is always cleared; the first close error, if any, is rethrown.
 * @returns {Promise<void>}
 */
export const closeAllQueues = async () => {
  const entries = [...queues.entries()];
  queues.clear();

  const results = await Promise.allSettled(
    entries.map(([, queue]) => queue.close()),
  );

  let firstError = null;
  results.forEach((result, i) => {
    const [name] = entries[i];
    if (result.status === "fulfilled") {
      console.log(`Queue "${name}" closed`);
    } else {
      console.error("Failed to close queues:", result.reason);
      firstError = firstError ?? result.reason;
    }
  });

  if (firstError) {
    throw firstError;
  }
};
190
+
191
/**
 * Health check for Redis connectivity.
 * @returns {Promise<boolean>} True if Redis is accessible
 */
export const checkQueueHealth = async () => {
  try {
    if (queues.size === 0) {
      // Create a temporary queue to test connectivity.
      const testQueue = new Queue("_health_check", {
        connection: DEFAULT_REDIS_CONFIG,
      });
      try {
        // BullMQ's Queue#client getter returns Promise<Redis>; calling
        // .ping() on the un-awaited promise was a TypeError.
        const client = await testQueue.client;
        await client.ping();
      } finally {
        // Always release the temporary connection, even if PING fails.
        await testQueue.close();
      }
      return true;
    }

    const [firstQueue] = queues.values();
    const client = await firstQueue.client;
    await client.ping();
    return true;
  } catch (error) {
    console.error("Queue health check failed:", error);
    return false;
  }
};
@@ -0,0 +1,253 @@
1
+ /**
2
+ * @techstream/quark-core - Rate Limiting Module
3
+ * Provides both in-memory and Redis-based rate limiting
4
+ */
5
+
6
/**
 * In-memory rate limiter (for single-instance deployments).
 * Tracks a fixed-window counter per key in a Map.
 */
class MemoryRateLimiter {
  constructor() {
    this.store = new Map();
    this.cleanupInterval = null;
  }

  /**
   * Start periodic cleanup of expired records.
   * Idempotent: a second call while a timer is active is a no-op.
   */
  startCleanup(intervalMs = 60000) {
    if (this.cleanupInterval !== null) {
      return;
    }

    this.cleanupInterval = setInterval(() => {
      const cutoff = Date.now();
      for (const [key, entry] of this.store.entries()) {
        if (cutoff > entry.resetTime) {
          this.store.delete(key);
        }
      }
    }, intervalMs);

    // Don't keep the process alive just for cleanup
    this.cleanupInterval.unref();
  }

  /**
   * Stop cleanup interval.
   */
  stopCleanup() {
    if (this.cleanupInterval === null) {
      return;
    }
    clearInterval(this.cleanupInterval);
    this.cleanupInterval = null;
  }

  /**
   * Check and increment rate limit for a key within a fixed window.
   * @returns {Promise<{limited: boolean, remaining: number, resetTime: number}>}
   */
  async checkLimit(key, maxRequests, windowMs) {
    const timestamp = Date.now();
    let entry = this.store.get(key);
    if (entry === undefined) {
      entry = { count: 0, resetTime: timestamp + windowMs };
    }

    // A new window starts once the previous one has elapsed.
    if (timestamp > entry.resetTime) {
      entry.count = 0;
      entry.resetTime = timestamp + windowMs;
    }

    if (entry.count >= maxRequests) {
      return { limited: true, remaining: 0, resetTime: entry.resetTime };
    }

    entry.count += 1;
    this.store.set(key, entry);

    return {
      limited: false,
      remaining: maxRequests - entry.count,
      resetTime: entry.resetTime,
    };
  }

  /**
   * Reset a specific key.
   */
  async reset(key) {
    this.store.delete(key);
  }

  /**
   * Clear all records.
   */
  async clear() {
    this.store.clear();
  }
}
94
+
95
/**
 * Redis-based rate limiter (for multi-instance deployments).
 * Uses Lua scripting for atomic check-and-increment.
 */
class RedisRateLimiter {
  /**
   * @param {Object} redisClient - ioredis-compatible client (eval/del/scan)
   * @param {Object} [options]
   * @param {boolean} [options.failOpen=true] - Allow requests when Redis errors
   */
  constructor(redisClient, options = {}) {
    this.redis = redisClient;
    this.failOpen = options.failOpen ?? true;
  }

  /**
   * Check and increment rate limit using an atomic Lua script.
   * Prevents TOCTOU race conditions by doing INCR + PEXPIRE in one round-trip.
   * @returns {Promise<{limited: boolean, remaining: number, resetTime: number}>}
   */
  async checkLimit(key, maxRequests, windowMs) {
    const now = Date.now();
    const windowKey = `ratelimit:${key}`;

    try {
      // Atomic Lua: INCR the key, set PEXPIRE on first request, return [count, pttl]
      const luaScript = `
        local count = redis.call('INCR', KEYS[1])
        if count == 1 then
          redis.call('PEXPIRE', KEYS[1], ARGV[1])
        end
        local ttl = redis.call('PTTL', KEYS[1])
        return {count, ttl}
      `;

      const [currentCount, ttl] = await this.redis.eval(
        luaScript,
        1,
        windowKey,
        windowMs,
      );

      // PTTL returns a negative value when no expiry is set; fall back to
      // a full window from now in that case.
      const resetTime = ttl > 0 ? now + ttl : now + windowMs;

      if (currentCount > maxRequests) {
        return {
          limited: true,
          remaining: 0,
          resetTime,
        };
      }

      return {
        limited: false,
        remaining: maxRequests - currentCount,
        resetTime,
      };
    } catch (error) {
      // Never swallow the Redis failure silently — it would hide outages
      // behind (apparent) rate-limit decisions.
      console.error(`Rate limit check failed for key "${key}":`, error);

      // Configurable fail-open / fail-closed behaviour
      if (this.failOpen) {
        return {
          limited: false,
          remaining: maxRequests,
          resetTime: now + windowMs,
        };
      }
      return {
        limited: true,
        remaining: 0,
        resetTime: now + windowMs,
      };
    }
  }

  /**
   * Reset a specific key.
   */
  async reset(key) {
    await this.redis.del(`ratelimit:${key}`);
  }

  /**
   * Clear all rate limit keys using SCAN (non-blocking, production-safe).
   */
  async clear() {
    let cursor = "0";
    do {
      const [nextCursor, keys] = await this.redis.scan(
        cursor,
        "MATCH",
        "ratelimit:*",
        "COUNT",
        100,
      );
      cursor = nextCursor;
      if (keys.length > 0) {
        await this.redis.del(...keys);
      }
    } while (cursor !== "0");
  }
}
190
+
191
/**
 * Create a rate limiter instance.
 * @param {Object} options - Configuration options
 * @param {string} options.type - "memory" or "redis"
 * @param {Object} options.redisClient - Redis client instance (required if type is "redis")
 * @returns {MemoryRateLimiter|RedisRateLimiter}
 * @throws {Error} When type is "redis" but no client is provided
 */
export function createRateLimiter(options = {}) {
  const { type = "memory", redisClient = null } = options;

  if (type !== "redis") {
    // Memory-backed limiter: start its background expiry sweep immediately.
    const memoryLimiter = new MemoryRateLimiter();
    memoryLimiter.startCleanup();
    return memoryLimiter;
  }

  if (!redisClient) {
    throw new Error("Redis client is required for Redis-based rate limiting");
  }
  return new RedisRateLimiter(redisClient, { failOpen: options.failOpen });
}
212
+
213
// All presets currently share the same fixed 15-minute window.
const FIFTEEN_MINUTES_MS = 15 * 60 * 1000;

/**
 * Rate limiter configuration presets.
 * Each entry pairs a window length (ms) with a maximum request count.
 */
export const RATE_LIMIT_PRESETS = {
  strict: { windowMs: FIFTEEN_MINUTES_MS, maxRequests: 5 },
  moderate: { windowMs: FIFTEEN_MINUTES_MS, maxRequests: 50 },
  relaxed: { windowMs: FIFTEEN_MINUTES_MS, maxRequests: 100 },
  auth: { windowMs: FIFTEEN_MINUTES_MS, maxRequests: 5 },
  api: { windowMs: FIFTEEN_MINUTES_MS, maxRequests: 100 },
};
238
+
239
/**
 * Helper function to create rate limit middleware.
 * The returned function keys limits by "ip:path".
 * @param {Object} limiter - Rate limiter instance
 * @param {Object} config - Rate limit configuration ({ maxRequests, windowMs })
 * @returns {Function} Async (ip, path) => checkLimit result
 */
export function createRateLimitMiddleware(
  limiter,
  config = RATE_LIMIT_PRESETS.api,
) {
  return async (ip, path) =>
    limiter.checkLimit(`${ip}:${path}`, config.maxRequests, config.windowMs);
}
@@ -0,0 +1,130 @@
1
+ import assert from "node:assert";
2
+ import { test } from "node:test";
3
+ import {
4
+ createRateLimiter,
5
+ createRateLimitMiddleware,
6
+ RATE_LIMIT_PRESETS,
7
+ } from "../src/rate-limiter.js";
8
+
9
// Unit tests for the rate-limiter module (node:test runner).
// Subtests run sequentially via `await t.test(...)`; each one uses its own
// limiter instance and key so state does not leak between cases.
test("Rate Limiter Module", async (t) => {
  await t.test("createRateLimiter creates memory limiter by default", () => {
    const limiter = createRateLimiter();
    assert(limiter !== null);
  });

  await t.test(
    "createRateLimiter throws error for Redis without client",
    () => {
      assert.throws(
        () => createRateLimiter({ type: "redis" }),
        /Redis client is required/,
      );
    },
  );

  await t.test("Memory rate limiter allows requests under limit", async () => {
    const limiter = createRateLimiter({ type: "memory" });
    const result = await limiter.checkLimit("test-key", 5, 60000);

    assert.strictEqual(result.limited, false);
    assert.strictEqual(result.remaining, 4);
    assert(result.resetTime > Date.now());
  });

  await t.test("Memory rate limiter blocks requests over limit", async () => {
    const limiter = createRateLimiter({ type: "memory" });

    // Make 5 requests (limit is 5)
    for (let i = 0; i < 5; i++) {
      await limiter.checkLimit("test-key-2", 5, 60000);
    }

    // 6th request should be blocked
    const result = await limiter.checkLimit("test-key-2", 5, 60000);
    assert.strictEqual(result.limited, true);
    assert.strictEqual(result.remaining, 0);
  });

  await t.test("Memory rate limiter resets after window", async () => {
    const limiter = createRateLimiter({ type: "memory" });

    // Make a request with very short window (10 ms)
    const result1 = await limiter.checkLimit("test-key-3", 1, 10);
    assert.strictEqual(result1.limited, false);

    // Second request should be blocked
    const result2 = await limiter.checkLimit("test-key-3", 1, 10);
    assert.strictEqual(result2.limited, true);

    // Wait for window to expire
    await new Promise((resolve) => setTimeout(resolve, 20));

    // Should be allowed again
    const result3 = await limiter.checkLimit("test-key-3", 1, 10);
    assert.strictEqual(result3.limited, false);
  });

  await t.test("Memory rate limiter can reset a key", async () => {
    const limiter = createRateLimiter({ type: "memory" });

    await limiter.checkLimit("test-key-4", 1, 60000);
    await limiter.reset("test-key-4");

    const result = await limiter.checkLimit("test-key-4", 1, 60000);
    assert.strictEqual(result.limited, false);
  });

  await t.test("Memory rate limiter can clear all keys", async () => {
    const limiter = createRateLimiter({ type: "memory" });

    await limiter.checkLimit("key-1", 1, 60000);
    await limiter.checkLimit("key-2", 1, 60000);
    await limiter.clear();

    // Both keys should be usable again after clear()
    const result1 = await limiter.checkLimit("key-1", 1, 60000);
    const result2 = await limiter.checkLimit("key-2", 1, 60000);

    assert.strictEqual(result1.limited, false);
    assert.strictEqual(result2.limited, false);
  });

  await t.test("RATE_LIMIT_PRESETS have correct structure", () => {
    assert(RATE_LIMIT_PRESETS.strict);
    assert(RATE_LIMIT_PRESETS.moderate);
    assert(RATE_LIMIT_PRESETS.relaxed);
    assert(RATE_LIMIT_PRESETS.auth);
    assert(RATE_LIMIT_PRESETS.api);

    assert.strictEqual(RATE_LIMIT_PRESETS.auth.maxRequests, 5);
    assert.strictEqual(RATE_LIMIT_PRESETS.api.maxRequests, 100);
  });

  await t.test("createRateLimitMiddleware returns a function", () => {
    const limiter = createRateLimiter({ type: "memory" });
    const middleware = createRateLimitMiddleware(limiter);

    assert.strictEqual(typeof middleware, "function");
  });

  await t.test("createRateLimitMiddleware works with API preset", async () => {
    const limiter = createRateLimiter({ type: "memory" });
    const middleware = createRateLimitMiddleware(
      limiter,
      RATE_LIMIT_PRESETS.api,
    );

    const result = await middleware("192.168.1.1", "/api/posts");

    assert.strictEqual(result.limited, false);
    assert(result.remaining > 0);
  });

  await t.test("Cleanup interval is started for memory limiter", () => {
    const limiter = createRateLimiter({ type: "memory" });
    assert(limiter.cleanupInterval !== null);

    // Clean up so the test process isn't held by a live timer
    limiter.stopCleanup();
    assert(limiter.cleanupInterval === null);
  });
});
package/src/redis.js ADDED
@@ -0,0 +1,96 @@
1
/**
 * Resolves the Redis connection URL.
 * An explicit REDIS_URL wins; otherwise one is assembled from
 * REDIS_HOST / REDIS_PORT with localhost:6379 fallbacks.
 */
function getRedisUrl() {
  const { REDIS_URL, REDIS_HOST, REDIS_PORT } = process.env;
  if (REDIS_URL) {
    return REDIS_URL;
  }
  return `redis://${REDIS_HOST || "localhost"}:${REDIS_PORT || "6379"}`;
}
15
+
16
/**
 * Creates a Redis configuration object from environment variables.
 * Returns the URL and any additional options — does not create an actual
 * connection. Pass the URL to your Redis library of choice (e.g., ioredis).
 * Caller-provided options are spread last, so they may override `url`.
 */
export const createRedisConfig = (options = {}) => ({
  url: getRedisUrl(),
  ...options,
});
29
+
30
/**
 * @deprecated Use `createRedisConfig` instead.
 * Kept as an alias for backward compatibility; despite the name it returns a
 * config object, not a live client.
 */
export const createRedisClient = createRedisConfig;
34
+
35
/**
 * Pings Redis to verify connectivity.
 * Dynamically imports ioredis so it doesn't fail at load time if the package is not installed.
 * Creates a temporary connection, sends PING, expects PONG, then disconnects.
 * The timeout timer is cleared in `finally` so a successful ping doesn't keep
 * the event loop alive for up to `timeout` ms afterwards.
 *
 * @param {object} [options]
 * @param {number} [options.timeout=3000] - Connection/ping timeout in milliseconds.
 * @returns {Promise<{ status: "ok", latencyMs: number } | { status: "error", message: string }>}
 */
export async function pingRedis({ timeout = 3000 } = {}) {
  /** @type {import("ioredis").default | null} */
  let client = null;
  /** @type {NodeJS.Timeout | null} */
  let pingTimer = null;

  try {
    const { default: Redis } = await import("ioredis");

    const url = getRedisUrl();

    client = new Redis(url, {
      lazyConnect: true,
      connectTimeout: timeout,
      maxRetriesPerRequest: 0,
      enableReadyCheck: false,
    });

    await client.connect();

    const start = performance.now();
    const result = await Promise.race([
      client.ping(),
      new Promise((_, reject) => {
        pingTimer = setTimeout(() => reject(new Error("PING timed out")), timeout);
      }),
    ]);

    const latencyMs = Math.round(performance.now() - start);

    if (result !== "PONG") {
      return {
        status: "error",
        message: `Unexpected PING response: ${result}`,
      };
    }

    return { status: "ok", latencyMs };
  } catch (/** @type {any} */ error) {
    const message =
      error?.code === "MODULE_NOT_FOUND" ||
      error?.code === "ERR_MODULE_NOT_FOUND"
        ? "ioredis is not installed"
        : (error?.message ?? String(error));
    return { status: "error", message };
  } finally {
    // Release the race timer; otherwise it pins the event loop until it fires.
    if (pingTimer !== null) {
      clearTimeout(pingTimer);
    }
    try {
      await client?.disconnect();
    } catch {
      // ignore disconnect errors
    }
  }
}
95
+
96
+ export { getRedisUrl };