consumer-pgmq 3.0.0 → 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -11,6 +11,7 @@ This project is a consumer of Supabase/Postgresql queue(using pgmq extension) to
11
11
  - Pop consume type is when the consumer gets the message and the message is deleted from queue.
12
12
  - Support for both Supabase and Postgresql
13
13
  - Support for both visibility time and pool size
14
+ - Support to control the limit of consumers running at once. PS: this is focused on the Postgres queue driver and is helpful when you have a weak database that does not allow many connections.
14
15
 
15
16
  ## Installation
16
17
 
@@ -43,10 +44,16 @@ if you use read consume type, the pool size is the number of messages will get a
43
44
  - enabledPolling: The enabled polling. PS: if true, the consumer will poll for messages; if false, the consumer will consume messages one time and stop. PS: this option is required for versions greater than 1.0.5.
44
45
  - queueNameDlq: The name of the dead letter queue. PS: recommended to set the same name of the queue, but suffix with '_dlq'. For example: **messages_dlq**
45
46
  - totalRetriesBeforeSendToDlq: The total retries before send to dlq. For example: if you set totalRetriesBeforeSendToDlq equal 2, the message will be sent to dlq if the handler fails 2 times, so the third time the message will be sent to dlq and remove the main queue to avoid infinite retries.
47
+ - enableControlConsumer: Enables consumer control. PS: if true, you can control the number of consumers running at once, which is useful when your Postgres database is weak and having many connections can be a problem.
48
+ - Warning:
49
+ - You need to execute SQL to create the table 'workers' and you can use the **schema.sql** file for it.
50
+ - If you need to increase the number of workers you can insert new rows into the table 'workers'.
51
+ - Each row in the table 'workers' represents a process running as a consumer(worker). PS: the status column can be 'idle' or 'working'. **idle** means it is waiting to be allocated by a consumer and **working** means it was allocated by a consumer.
46
52
 
47
53
  ## Extra points to know when use the dlq feature
48
54
  - The dead letter queue does not work if you set the consumerType option to 'pop', because pop gets the message and removes it from the queue at the same time, so if processing fails you lose the message.
49
55
  - Recommendation: do not set a low value for the option 'visibilityTime' if you are using the dead letter queue feature. For example: if you set visibilityTime lower than 30 seconds and the message wasn't deleted before becoming visible again, the consumer application can consume the same message again.
56
+ - On the Postgresql queue driver, enabling the option 'isCustomQueueImplementation' means you created a custom table to work as the queue. PS: in this case you need to use the **schema.sql** file to create the queue table; if you don't want 'jobs' and 'jobs_dlq' as table names you can change them in the **schema.sql** file.
50
57
 
51
58
  ## Events
52
59
 
@@ -228,6 +235,82 @@ async function start() {
228
235
  start()
229
236
  ```
230
237
 
238
+ - Consuming messages from a Postgresql queue with the options enableControlConsumer and isCustomQueueImplementation enabled on the Postgres queue driver:
239
+
240
+ ```js
241
+ import { config } from "dotenv"
242
+ config()
243
+
244
+ import Consumer from '../src/consumer';
245
+ import PostgresQueueDriver from '../src/queueDriver/PostgresQueueDriver';
246
+
247
+ import { Client } from 'pg'
248
+
249
+ async function start() {
250
+
251
+ const pgClient = new Client({
252
+ host: process.env.POSTGRES_HOST,
253
+ database: process.env.POSTGRES_DATABASE,
254
+ password: process.env.POSTGRES_PASSWORD,
255
+ port: Number(process.env.POSTGRES_PORT),
256
+ user: process.env.POSTGRES_USER,
257
+ ssl: true,
258
+ })
259
+
260
+ await pgClient.connect()
261
+
262
+
263
+ const postgresQueueDriver = new PostgresQueueDriver(
264
+ pgClient, "public", true
265
+ )
266
+
267
+ const consumer = new Consumer(
268
+ {
269
+ queueName: 'jobs',
270
+ visibilityTime: 30,
271
+ consumeType: "read",
272
+ poolSize: 8,
273
+ timeMsWaitBeforeNextPolling: 1000,
274
+ enabledPolling: true,
275
+ queueNameDlq: "jobs_dlq",
276
+ totalRetriesBeforeSendToDlq: 2,
277
+ enableControlConsumer: true
278
+ },
279
+ async function (message: { [key: string]: any }, signal): Promise<void> {
280
+ console.log(message)
281
+ },
282
+ postgresQueueDriver
283
+ );
284
+
285
+ for (let index = 0; index < 100; index++) {
286
+ await postgresQueueDriver.send("jobs", {
287
+ message: `Message ${index}`,
288
+ id: index
289
+ })
290
+ }
291
+
292
+ consumer.on("send-to-dlq", (message: { [key: string]: any }) => {
293
+ console.log("Send to DLQ =>", message)
294
+ })
295
+
296
+ consumer.on('error', (err: Error) => {
297
+ console.error('Error consuming message:', err.message);
298
+ });
299
+
300
+ await consumer.start();
301
+
302
+ process.on("SIGINT", async () => {
303
+ await consumer.freeConsumer()
304
+ await pgClient.end()
305
+ process.exit(0)
306
+ })
307
+
308
+ }
309
+
310
+ start()
311
+ ```
312
+
313
+
231
314
 
232
315
 
233
316
 
package/dist/consumer.js CHANGED
@@ -9,6 +9,7 @@ class Consumer extends events_1.EventEmitter {
9
9
  constructor(options, callback, client) {
10
10
  super();
11
11
  this.setTimeoutId = null;
12
+ this.id = null;
12
13
  this.options = options;
13
14
  this.callback = callback;
14
15
  this.client = client;
@@ -121,7 +122,17 @@ class Consumer extends events_1.EventEmitter {
121
122
  * @public
122
123
  */
123
124
  async start() {
125
+ if (this.options.enableControlConsumer) {
126
+ const { id } = await this.client.allocateConsumer();
127
+ this.id = id;
128
+ }
124
129
  await this.pollMessage();
125
130
  }
131
+ async freeConsumer() {
132
+ if (this.id) {
133
+ console.log("passed on here");
134
+ await this.client.freeConsumer(this.id);
135
+ }
136
+ }
126
137
  }
127
138
  exports.default = Consumer;
@@ -1,9 +1,54 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  class PostgresQueueDriver {
4
- constructor(connection, schema = "public") {
4
+ constructor(connection, schema = "public", isCustomQueueImplementation = true) {
5
5
  this.connection = connection;
6
6
  this.schema = schema;
7
+ this.isCustomQueueImplementation = isCustomQueueImplementation;
8
+ }
9
+ /**
10
+ * Allocate the consumer
11
+ * @returns Promise<{ id: string; }>
12
+ */
13
+ async allocateConsumer() {
14
+ const register = await this.connection.query(`
15
+ WITH next_workers AS (
16
+ SELECT id
17
+ FROM workers as w
18
+ WHERE
19
+ status = 'idle'
20
+ LIMIT 1
21
+ FOR UPDATE SKIP LOCKED
22
+ )
23
+ UPDATE workers
24
+ SET status = 'working',
25
+ updated_at = now()
26
+ FROM next_workers
27
+ WHERE workers.id = next_workers.id
28
+ RETURNING workers.*;
29
+ `);
30
+ if (register.rows.length == 0) {
31
+ throw new Error("No available consumer(worker) to allocate");
32
+ }
33
+ return { id: register.rows[0].id };
34
+ }
35
+ async freeConsumer(id) {
36
+ try {
37
+ await this.connection.query(`UPDATE workers set status = 'idle' WHERE id = $1`, [id]);
38
+ }
39
+ catch (error) {
40
+ throw error;
41
+ }
42
+ }
43
+ async sendCustomQueue(queueName, message) {
44
+ try {
45
+ const query = `INSERT INTO ${this.schema}.${queueName}(payload) VALUES($1)`;
46
+ await this.connection.query(query, [JSON.stringify(message)]);
47
+ return { error: null };
48
+ }
49
+ catch (error) {
50
+ return { error };
51
+ }
7
52
  }
8
53
  /**
9
54
  * Send the message
@@ -12,6 +57,9 @@ class PostgresQueueDriver {
12
57
  * @returns Promise<{ error: any }>
13
58
  */
14
59
  async send(queueName, message) {
60
+ if (this.isCustomQueueImplementation) {
61
+ return this.sendCustomQueue(queueName, message);
62
+ }
15
63
  try {
16
64
  await this.connection.query(`
17
65
  SELECT * FROM ${this.schema}.send(
@@ -26,6 +74,51 @@ class PostgresQueueDriver {
26
74
  return { error };
27
75
  }
28
76
  }
77
+ async getCustomQueue(queueName, visibilityTime, totalMessages) {
78
+ try {
79
+ const register = await this.connection.query(`
80
+ WITH next_job AS (
81
+ SELECT id
82
+ FROM ${queueName} as jobs
83
+ WHERE
84
+ (
85
+ status = 'pending'
86
+ OR (status = 'in_progress' AND visible_at <= now())
87
+ )
88
+ ORDER BY created_at
89
+ LIMIT $1
90
+ FOR UPDATE SKIP LOCKED
91
+ )
92
+ UPDATE jobs
93
+ SET status = 'in_progress',
94
+ updated_at = now(),
95
+ visible_at = now() + interval '${visibilityTime} seconds',
96
+ retry_count = retry_count + 1
97
+ FROM next_job
98
+ WHERE jobs.id = next_job.id
99
+ RETURNING jobs.*;
100
+ `, [
101
+ totalMessages,
102
+ ]);
103
+ if (!register.rows) {
104
+ return { data: [], error: null };
105
+ }
106
+ const items = [];
107
+ for (const row of register.rows) {
108
+ items.push({
109
+ msg_id: row.id,
110
+ read_ct: row.retry_count,
111
+ enqueued_at: row.created_at,
112
+ vt: row.visible_at,
113
+ message: row.payload, // Assuming the message content is stored in a column named 'payload'
114
+ });
115
+ }
116
+ return { data: items, error: null };
117
+ }
118
+ catch (error) {
119
+ return { data: [], error };
120
+ }
121
+ }
29
122
  /**
30
123
  * Get the message
31
124
  * @param queueName The name of the queue
@@ -34,6 +127,9 @@ class PostgresQueueDriver {
34
127
  * @returns Promise<{ data: Message[], error: any }>
35
128
  */
36
129
  async get(queueName, visibilityTime, totalMessages) {
130
+ if (this.isCustomQueueImplementation) {
131
+ return this.getCustomQueue(queueName, visibilityTime, totalMessages);
132
+ }
37
133
  try {
38
134
  const register = await this.connection.query(`
39
135
  SELECT * FROM ${this.schema}.read(
@@ -57,6 +153,13 @@ class PostgresQueueDriver {
57
153
  * @returns Promise<{ data: Message[], error: any }>
58
154
  */
59
155
  async pop(queueName) {
156
+ if (this.isCustomQueueImplementation) {
157
+ const result = await this.getCustomQueue(queueName, 30, 1);
158
+ if (result.data && result.data[0]) {
159
+ await this.delete(queueName, result.data[0].msg_id);
160
+ }
161
+ return result;
162
+ }
60
163
  try {
61
164
  const register = await this.connection.query(`
62
165
  SELECT * FROM ${this.schema}.pop(
@@ -72,6 +175,12 @@ class PostgresQueueDriver {
72
175
  return { data: [], error };
73
176
  }
74
177
  }
178
+ async deleteCustomQueue(queueName, messageID) {
179
+ await this.connection.query(`
180
+ DELETE FROM ${queueName}
181
+ WHERE id = $1;`, [messageID]);
182
+ return { error: null };
183
+ }
75
184
  /**
76
185
  * Delete the message
77
186
  * @param queueName The name of the queue
@@ -80,6 +189,9 @@ class PostgresQueueDriver {
80
189
  */
81
190
  async delete(queueName, messageID) {
82
191
  try {
192
+ if (this.isCustomQueueImplementation) {
193
+ return this.deleteCustomQueue(queueName, messageID);
194
+ }
83
195
  await this.connection.query(`
84
196
  SELECT * FROM ${this.schema}.delete(
85
197
  queue_name => $1,
@@ -56,5 +56,11 @@ class SupabaseQueueDriver {
56
56
  });
57
57
  return { error };
58
58
  }
59
+ allocateConsumer() {
60
+ throw new Error("method logic no implemented");
61
+ }
62
+ freeConsumer(id) {
63
+ throw new Error("method logic no implemented");
64
+ }
59
65
  }
60
66
  exports.default = SupabaseQueueDriver;
@@ -14,41 +14,43 @@ async function start() {
14
14
  password: process.env.POSTGRES_PASSWORD,
15
15
  port: Number(process.env.POSTGRES_PORT),
16
16
  user: process.env.POSTGRES_USER,
17
- ssl: false,
17
+ ssl: true,
18
18
  })
19
19
 
20
20
  await pgClient.connect()
21
21
 
22
22
 
23
23
  const postgresQueueDriver = new PostgresQueueDriver(
24
- pgClient, "pgmq"
24
+ pgClient, "public", true
25
25
  )
26
26
 
27
-
28
27
  const consumer = new Consumer(
29
28
  {
30
- queueName: 'subscriptions',
29
+ queueName: 'jobs',
31
30
  visibilityTime: 30,
32
31
  consumeType: "read",
33
32
  poolSize: 8,
34
33
  timeMsWaitBeforeNextPolling: 1000,
35
34
  enabledPolling: true,
36
- queueNameDlq: "subscriptions_dlq",
37
- totalRetriesBeforeSendToDlq: 2
35
+ queueNameDlq: "jobs_dlq",
36
+ totalRetriesBeforeSendToDlq: 2,
37
+ enableControlConsumer: true
38
38
  },
39
39
  async function (message: { [key: string]: any }, signal): Promise<void> {
40
40
  console.log(message)
41
- throw new Error("Error in message")
42
- // const url = "https://jsonplaceholder.typicode.com/todos/1";
43
- // await timersPromises.setTimeout(100, null, { signal });
44
- // console.log("Fetching data...");
45
- // const response = await fetch(url, { signal });
46
- // const todo = await response.json();
47
- // console.log("Todo:", todo);
48
41
  },
49
42
  postgresQueueDriver
50
43
  );
51
44
 
45
+ for (let index = 0; index < 100; index++) {
46
+ await postgresQueueDriver.send("jobs", {
47
+ message: `Message ${index}`,
48
+ id: index
49
+ })
50
+ }
51
+
52
+ console.log("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
53
+
52
54
  consumer.on("send-to-dlq", (message: { [key: string]: any }) => {
53
55
  console.log("Send to DLQ =>", message)
54
56
  })
@@ -60,6 +62,7 @@ async function start() {
60
62
  await consumer.start();
61
63
 
62
64
  process.on("SIGINT", async () => {
65
+ await consumer.freeConsumer()
63
66
  await pgClient.end()
64
67
  process.exit(0)
65
68
  })
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "consumer-pgmq",
3
- "version": "3.0.0",
3
+ "version": "3.1.1",
4
4
  "description": "The consumer of Supabase pgmq",
5
5
  "main": "dist/index.js",
6
6
  "type": "commonjs",
@@ -24,6 +24,7 @@
24
24
  "@types/jest": "^30.0.0",
25
25
  "@types/pg": "^8.15.5",
26
26
  "ts-jest": "^29.4.1",
27
+ "ts-node": "^10.9.2",
27
28
  "ts-node-dev": "^2.0.0",
28
29
  "typescript": "^5.9.2"
29
30
  },
package/schema.sql ADDED
@@ -0,0 +1,134 @@
1
+
2
+ CREATE TYPE job_status AS ENUM ('pending', 'in_progress');
3
+
4
+ CREATE TYPE worker_status AS ENUM ('idle', 'working');
5
+
6
+ CREATE TABLE workers (
7
+ id SERIAL PRIMARY KEY,
8
+ status worker_status NOT NULL DEFAULT 'idle', -- Tracks the job lifecycle
9
+ created_at TIMESTAMP DEFAULT now(),
10
+ updated_at TIMESTAMP DEFAULT now()
11
+ );
12
+
13
+ CREATE TABLE jobs (
14
+ id SERIAL PRIMARY KEY,
15
+ status job_status NOT NULL DEFAULT 'pending', -- Tracks the job lifecycle
16
+ payload JSONB, -- Chose payload as `jsonb` but could have gone with `bytea` instead.
17
+ visible_at TIMESTAMP DEFAULT now(), -- SQS visibility timeout (will come back to this later)
18
+ retry_count INT DEFAULT 0, -- Tracks retry attempts
19
+ created_at TIMESTAMP DEFAULT now(),
20
+ updated_at TIMESTAMP DEFAULT now()
21
+ );
22
+
23
+ CREATE TABLE jobs_dlq (
24
+ id SERIAL PRIMARY KEY,
25
+ status job_status NOT NULL DEFAULT 'pending', -- Tracks the job lifecycle
26
+ payload JSONB, -- Chose payload as `jsonb` but could have gone with `bytea` instead.
27
+ visible_at TIMESTAMP DEFAULT now(), -- SQS visibility timeout (will come back to this later)
28
+ retry_count INT DEFAULT 0, -- Tracks retry attempts
29
+ created_at TIMESTAMP DEFAULT now(),
30
+ updated_at TIMESTAMP DEFAULT now()
31
+ );
32
+
33
+
34
+ WITH next_workers AS (
35
+ SELECT id
36
+ FROM workers as w
37
+ WHERE
38
+ status = 'idle'
39
+ LIMIT 1
40
+ FOR UPDATE SKIP LOCKED
41
+ )
42
+ UPDATE workers
43
+ SET status = 'working',
44
+ updated_at = now()
45
+ FROM next_workers
46
+ WHERE workers.id = next_workers.id
47
+ RETURNING workers.*;
48
+
49
+ CREATE INDEX ON public.workers (status);
50
+
51
+ insert into workers(status)
52
+ values ('idle'),
53
+ ('idle'),
54
+ ('idle'),
55
+ ('idle'),
56
+ ('idle'),
57
+ ('idle'),
58
+ ('idle'),
59
+ ('idle'),
60
+ ('idle'),
61
+ ('idle'),
62
+ ('idle'),
63
+ ('idle'),
64
+ ('idle'),
65
+ ('idle'),
66
+ ('idle'),
67
+ ('idle'),
68
+ ('idle'),
69
+ ('idle'),
70
+ ('idle'),
71
+ ('idle'),
72
+ ('idle'),
73
+ ('idle'),
74
+ ('idle'),
75
+ ('idle'),
76
+ ('idle'),
77
+ ('idle'),
78
+ ('idle'),
79
+ ('idle'),
80
+ ('idle'),
81
+ ('idle'),
82
+ ('idle'),
83
+ ('idle'),
84
+ ('idle'),
85
+ ('idle'),
86
+ ('idle'),
87
+ ('idle'),
88
+ ('idle'),
89
+ ('idle'),
90
+ ('idle'),
91
+ ('idle'),
92
+ ('idle'),
93
+ ('idle'),
94
+ ('idle'),
95
+ ('idle'),
96
+ ('idle'),
97
+ ('idle'),
98
+ ('idle'),
99
+ ('idle'),
100
+ ('idle'),
101
+ ('idle'),
102
+ ('idle'),
103
+ ('idle'),
104
+ ('idle'),
105
+ ('idle'),
106
+ ('idle'),
107
+ ('idle'),
108
+ ('idle'),
109
+ ('idle'),
110
+ ('idle'),
111
+ ('idle'),
112
+ ('idle'),
113
+ ('idle'),
114
+ ('idle'),
115
+ ('idle'),
116
+ ('idle'),
117
+ ('idle'),
118
+ ('idle'),
119
+ ('idle'),
120
+ ('idle'),
121
+ ('idle'),
122
+ ('idle'),
123
+ ('idle'),
124
+ ('idle'),
125
+ ('idle'),
126
+ ('idle'),
127
+ ('idle'),
128
+ ('idle'),
129
+ ('idle'),
130
+ ('idle'),
131
+ ('idle'),
132
+ ('idle');
133
+
134
+
package/src/consumer.ts CHANGED
@@ -24,6 +24,8 @@ class Consumer extends EventEmitter {
24
24
 
25
25
  private setTimeoutId: NodeJS.Timeout | null = null;
26
26
 
27
+ private id: string | null = null;
28
+
27
29
  constructor(
28
30
  options: Options,
29
31
  callback: HandlerCallback,
@@ -174,8 +176,21 @@ class Consumer extends EventEmitter {
174
176
  * @public
175
177
  */
176
178
  async start() {
179
+ if (this.options.enableControlConsumer) {
180
+ const { id } = await this.client.allocateConsumer()
181
+ this.id = id
182
+ }
183
+
184
+
177
185
  await this.pollMessage();
178
186
  }
187
+
188
+ async freeConsumer() {
189
+ if (this.id) {
190
+ console.log("passed on here")
191
+ await this.client.freeConsumer(this.id)
192
+ }
193
+ }
179
194
  }
180
195
 
181
196
 
@@ -7,8 +7,61 @@ class PostgresQueueDriver implements QueueDriver {
7
7
  constructor(
8
8
  private connection: Client,
9
9
  private schema: string = "public",
10
+ private isCustomQueueImplementation = true
10
11
  ) { }
11
12
 
13
+ /**
14
+ * Allocate the consumer
15
+ * @returns Promise<{ id: string; }>
16
+ */
17
+ async allocateConsumer(): Promise<{ id: string; }> {
18
+ const register = await this.connection.query(`
19
+ WITH next_workers AS (
20
+ SELECT id
21
+ FROM workers as w
22
+ WHERE
23
+ status = 'idle'
24
+ LIMIT 1
25
+ FOR UPDATE SKIP LOCKED
26
+ )
27
+ UPDATE workers
28
+ SET status = 'working',
29
+ updated_at = now()
30
+ FROM next_workers
31
+ WHERE workers.id = next_workers.id
32
+ RETURNING workers.*;
33
+ `)
34
+
35
+ if (register.rows.length == 0) {
36
+ throw new Error("No available consumer(worker) to allocate");
37
+ }
38
+
39
+ return { id: register.rows[0].id };
40
+ }
41
+
42
+
43
+ async freeConsumer(id: string): Promise<void> {
44
+ try {
45
+ await this.connection.query(`UPDATE workers set status = 'idle' WHERE id = $1`, [id])
46
+ } catch (error) {
47
+ throw error
48
+ }
49
+ }
50
+
51
+
52
+ private async sendCustomQueue(
53
+ queueName: string,
54
+ message: { [key: string]: any; }
55
+ ) {
56
+ try {
57
+ const query = `INSERT INTO ${this.schema}.${queueName}(payload) VALUES($1)`
58
+ await this.connection.query(query, [JSON.stringify(message)])
59
+ return { error: null };
60
+ } catch (error) {
61
+ return { error };
62
+ }
63
+ }
64
+
12
65
  /**
13
66
  * Send the message
14
67
  * @param queueName The name of the queue
@@ -19,6 +72,10 @@ class PostgresQueueDriver implements QueueDriver {
19
72
  queueName: string,
20
73
  message: { [key: string]: any; },
21
74
  ): Promise<{ error: any; }> {
75
+ if (this.isCustomQueueImplementation) {
76
+ return this.sendCustomQueue(queueName, message)
77
+ }
78
+
22
79
  try {
23
80
  await this.connection.query(`
24
81
  SELECT * FROM ${this.schema}.send(
@@ -35,6 +92,61 @@ class PostgresQueueDriver implements QueueDriver {
35
92
  }
36
93
  }
37
94
 
95
+ private async getCustomQueue(
96
+ queueName: string,
97
+ visibilityTime: number,
98
+ totalMessages: number
99
+ ): Promise<{ data: Message[]; error: any; }> {
100
+ try {
101
+ const register = await this.connection.query(
102
+ `
103
+ WITH next_job AS (
104
+ SELECT id
105
+ FROM ${queueName} as jobs
106
+ WHERE
107
+ (
108
+ status = 'pending'
109
+ OR (status = 'in_progress' AND visible_at <= now())
110
+ )
111
+ ORDER BY created_at
112
+ LIMIT $1
113
+ FOR UPDATE SKIP LOCKED
114
+ )
115
+ UPDATE jobs
116
+ SET status = 'in_progress',
117
+ updated_at = now(),
118
+ visible_at = now() + interval '${visibilityTime} seconds',
119
+ retry_count = retry_count + 1
120
+ FROM next_job
121
+ WHERE jobs.id = next_job.id
122
+ RETURNING jobs.*;
123
+ `,
124
+ [
125
+ totalMessages,
126
+ ]
127
+ );
128
+
129
+ if (!register.rows) {
130
+ return { data: [], error: null };
131
+ }
132
+
133
+ const items: Message[] = [];
134
+ for (const row of register.rows) {
135
+ items.push({
136
+ msg_id: row.id,
137
+ read_ct: row.retry_count,
138
+ enqueued_at: row.created_at,
139
+ vt: row.visible_at,
140
+ message: row.payload, // Assuming the message content is stored in a column named 'payload'
141
+ });
142
+ }
143
+
144
+ return { data: items, error: null };
145
+ } catch (error) {
146
+ return { data: [], error };
147
+ }
148
+ }
149
+
38
150
  /**
39
151
  * Get the message
40
152
  * @param queueName The name of the queue
@@ -43,6 +155,10 @@ class PostgresQueueDriver implements QueueDriver {
43
155
  * @returns Promise<{ data: Message[], error: any }>
44
156
  */
45
157
  async get(queueName: string, visibilityTime: number, totalMessages: number): Promise<{ data: Message[]; error: any; }> {
158
+ if (this.isCustomQueueImplementation) {
159
+ return this.getCustomQueue(queueName, visibilityTime, totalMessages);
160
+ }
161
+
46
162
  try {
47
163
  const register = await this.connection.query(`
48
164
  SELECT * FROM ${this.schema}.read(
@@ -70,6 +186,13 @@ class PostgresQueueDriver implements QueueDriver {
70
186
  * @returns Promise<{ data: Message[], error: any }>
71
187
  */
72
188
  async pop(queueName: string): Promise<{ data: Message[]; error: any; }> {
189
+ if (this.isCustomQueueImplementation) {
190
+ const result = await this.getCustomQueue(queueName, 30, 1);
191
+ if (result.data && result.data[0]) {
192
+ await this.delete(queueName, result.data[0].msg_id);
193
+ }
194
+ return result
195
+ }
73
196
  try {
74
197
  const register = await this.connection.query(`
75
198
  SELECT * FROM ${this.schema}.pop(
@@ -89,6 +212,14 @@ class PostgresQueueDriver implements QueueDriver {
89
212
 
90
213
  }
91
214
 
215
+ private async deleteCustomQueue(queueName: string, messageID: number) {
216
+ await this.connection.query(`
217
+ DELETE FROM ${queueName}
218
+ WHERE id = $1;`, [messageID]
219
+ )
220
+ return { error: null };
221
+ }
222
+
92
223
  /**
93
224
  * Delete the message
94
225
  * @param queueName The name of the queue
@@ -97,6 +228,9 @@ class PostgresQueueDriver implements QueueDriver {
97
228
  */
98
229
  async delete(queueName: string, messageID: number): Promise<{ error: any; }> {
99
230
  try {
231
+ if (this.isCustomQueueImplementation) {
232
+ return this.deleteCustomQueue(queueName, messageID)
233
+ }
100
234
  await this.connection.query(`
101
235
  SELECT * FROM ${this.schema}.delete(
102
236
  queue_name => $1,
@@ -67,6 +67,14 @@ class SupabaseQueueDriver implements QueueDriver {
67
67
  return { error };
68
68
  }
69
69
 
70
+ allocateConsumer(): Promise<{ id: string; }> {
71
+ throw new Error("method logic no implemented")
72
+ }
73
+
74
+ freeConsumer(id: string): Promise<void> {
75
+ throw new Error("method logic no implemented")
76
+ }
77
+
70
78
  }
71
79
 
72
80
  export default SupabaseQueueDriver;
package/src/type.ts CHANGED
@@ -40,6 +40,11 @@ interface Options {
40
40
  * The total retries before send the message to DLQ. PS: if set queueNameDlq, this option is required
41
41
  */
42
42
  totalRetriesBeforeSendToDlq?: number;
43
+
44
+ /**
45
+ * The enable control consumer. PS: if true, set the allocate a consumer from table workers, because that way you can control the number of consumers(workers)
46
+ */
47
+ enableControlConsumer?: boolean;
43
48
  }
44
49
 
45
50
  /**
@@ -119,6 +124,10 @@ interface QueueDriver {
119
124
  queueName: string,
120
125
  messageID: number
121
126
  ): Promise<{ error: any }>;
127
+
128
+ allocateConsumer(): Promise<{ id: string }>;
129
+
130
+ freeConsumer(id: string): Promise<void>;
122
131
  }
123
132
 
124
133
  export type { Options, HandlerCallback, Message, QueueDriver }
@@ -18,6 +18,8 @@ describe('Consumer', () => {
18
18
  pop: jest.fn(),
19
19
  delete: jest.fn(),
20
20
  send: jest.fn(),
21
+ allocateConsumer: jest.fn(),
22
+ freeConsumer: jest.fn()
21
23
  }
22
24
  jest.useFakeTimers();
23
25
  })
@@ -188,8 +190,6 @@ describe('Consumer', () => {
188
190
  )
189
191
  })
190
192
 
191
-
192
-
193
193
  it('Should not process message if read method does not return any message second polling', async () => {
194
194
  queueDriver.get.mockResolvedValueOnce({ data: [message], error: null })
195
195
  queueDriver.delete.mockResolvedValueOnce({ error: null })
@@ -397,8 +397,35 @@ describe('Consumer', () => {
397
397
 
398
398
  await consumer.start()
399
399
 
400
+ expect(queueDriver.allocateConsumer).toHaveBeenCalledTimes(0)
400
401
  expect(onError).toHaveBeenCalled()
401
402
  expect((onError.mock.calls[0][0] as Error).message)
402
403
  .toBe('visibilityTime is required for read')
403
404
  })
405
+
406
+
407
+ it('Should call allocateConsumer when option enableControlConsumer is true', async () => {
408
+ queueDriver.allocateConsumer.mockResolvedValue({ id: '1' })
409
+ const consumer = new Consumer(
410
+ { queueName: 'q', consumeType: 'read', enableControlConsumer: true } as Options,
411
+ async () => { },
412
+ queueDriver
413
+ )
414
+
415
+ await consumer.start()
416
+
417
+ expect(queueDriver.allocateConsumer).toHaveBeenCalled()
418
+ })
419
+
420
+
421
+ it('Should call freeConsumer when option enableControlConsumer is true and process exit', async () => {
422
+ queueDriver.allocateConsumer.mockResolvedValue({ id: '1' })
423
+ const consumer = new Consumer(
424
+ { queueName: 'q', consumeType: 'read', enableControlConsumer: true } as Options,
425
+ async () => { },
426
+ queueDriver
427
+ )
428
+
429
+ await consumer.start()
430
+ })
404
431
  })